Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes. See raw diff
- .ai/claude.prompt.md +9 -0
- .ai/codex.prompt.md +27 -0
- .ai/context/overview.md +103 -0
- .ai/gemini.prompt.md +9 -0
- .github/FUNDING.yml +3 -0
- .github/workflows/ruff-lint.yml +48 -0
- .ipynb_checkpoints/run_cache-checkpoint.sh +70 -0
- .venv/pyvenv.cfg +5 -0
- docs/advanced_config.md +831 -0
- docs/dataset_config.md +724 -0
- docs/flux_2.md +285 -0
- docs/flux_kontext.md +190 -0
- docs/framepack.md +618 -0
- docs/framepack_1f.md +367 -0
- docs/hunyuan_video.md +553 -0
- docs/hunyuan_video_1_5.md +372 -0
- docs/kandinsky5.md +476 -0
- docs/loha_lokr.md +341 -0
- docs/ltx_2.md +0 -0
- docs/qwen_image.md +618 -0
- docs/sampling_during_training.md +141 -0
- docs/tools.md +406 -0
- docs/torch_compile.md +399 -0
- docs/wan.md +628 -0
- docs/wan_1f.md +175 -0
- docs/zimage.md +404 -0
- logs/ltx2_cache_gpu2.log +0 -0
- logs/ltx2_cache_gpu7.log +0 -0
- ltx2_train.py +6 -0
- merge_lora.py +4 -0
- qwen_image_train.py +4 -0
- qwen_image_train_network.py +4 -0
- run_cache.sh +70 -0
- scripts/merge_dit_to_comfy.py +107 -0
- src/musubi_tuner/audio_io_utils.py +46 -0
- src/musubi_tuner/audio_loss_balance.py +45 -0
- src/musubi_tuner/audio_supervision.py +86 -0
- src/musubi_tuner/audio_utils.py +76 -0
- src/musubi_tuner/cache_latents.py +440 -0
- src/musubi_tuner/cache_text_encoder_outputs.py +234 -0
- src/musubi_tuner/caption_images_by_qwen_vl.py +256 -0
- src/musubi_tuner/convert_lora.py +224 -0
- src/musubi_tuner/crepa.py +415 -0
- src/musubi_tuner/flux_2_cache_latents.py +126 -0
- src/musubi_tuner/flux_2_cache_text_encoder_outputs.py +91 -0
- src/musubi_tuner/flux_2_generate_image.py +1214 -0
- src/musubi_tuner/flux_2_train_network.py +363 -0
- src/musubi_tuner/fpack_cache_text_encoder_outputs.py +109 -0
- src/musubi_tuner/fpack_generate_video.py +2210 -0
- src/musubi_tuner/fpack_train_network.py +637 -0
.ai/claude.prompt.md
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
## About This File
|
| 2 |
+
|
| 3 |
+
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
| 4 |
+
|
| 5 |
+
## 1. Project Context
|
| 6 |
+
Here is the essential context for our project. Please read and understand it thoroughly.
|
| 7 |
+
|
| 8 |
+
### Project Overview
|
| 9 |
+
@./context/overview.md
|
.ai/codex.prompt.md
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
## About This File
|
| 2 |
+
|
| 3 |
+
This file provides guidance to Codex CLI when working with code in this repository.
|
| 4 |
+
|
| 5 |
+
## Guidelines
|
| 6 |
+
|
| 7 |
+
### Coding Style & Naming Conventions
|
| 8 |
+
- Style: PEP 8, 4‑space indentation, limit lines to ~120 chars.
|
| 9 |
+
- Naming: snake_case for files/functions (`*_train_network.py`, `*_generate_*`), PascalCase for classes.
|
| 10 |
+
- Types/Docs: Prefer type hints for public APIs and short docstrings describing args/returns.
|
| 11 |
+
- Formatting: No formatter configured; keep diffs small and consistent with surrounding code.
|
| 12 |
+
|
| 13 |
+
### Testing Guidelines
|
| 14 |
+
- Current state: No formal test suite.
|
| 15 |
+
- If adding tests, use `pytest`, place under `tests/` mirroring `src/musubi_tuner/` and name files `test_*.py`.
|
| 16 |
+
- Run (uv): `uv run pytest -q`. Run (pip): `pytest -q`.
|
| 17 |
+
- Prefer small, deterministic unit tests around data utilities and argument parsing.
|
| 18 |
+
|
| 19 |
+
### Commit & Pull Request Guidelines
|
| 20 |
+
- Commits: Use Conventional Commit style seen in history (`feat:`, `fix:`, `doc:`). Write clear, scoped messages.
|
| 21 |
+
- PRs: Include a summary, rationale, linked issue(s), and reproduction commands (e.g., the exact `python ... --args`). Add screenshots/log snippets when relevant.
|
| 22 |
+
- Docs: Update related files in `docs/` when changing behavior or flags.
|
| 23 |
+
|
| 24 |
+
### Security & Configuration Tips
|
| 25 |
+
- Large files: Do not commit datasets, model weights, or logs (`logs/` is ignored). Use external storage.
|
| 26 |
+
- Credentials: Keep any tokens/keys out of the repo and environment‑specific.
|
| 27 |
+
- CUDA: Choose the matching extra (`cu124`, `cu128` or `cu130`) for your driver; verify with `torch.cuda.is_available()`.
|
.ai/context/overview.md
ADDED
|
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# overview.md
|
| 2 |
+
|
| 3 |
+
This file provides guidance to developers when working with code in this repository.
|
| 4 |
+
|
| 5 |
+
## Project Overview
|
| 6 |
+
|
| 7 |
+
Musubi Tuner is a Python-based training framework for LoRA (Low-Rank Adaptation) models with multiple video generation architectures including HunyuanVideo, HunyuanVideo 1.5, Wan2.1/2.2, FramePack, FLUX.1 Kontext/FLUX.2, Z-Image and Qwen-Image/Qwen-Image-Edit series/Qwen-Image-Layered. The project focuses on memory-efficient training and inference for video generation models.
|
| 8 |
+
|
| 9 |
+
## Installation and Environment
|
| 10 |
+
|
| 11 |
+
The project uses `pyproject.toml` for dependency management with both pip and uv (experimental) installation methods:
|
| 12 |
+
|
| 13 |
+
- **pip installation**: `pip install -e .` after installing PyTorch with CUDA support
|
| 14 |
+
- **uv installation**: `uv run --extra cu124` (or `cu128`, `cu130`) (uv installation is experimental)
|
| 15 |
+
- **Python requirement**: 3.10 or later (verified with 3.10)
|
| 16 |
+
- **PyTorch requirement**: 2.5.1 or later
|
| 17 |
+
|
| 18 |
+
Optional dependencies include `ascii-magic`, `matplotlib`, `tensorboard`, and `prompt-toolkit`.
|
| 19 |
+
|
| 20 |
+
## Common Development Commands
|
| 21 |
+
|
| 22 |
+
### Dataset Preparation
|
| 23 |
+
```bash
|
| 24 |
+
# Cache latents (required before training)
|
| 25 |
+
python src/musubi_tuner/cache_latents.py --dataset_config path/to/toml --vae path/to/vae --vae_chunk_size 32 --vae_tiling
|
| 26 |
+
|
| 27 |
+
# Cache text encoder outputs (required before training)
|
| 28 |
+
python src/musubi_tuner/cache_text_encoder_outputs.py --dataset_config path/to/toml --text_encoder1 path/to/te1 --text_encoder2 path/to/te2 --batch_size 16
|
| 29 |
+
```
|
| 30 |
+
|
| 31 |
+
`wan_cache_latents.py`, `qwen_image_cache_latents.py` etc. are similar for other architectures.
|
| 32 |
+
|
| 33 |
+
### Training Commands
|
| 34 |
+
```bash
|
| 35 |
+
# HunyuanVideo training
|
| 36 |
+
accelerate launch --num_cpu_threads_per_process 1 --mixed_precision bf16 src/musubi_tuner/hv_train_network.py --dit path/to/dit --dataset_config path/to/toml --network_module networks.lora --network_dim 32
|
| 37 |
+
```
|
| 38 |
+
|
| 39 |
+
`wan_train_network.py`, `qwen_image_train_network.py` etc. are similar for other architectures.
|
| 40 |
+
|
| 41 |
+
Full fine-tuning is also supported for Qwen-Image series with a separate script `qwen_image_train.py` and appropriate arguments.
|
| 42 |
+
|
| 43 |
+
### Inference Commands
|
| 44 |
+
```bash
|
| 45 |
+
# HunyuanVideo inference
|
| 46 |
+
python src/musubi_tuner/hv_generate_video.py --fp8 --video_size 544 960 --video_length 5 --prompt "text" --dit path/to/dit --vae path/to/vae
|
| 47 |
+
```
|
| 48 |
+
|
| 49 |
+
`wan_generate_video.py`, `qwen_image_generate.py` etc. are similar for other architectures.
|
| 50 |
+
|
| 51 |
+
### Utility Commands
|
| 52 |
+
```bash
|
| 53 |
+
# Merge LoRA weights
|
| 54 |
+
python src/musubi_tuner/merge_lora.py --dit path/to/dit --lora_weight path/to/lora.safetensors --save_merged_model path/to/output
|
| 55 |
+
|
| 56 |
+
# Convert LoRA formats
|
| 57 |
+
python src/musubi_tuner/convert_lora.py --input path/to/lora.safetensors --output path/to/converted.safetensors --target other
|
| 58 |
+
|
| 59 |
+
# Post-hoc EMA for LoRA
|
| 60 |
+
python src/musubi_tuner/lora_post_hoc_ema.py [args]
|
| 61 |
+
```
|
| 62 |
+
|
| 63 |
+
### Testing and Development
|
| 64 |
+
No formal test suite is present in this repository. The project relies on manual testing through the training and inference scripts.
|
| 65 |
+
|
| 66 |
+
## Code Architecture
|
| 67 |
+
|
| 68 |
+
### Core Structure
|
| 69 |
+
- `src/musubi_tuner/`: Main package containing all training and inference scripts
|
| 70 |
+
- `src/musubi_tuner/dataset/`: Dataset configuration and loading utilities
|
| 71 |
+
- `src/musubi_tuner/modules/`: Model architectures and components
|
| 72 |
+
- `src/musubi_tuner/networks/`: LoRA network implementations for different architectures
|
| 73 |
+
- `src/musubi_tuner/utils/`: Common utilities for model handling, device management, etc.
|
| 74 |
+
|
| 75 |
+
### Architecture-Specific Modules
|
| 76 |
+
- `hunyuan_model/`: HunyuanVideo model implementation and utilities
|
| 77 |
+
- `wan/`: Wan2.1/2.2 model configurations and modules
|
| 78 |
+
- `qwen_image/`: Qwen-Image model utilities
|
| 79 |
+
- ... and others for FramePack, FLUX, Z-Image
|
| 80 |
+
|
| 81 |
+
### Key Components
|
| 82 |
+
- **Dataset Configuration**: Uses TOML files for complex dataset setups supporting images, videos, control images, and metadata JSONL files
|
| 83 |
+
- **Memory Optimization**: Supports fp8 precision, block swapping, and various attention mechanisms (SDPA, FlashAttention, SageAttention, xformers)
|
| 84 |
+
- **Multi-Architecture Support**: Each architecture has its own training/inference scripts with shared utilities
|
| 85 |
+
- **LoRA Networks**: Modular LoRA implementations with support for different target modules and configurations
|
| 86 |
+
|
| 87 |
+
### Configuration System
|
| 88 |
+
- Dataset configuration uses TOML format with support for multiple datasets, bucketing, and architecture-specific settings
|
| 89 |
+
- Training configuration via command line arguments and accelerate config
|
| 90 |
+
- Support for advanced features like timestep sampling, discrete flow shift, and memory-saving options
|
| 91 |
+
|
| 92 |
+
### Memory Management
|
| 93 |
+
- Aggressive memory optimization with options like `--blocks_to_swap`, `--fp8_base`, `--fp8_llm`
|
| 94 |
+
- VAE tiling or chunking support for handling large resolutions (depending on architecture)
|
| 95 |
+
- Gradient checkpointing and mixed precision training
|
| 96 |
+
- Block-swap (offloading weights to CPU) for large models
|
| 97 |
+
|
| 98 |
+
## Development Notes
|
| 99 |
+
- The project is under active development with experimental features
|
| 100 |
+
- No formal CI/CD or automated testing
|
| 101 |
+
- Uses accelerate for distributed training setup
|
| 102 |
+
- Supports both interactive and batch inference modes
|
| 103 |
+
- Comprehensive documentation in `docs/` directory for advanced configurations and architecture-specific guides
|
.ai/gemini.prompt.md
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
## About This File
|
| 2 |
+
|
| 3 |
+
This file provides guidance to Gemini CLI (https://github.com/google-gemini/gemini-cli) when working with code in this repository.
|
| 4 |
+
|
| 5 |
+
## 1. Project Context
|
| 6 |
+
Here is the essential context for our project. Please read and understand it thoroughly.
|
| 7 |
+
|
| 8 |
+
### Project Overview
|
| 9 |
+
@./context/overview.md
|
.github/FUNDING.yml
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# These are supported funding model platforms
|
| 2 |
+
|
| 3 |
+
github: kohya-ss
|
.github/workflows/ruff-lint.yml
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
name: Ruff Lint
|
| 2 |
+
|
| 3 |
+
on:
|
| 4 |
+
pull_request:
|
| 5 |
+
branches:
|
| 6 |
+
- main
|
| 7 |
+
push:
|
| 8 |
+
paths:
|
| 9 |
+
- "src/musubi_tuner/**/*.py"
|
| 10 |
+
- "src/musubi_tuner/**/*.pth"
|
| 11 |
+
- ".python-version"
|
| 12 |
+
- "pyproject.toml"
|
| 13 |
+
workflow_dispatch:
|
| 14 |
+
|
| 15 |
+
jobs:
|
| 16 |
+
lint:
|
| 17 |
+
runs-on: ubuntu-latest
|
| 18 |
+
steps:
|
| 19 |
+
- uses: actions/checkout@v4
|
| 20 |
+
|
| 21 |
+
- name: "Set up Python"
|
| 22 |
+
uses: actions/setup-python@v5
|
| 23 |
+
with:
|
| 24 |
+
python-version-file: ".python-version"
|
| 25 |
+
|
| 26 |
+
- name: "Install uv"
|
| 27 |
+
uses: astral-sh/setup-uv@v6
|
| 28 |
+
with:
|
| 29 |
+
enable-cache: true
|
| 30 |
+
|
| 31 |
+
- name: Install the project
|
| 32 |
+
run: uv sync --dev
|
| 33 |
+
|
| 34 |
+
- name: "Run Ruff check"
|
| 35 |
+
uses: astral-sh/ruff-action@v3
|
| 36 |
+
with:
|
| 37 |
+
args: >
|
| 38 |
+
check --fix
|
| 39 |
+
|
| 40 |
+
- name: "Run Ruff format check"
|
| 41 |
+
uses: astral-sh/ruff-action@v3
|
| 42 |
+
with:
|
| 43 |
+
args: >
|
| 44 |
+
format --check
|
| 45 |
+
# will exit with a non-zero code if there are formatting changes
|
| 46 |
+
|
| 47 |
+
# - name: Minimize uv cache
|
| 48 |
+
# run: uv cache prune --ci
|
.ipynb_checkpoints/run_cache-checkpoint.sh
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
cd /workspace/musubi-tuner || { echo "Failed to open directory"; exit 1; }
|
| 2 |
+
|
| 3 |
+
LATENT_BATCH_SIZE=12
|
| 4 |
+
LATENT_NUM_WORKERS=6
|
| 5 |
+
|
| 6 |
+
TEXT_BATCH_SIZE=1
|
| 7 |
+
TEXT_NUM_WORKERS=1
|
| 8 |
+
|
| 9 |
+
mkdir -p logs
|
| 10 |
+
|
| 11 |
+
latent_pids=()
|
| 12 |
+
|
| 13 |
+
for i in 0 1 2 3 4 5 6 7; do
|
| 14 |
+
CUDA_VISIBLE_DEVICES=$i python -m musubi_tuner.ltx2_cache_latents \
|
| 15 |
+
--dataset_config "train/ltxxx_gpu${i}.toml" \
|
| 16 |
+
--ltx2_checkpoint "$LTX2_CKPT" \
|
| 17 |
+
--device cuda \
|
| 18 |
+
--vae_dtype bf16 \
|
| 19 |
+
--ltx2_mode av \
|
| 20 |
+
--ltx2_audio_source video \
|
| 21 |
+
--batch_size "$LATENT_BATCH_SIZE" \
|
| 22 |
+
--num_workers "$LATENT_NUM_WORKERS" \
|
| 23 |
+
--skip_existing \
|
| 24 |
+
> "logs/ltx2_cache_latents_gpu${i}.log" 2>&1 &
|
| 25 |
+
latent_pids+=($!)
|
| 26 |
+
done
|
| 27 |
+
|
| 28 |
+
latent_failed=0
|
| 29 |
+
for pid in "${latent_pids[@]}"; do
|
| 30 |
+
if ! wait "$pid"; then
|
| 31 |
+
latent_failed=1
|
| 32 |
+
fi
|
| 33 |
+
done
|
| 34 |
+
|
| 35 |
+
if [ "$latent_failed" -ne 0 ]; then
|
| 36 |
+
echo "Latent caching failed on at least one GPU. Check logs/ltx2_cache_latents_gpu*.log"
|
| 37 |
+
else
|
| 38 |
+
echo "Latent caching finished successfully on all 8 GPUs. Starting text cache..."
|
| 39 |
+
|
| 40 |
+
text_pids=()
|
| 41 |
+
|
| 42 |
+
for i in 0 1 2 3 4 5 6 7; do
|
| 43 |
+
CUDA_VISIBLE_DEVICES=$i python -m musubi_tuner.ltx2_cache_text_encoder_outputs \
|
| 44 |
+
--dataset_config "train/ltxxx_gpu${i}.toml" \
|
| 45 |
+
--ltx2_checkpoint "$LTX2_CKPT" \
|
| 46 |
+
--gemma_root "$GEMMA_ROOT" \
|
| 47 |
+
--gemma_load_in_8bit \
|
| 48 |
+
--device cuda \
|
| 49 |
+
--mixed_precision bf16 \
|
| 50 |
+
--ltx2_mode av \
|
| 51 |
+
--batch_size "$TEXT_BATCH_SIZE" \
|
| 52 |
+
--num_workers "$TEXT_NUM_WORKERS" \
|
| 53 |
+
--skip_existing \
|
| 54 |
+
> "logs/ltx2_cache_text_gpu${i}.log" 2>&1 &
|
| 55 |
+
text_pids+=($!)
|
| 56 |
+
done
|
| 57 |
+
|
| 58 |
+
text_failed=0
|
| 59 |
+
for pid in "${text_pids[@]}"; do
|
| 60 |
+
if ! wait "$pid"; then
|
| 61 |
+
text_failed=1
|
| 62 |
+
fi
|
| 63 |
+
done
|
| 64 |
+
|
| 65 |
+
if [ "$text_failed" -ne 0 ]; then
|
| 66 |
+
echo "Text caching failed on at least one GPU. Check logs/ltx2_cache_text_gpu*.log"
|
| 67 |
+
else
|
| 68 |
+
echo "Latent cache and text cache both completed successfully."
|
| 69 |
+
fi
|
| 70 |
+
fi
|
.venv/pyvenv.cfg
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
home = /venv/main/bin
|
| 2 |
+
include-system-site-packages = false
|
| 3 |
+
version = 3.12.13
|
| 4 |
+
executable = /venv/main/bin/python3.12
|
| 5 |
+
command = /venv/main/bin/python3 -m venv /workspace/musubi-tuner/.venv
|
docs/advanced_config.md
ADDED
|
@@ -0,0 +1,831 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
> 📝 Click on the language section to expand / 言語をクリックして展開
|
| 2 |
+
|
| 3 |
+
# Advanced configuration / 高度な設定
|
| 4 |
+
|
| 5 |
+
## Table of contents / 目次
|
| 6 |
+
|
| 7 |
+
- [Using configuration files to specify training options](#using-configuration-files-to-specify-training-options--設定ファイルを使用した学習オプションの指定)
|
| 8 |
+
- [How to specify `network_args`](#how-to-specify-network_args--network_argsの指定方法)
|
| 9 |
+
- [LoRA+](#lora)
|
| 10 |
+
- [Select the target modules of LoRA](#select-the-target-modules-of-lora--loraの対象モジュールを選択する)
|
| 11 |
+
- [Save and view logs in TensorBoard format](#save-and-view-logs-in-tensorboard-format--tensorboard形式のログの保存と参照)
|
| 12 |
+
- [Save and view logs in wandb](#save-and-view-logs-in-wandb--wandbでログの保存と参照)
|
| 13 |
+
- [FP8 weight optimization for models](#fp8-weight-optimization-for-models--モデルの重みのfp8への最適化)
|
| 14 |
+
- [PyTorch Dynamo optimization for model training](#pytorch-dynamo-optimization-for-model-training--モデルの学習におけるpytorch-dynamoの最適化)
|
| 15 |
+
- [MagCache](#magcache)
|
| 16 |
+
- [Style-Friendly SNR Sampler](#style-friendly-snr-sampler)
|
| 17 |
+
- [Specify time step range for training](#specify-time-step-range-for-training--学習時のタイムステップ範囲の指定)
|
| 18 |
+
- [Timestep Bucketing for Uniform Sampling](#timestep-bucketing-for-uniform-sampling--均一なサンプリングのためのtimestep-bucketing)
|
| 19 |
+
- [Schedule Free Optimizer](#schedule-free-optimizer--スケジュールフリーオプティマイザ)
|
| 20 |
+
|
| 21 |
+
[Post-Hoc EMA merging for LoRA](tools.md#lora-post-hoc-ema-merging--loraのpost-hoc-emaマージ) is described in the [Tools](tools.md) document.
|
| 22 |
+
|
| 23 |
+
## Using configuration files to specify training options / 設定ファイルを使用した学習オプションの指定
|
| 24 |
+
|
| 25 |
+
Instead of specifying all training options on the command line, you can use a `.toml` configuration file to specify them. This can make it easier to manage and reuse training configurations.
|
| 26 |
+
|
| 27 |
+
Specify the configuration file with the `--config_file` option. The `.toml` extension can be omitted.
|
| 28 |
+
|
| 29 |
+
```bash
|
| 30 |
+
accelerate launch --num_cpu_threads_per_process 1 --mixed_precision bf16 src/musubi_tuner/hv_train_network.py --config_file config.toml
|
| 31 |
+
```
|
| 32 |
+
|
| 33 |
+
The configuration file is a TOML file that can contain any of the command-line options. The file can be organized into sections for readability, but all sections are flattened when parsed, so the section names are ignored.
|
| 34 |
+
|
| 35 |
+
<details>
|
| 36 |
+
<summary>日本語</summary>
|
| 37 |
+
|
| 38 |
+
すべての学習オプションをコマンドラインで指定する代わりに、`.toml`設定ファイルを使用して指定することができます。これにより、学習設定の管理や再利用が容易になります。
|
| 39 |
+
|
| 40 |
+
`--config_file`オプションで設定ファイルを指定します。`.toml`拡張子は省略できます。
|
| 41 |
+
|
| 42 |
+
```bash
|
| 43 |
+
accelerate launch --num_cpu_threads_per_process 1 --mixed_precision bf16 src/musubi_tuner/hv_train_network.py --config_file config.toml
|
| 44 |
+
```
|
| 45 |
+
|
| 46 |
+
設定ファイルは、コマンドラインオプションのいずれかを含むことができるTOMLファイルです。ファイルは読みやすさのためにセクションに分けることができますが、解析時にすべてのセクションがフラット化されるため、セクション名は無視されます。
|
| 47 |
+
|
| 48 |
+
</details>
|
| 49 |
+
|
| 50 |
+
### Example configuration file / 設定ファイルの例
|
| 51 |
+
|
| 52 |
+
```toml
|
| 53 |
+
# config.toml
|
| 54 |
+
dit = "/path/to/dit"
|
| 55 |
+
dataset_config = "/path/to/dataset.toml"
|
| 56 |
+
network_module = "networks.lora"
|
| 57 |
+
network_dim = 32
|
| 58 |
+
network_alpha = 16
|
| 59 |
+
|
| 60 |
+
[optimizer]
|
| 61 |
+
optimizer_type = "AdamW"
|
| 62 |
+
learning_rate = 1e-4
|
| 63 |
+
|
| 64 |
+
[training]
|
| 65 |
+
max_train_epochs = 10
|
| 66 |
+
save_every_n_epochs = 2
|
| 67 |
+
mixed_precision = "bf16"
|
| 68 |
+
|
| 69 |
+
[output]
|
| 70 |
+
output_dir = "/path/to/output"
|
| 71 |
+
output_name = "my_lora"
|
| 72 |
+
logging_dir = "./logs"
|
| 73 |
+
```
|
| 74 |
+
|
| 75 |
+
All options can be specified in the top level or within sections. When parsed, the section structure is ignored and all key-value pairs are combined into a single namespace.
|
| 76 |
+
|
| 77 |
+
Options specified on the command line will override those in the configuration file.
|
| 78 |
+
|
| 79 |
+
```bash
|
| 80 |
+
# This will use the config file but override the learning_rate
|
| 81 |
+
accelerate launch --mixed_precision bf16 src/musubi_tuner/hv_train_network.py --config_file config --learning_rate 2e-4
|
| 82 |
+
```
|
| 83 |
+
|
| 84 |
+
<details>
|
| 85 |
+
<summary>日本語</summary>
|
| 86 |
+
|
| 87 |
+
すべてのオプションは、トップレベルまたはセクション内に指定できます。解析時には、セクション構造は無視され、すべてのキーと値のペアが単一のネームスペースに結合されます。
|
| 88 |
+
|
| 89 |
+
コマンドラインで指定されたオプションは、設定ファイルのオプションを上書きします。
|
| 90 |
+
|
| 91 |
+
```bash
|
| 92 |
+
# 設定ファイルを使用しますが、learning_rateを上書きします
|
| 93 |
+
accelerate launch --mixed_precision bf16 src/musubi_tuner/hv_train_network.py --config_file config --learning_rate 2e-4
|
| 94 |
+
```
|
| 95 |
+
|
| 96 |
+
</details>
|
| 97 |
+
|
| 98 |
+
## How to specify `network_args` / `network_args`の指定方法
|
| 99 |
+
|
| 100 |
+
The `--network_args` option is an option for specifying detailed arguments to LoRA. Specify the arguments in the form of `key=value` in `--network_args`.
|
| 101 |
+
|
| 102 |
+
<details>
|
| 103 |
+
<summary>日本語</summary>
|
| 104 |
+
`--network_args`オプションは、LoRAへの詳細な引数を指定するためのオプションです。`--network_args`には、`key=value`の形式で引数を指定します。
|
| 105 |
+
</details>
|
| 106 |
+
|
| 107 |
+
### Example / 記述例
|
| 108 |
+
|
| 109 |
+
If you specify it on the command line, write as follows. / コマンドラインで指定する場合は以下のように記述します。
|
| 110 |
+
|
| 111 |
+
```bash
|
| 112 |
+
accelerate launch --num_cpu_threads_per_process 1 --mixed_precision bf16 src/musubi_tuner/hv_train_network.py --dit ...
|
| 113 |
+
--network_module networks.lora --network_dim 32
|
| 114 |
+
--network_args "key1=value1" "key2=value2" ...
|
| 115 |
+
```
|
| 116 |
+
|
| 117 |
+
If you specify it in the configuration file, write as follows. / 設定ファイルで指定する場合は以下のように記述します。
|
| 118 |
+
|
| 119 |
+
```toml
|
| 120 |
+
network_args = ["key1=value1", "key2=value2", ...]
|
| 121 |
+
```
|
| 122 |
+
|
| 123 |
+
If you specify `"verbose=True"`, detailed information of LoRA will be displayed. / `"verbose=True"`を指定するとLoRAの詳細な情報が表示されます。
|
| 124 |
+
|
| 125 |
+
```bash
|
| 126 |
+
--network_args "verbose=True" "key1=value1" "key2=value2" ...
|
| 127 |
+
```
|
| 128 |
+
|
| 129 |
+
## LoRA+
|
| 130 |
+
|
| 131 |
+
LoRA+ is a method to improve the training speed by increasing the learning rate of the UP side (LoRA-B) of LoRA. Specify the multiplier for the learning rate. The original paper recommends 16, but adjust as needed. It seems to be good to start from around 4. For details, please refer to the [related PR of sd-scripts](https://github.com/kohya-ss/sd-scripts/pull/1233).
|
| 132 |
+
|
| 133 |
+
Specify `loraplus_lr_ratio` with `--network_args`.
|
| 134 |
+
|
| 135 |
+
<details>
|
| 136 |
+
<summary>日本語</summary>
|
| 137 |
+
|
| 138 |
+
LoRA+は、LoRAのUP側(LoRA-B)の学習率を上げることで学習速度を向上させる手法です。学習率に対する倍率を指定します。元論文では16を推奨していますが、必要に応じて調整してください。4程度から始めるとよいようです。詳細は[sd-scriptsの関連PR](https://github.com/kohya-ss/sd-scripts/pull/1233)を参照してください。
|
| 139 |
+
|
| 140 |
+
`--network_args`で`loraplus_lr_ratio`を指定します。
|
| 141 |
+
</details>
|
| 142 |
+
|
| 143 |
+
### Example / 記述例
|
| 144 |
+
|
| 145 |
+
```bash
|
| 146 |
+
accelerate launch --num_cpu_threads_per_process 1 --mixed_precision bf16 src/musubi_tuner/hv_train_network.py --dit ...
|
| 147 |
+
--network_module networks.lora --network_dim 32 --network_args "loraplus_lr_ratio=4" ...
|
| 148 |
+
```
|
| 149 |
+
|
| 150 |
+
## Select the target modules of LoRA / LoRAの対象モジュールを選択する
|
| 151 |
+
|
| 152 |
+
*This feature is highly experimental and the specification may change. / この機能は特に実験的なもので、仕様は変更される可能性があります。*
|
| 153 |
+
|
| 154 |
+
By specifying `exclude_patterns` and `include_patterns` with `--network_args`, you can select the target modules of LoRA.
|
| 155 |
+
|
| 156 |
+
`exclude_patterns` excludes modules that match the specified pattern. `include_patterns` targets only modules that match the specified pattern.
|
| 157 |
+
|
| 158 |
+
Specify the values as a list. For example, `"exclude_patterns=[r'.*single_blocks.*', r'.*double_blocks\.[0-9]\..*']"`.
|
| 159 |
+
|
| 160 |
+
The pattern is a regular expression for the module name. The module name is in the form of `double_blocks.0.img_mod.linear` or `single_blocks.39.modulation.linear`. The regular expression is not a partial match but a complete match.
|
| 161 |
+
|
| 162 |
+
The patterns are applied in the order of `exclude_patterns`→`include_patterns`. By default, the Linear layers of `img_mod`, `txt_mod`, and `modulation` of double blocks and single blocks are excluded.
|
| 163 |
+
|
| 164 |
+
(`.*(img_mod|txt_mod|modulation).*` is specified.)
|
| 165 |
+
|
| 166 |
+
<details>
|
| 167 |
+
<summary>日本語</summary>
|
| 168 |
+
|
| 169 |
+
`--network_args`で`exclude_patterns`と`include_patterns`を指定することで、LoRAの対象モジュールを選択することができます。
|
| 170 |
+
|
| 171 |
+
`exclude_patterns`は、指定したパターンに一致するモジュールを除外します。`include_patterns`は、指定したパターンに一致するモジュールのみを対象とします。
|
| 172 |
+
|
| 173 |
+
値は、リストで指定します。`"exclude_patterns=[r'.*single_blocks.*', r'.*double_blocks\.[0-9]\..*']"`のようになります。
|
| 174 |
+
|
| 175 |
+
パターンは、モジュール名に対する正規表現です。モジュール名は、たとえば`double_blocks.0.img_mod.linear`や`single_blocks.39.modulation.linear`のような形式です。正規表現は部分一致ではなく完全一致です。
|
| 176 |
+
|
| 177 |
+
パターンは、`exclude_patterns`→`include_patterns`の順で適用されます。デフォルトは、double blocksとsingle blocksのLinear層のうち、`img_mod`、`txt_mod`、`modulation`が除外されています。
|
| 178 |
+
|
| 179 |
+
(`.*(img_mod|txt_mod|modulation).*`が指定されています。)
|
| 180 |
+
</details>
|
| 181 |
+
|
| 182 |
+
### Example / 記述例
|
| 183 |
+
|
| 184 |
+
Only the modules of double blocks / double blocksのモジュールのみを対象とする場合:
|
| 185 |
+
|
| 186 |
+
```bash
|
| 187 |
+
--network_args "exclude_patterns=[r'.*single_blocks.*']"
|
| 188 |
+
```
|
| 189 |
+
|
| 190 |
+
Only the modules of single blocks from the 10th / single blocksの10番目以降のLinearモジュールのみを対象とする場合:
|
| 191 |
+
|
| 192 |
+
```bash
|
| 193 |
+
--network_args "exclude_patterns=[r'.*']" "include_patterns=[r'.*single_blocks\.\d{2}\.linear.*']"
|
| 194 |
+
```
|
| 195 |
+
|
| 196 |
+
## Save and view logs in TensorBoard format / TensorBoard形式のログの保存と参照
|
| 197 |
+
|
| 198 |
+
Specify the folder to save the logs with the `--logging_dir` option. Logs in TensorBoard format will be saved.
|
| 199 |
+
|
| 200 |
+
For example, if you specify `--logging_dir=logs`, a `logs` folder will be created in the working folder, and logs will be saved in the date folder inside it.
|
| 201 |
+
|
| 202 |
+
Also, if you specify the `--log_prefix` option, the specified string will be added before the date. For example, use `--logging_dir=logs --log_prefix=lora_setting1_` for identification.
|
| 203 |
+
|
| 204 |
+
To view logs in TensorBoard, open another command prompt and activate the virtual environment. Then enter the following in the working folder.
|
| 205 |
+
|
| 206 |
+
```powershell
|
| 207 |
+
tensorboard --logdir=logs
|
| 208 |
+
```
|
| 209 |
+
|
| 210 |
+
(tensorboard installation is required.)
|
| 211 |
+
|
| 212 |
+
Then open a browser and access http://localhost:6006/ to display it.
|
| 213 |
+
|
| 214 |
+
<details>
|
| 215 |
+
<summary>日本語</summary>
|
| 216 |
+
`--logging_dir`オプションにログ保存先フォルダを指定してください。TensorBoard形式のログが保存されます。
|
| 217 |
+
|
| 218 |
+
たとえば`--logging_dir=logs`と指定すると、作業フォルダにlogsフォルダが作成され、その中の日時フォルダにログが保存されます。
|
| 219 |
+
|
| 220 |
+
また`--log_prefix`オプションを指定すると、日時の前に指定した文字列が追加されます。`--logging_dir=logs --log_prefix=lora_setting1_`などとして識別用にお使いください。
|
| 221 |
+
|
| 222 |
+
TensorBoardでログを確認するには、別のコマンドプロンプトを開き、仮想環境を有効にしてから、作業フォルダで以下のように入力します。
|
| 223 |
+
|
| 224 |
+
```powershell
|
| 225 |
+
tensorboard --logdir=logs
|
| 226 |
+
```
|
| 227 |
+
|
| 228 |
+
(tensorboardのインストールが必要です。)
|
| 229 |
+
|
| 230 |
+
その後ブラウザを開き、http://localhost:6006/ へアクセスすると表示されます。
|
| 231 |
+
</details>
|
| 232 |
+
|
| 233 |
+
## Save and view logs in wandb / wandbでログの保存と参照
|
| 234 |
+
|
| 235 |
+
`--log_with wandb` option is available to save logs in wandb format. `tensorboard` or `all` is also available. The default is `tensorboard`.
|
| 236 |
+
|
| 237 |
+
Specify the project name with `--log_tracker_name` when using wandb.
|
| 238 |
+
|
| 239 |
+
<details>
|
| 240 |
+
<summary>日本語</summary>
|
| 241 |
+
`--log_with wandb`オプションを指定するとwandb形式でログを保存することができます。`tensorboard`や`all`も指定可能です。デフォルトは`tensorboard`です。
|
| 242 |
+
|
| 243 |
+
wandbを使用する場合は、`--log_tracker_name`でプロジェクト名を指定してください。
|
| 244 |
+
</details>
|
| 245 |
+
|
| 246 |
+
## FP8 weight optimization for models / モデルの重みのFP8への最適化
|
| 247 |
+
|
| 248 |
+
The `--fp8_scaled` option performs an offline optimization pass that rewrites selected Linear weights into FP8 (E4M3) with block-wise scaling. Compared with the legacy `--fp8` cast, it reduces VRAM usage while maintaining relatively high precision.
|
| 249 |
+
|
| 250 |
+
From v0.2.12, block-wise scaling is supported instead of per-tensor scaling, allowing for higher precision quantization.
|
| 251 |
+
|
| 252 |
+
This flow dequantizes the weights back to FP16/BF16/FP32 during the forward path, and computes in FP16/BF16/FP32. The shared routines live in `src/musubi_tuner/modules/fp8_optimization_utils.py` and are wired into the Wan2.x, FramePack, FLUX.1 Kontext, and Qwen-Image pipelines (except HunyuanVideo, for which `--fp8_scaled` is not supported).
|
| 253 |
+
|
| 254 |
+
Acknowledgments: This idea is based on the [implementation](https://github.com/Tencent/HunyuanVideo/blob/7df4a45c7e424a3f6cd7d653a7ff1f60cddc1eb1/hyvideo/modules/fp8_optimization.py) of [HunyuanVideo](https://github.com/Tencent/HunyuanVideo). The selection of high-precision modules is referenced from the [implementation](https://github.com/tdrussell/diffusion-pipe/blob/407c04fdae1c9ab5e67b54d33bef62c3e0a8dbc7/models/wan.py) of [diffusion-pipe](https://github.com/tdrussell/diffusion-pipe). I would like to thank these repositories.
|
| 255 |
+
|
| 256 |
+
<details>
|
| 257 |
+
<summary>日本語</summary>
|
| 258 |
+
|
| 259 |
+
`--fp8_scaled` オプションは、対象の Linear 層の重みを、blockごとに適切な倍率でスケーリングした FP8 (E4M3) に書き換える前処理を実行します。従来の `--fp8` による単純なキャストと比べて、元の精度を比較的保ったまま VRAM を削減できます。
|
| 260 |
+
|
| 261 |
+
v0.2.12から、テンソルごとのスケーリングではなく、ブロック単位のスケーリングに対応しました。これにより、より高い精度での量子化が可能になります。
|
| 262 |
+
|
| 263 |
+
forward の計算は、逆量子化を行なった重みで FP16/BF16 で行われます。共通ルーチンは `src/musubi_tuner/modules/fp8_optimization_utils.py` にあり、Wan 2.x・FramePack・FLUX.1 Kontext・Qwen-Image の各パイプラインで利用されます(HunyuanVideo については `--fp8_scaled` オプションは無効です)。
|
| 264 |
+
|
| 265 |
+
このアイデアは、[HunyuanVideo](https://github.com/Tencent/HunyuanVideo) の [実装](https://github.com/Tencent/HunyuanVideo/blob/7df4a45c7e424a3f6cd7d653a7ff1f60cddc1eb1/hyvideo/modules/fp8_optimization.py) に基づいています。高精度モジュールの選定は、[diffusion-pipe](https://github.com/tdrussell/diffusion-pipe) の [実装](https://github.com/tdrussell/diffusion-pipe/blob/407c04fdae1c9ab5e67b54d33bef62c3e0a8dbc7/models/wan.py) を参考にしています。これらのリポジトリに感謝します。
|
| 266 |
+
|
| 267 |
+
</details>
|
| 268 |
+
|
| 269 |
+
### Usage summary / 使い方のまとめ
|
| 270 |
+
|
| 271 |
+
- Inference: add `--fp8` and `--fp8_scaled` when running `wan_generate_video.py`, `fpack_generate_video.py`, `flux_kontext_generate_image.py`, or `qwen_image_generate_image.py`. HunyuanVideo continues to rely on `--fp8`/`--fp8_fast` without scaled weights.
|
| 272 |
+
- Training: specify `--fp8_base --fp8_scaled` in `wan_train_network.py`, `fpack_train_network.py`, `flux_kontext_train_network.py`, and `qwen_image_train_network.py`; the trainers enforce this pairing.
|
| 273 |
+
- Input checkpoints must be FP16/BF16; pre-quantized FP8 weights cannot be re-optimized.
|
| 274 |
+
- LoRA / LyCORIS weights are merged before quantization, so no additional steps are required.
|
| 275 |
+
|
| 276 |
+
<details>
|
| 277 |
+
<summary>日本語</summary>
|
| 278 |
+
|
| 279 |
+
- 推論では `wan_generate_video.py`、`fpack_generate_video.py`、`flux_kontext_generate_image.py`、`qwen_image_generate_image.py` を実行する際に `--fp8` と `--fp8_scaled` を併用してください。HunyuanVideo は引き続き`--fp8` / `--fp8_fast` を使用し、スケーリング付き重みは未対応です。
|
| 280 |
+
- 学習では `wan_train_network.py`、`fpack_train_network.py`、`flux_kontext_train_network.py` で `--fp8_base --fp8_scaled` を指定します。
|
| 281 |
+
- 読み込むチェックポイントは FP16/BF16 である必要があります。あらかじめ FP8 化された重みは再最適化できません。
|
| 282 |
+
- LoRA / LyCORIS の重みは量子化の前に自動でマージされるため、追加作業は不要です。
|
| 283 |
+
|
| 284 |
+
</details>
|
| 285 |
+
|
| 286 |
+
### Implementation highlights / 実装のポイント
|
| 287 |
+
|
| 288 |
+
|
| 289 |
+
When the `--fp8_scaled` flag is enabled, the loader loads the base weights in FP16/BF16, merges optional LoRA or LyCORIS, and then emits FP8 weights plus matching block-wise `.scale_weight` buffers for the targeted layers. The patched forward dequantizes back to the original dtype on demand for computation.
|
| 290 |
+
|
| 291 |
+
The current scripts in this repository use FP8 E4M3 format and block-wise quantization, but the implementation supports:
|
| 292 |
+
|
| 293 |
+
- Implements FP8 (E4M3 or E5M2) weight quantization for Linear layers
|
| 294 |
+
- Supports multiple quantization modes: tensor-wise, channel-wise, and block-wise quantization described below
|
| 295 |
+
- Block-wise quantization provides better precision by using granular scaling with configurable block size (default: 64)
|
| 296 |
+
- Reduces VRAM requirements by using 8-bit weights for storage (slightly increased compared to existing `--fp8` `--fp8_base` options)
|
| 297 |
+
- Quantizes weights to FP8 format with appropriate scaling instead of simple cast to FP8
|
| 298 |
+
- Applies monkey patching to Linear layers for transparent dequantization during computation
|
| 299 |
+
- Maintains computational precision by dequantizing to original precision (FP16/BF16) during forward pass
|
| 300 |
+
- Preserves important weights, such as norm, embedding, and modulation, in FP16/BF16 format (fewer exclusions than previous versions)
|
| 301 |
+
|
| 302 |
+
For quantization and precision discussion, see also [Discussion #564](https://github.com/kohya-ss/musubi-tuner/discussions/564).
|
| 303 |
+
|
| 304 |
+
Note: Testing for quantization other than E4M3/block-wise is limited, so please be cautious if you plan to use the code in other projects.
|
| 305 |
+
|
| 306 |
+
<details>
|
| 307 |
+
<summary>日本語</summary>
|
| 308 |
+
|
| 309 |
+
`--fp8_scaled` フラグを有効にすると、ローダーはまずベースとなる重みを FP16/BF16 のまま読み込み、必要に応じて LoRA や LyCORIS をマージした後、対象層の重みを FP8 の重みと、ブロックごとの `.scale_weight` バッファへ変換します。forward ではこのスケールを使って元の精度へ動的に逆量子化し計算を行います。
|
| 310 |
+
|
| 311 |
+
このリポジトリの現在のスクリプトでは、量子化はFP8 E4M3形式、ブロック単位量子化が用いられていますが、実装としては以下をサポートしています:
|
| 312 |
+
|
| 313 |
+
- Linear層のFP8(E4M3またはE5M2)重み量子化を実装
|
| 314 |
+
- 複数の量子化モード対応:テンソル単位、チャネル単位、ブロック単位量子化
|
| 315 |
+
- ブロック単位量子化は指定したブロックサイズ(デフォルト:64)での細粒度スケーリングによりより高い精度を提供
|
| 316 |
+
- 8ビットの重みを使用することでVRAM使用量を削減(既存の`--fp8` `--fp8_base` オプションに比べて微増)
|
| 317 |
+
- 単純なFP8へのcastではなく、適切な値でスケールして重みをFP8形式に量子化
|
| 318 |
+
- Linear層にmonkey patchingを適用し、計算時に透過的に逆量子化
|
| 319 |
+
- forward時に元の精度(FP16/BF16)に逆量子化して計算精度を維持
|
| 320 |
+
- 精度が重要な重み、たとえばnormやembedding、modulationは、FP16/BF16のまま保持(従来バージョンより除外対象を削減)
|
| 321 |
+
|
| 322 |
+
量子化と精度については[Discussion #564](https://github.com/kohya-ss/musubi-tuner/discussions/564)も参照してください。
|
| 323 |
+
|
| 324 |
+
※E4M3/ブロック単位以外の量子化のテストは不十分ですので、コードを他のプロジェクトで利用する場合等には注意してください。
|
| 325 |
+
|
| 326 |
+
</details>
|
| 327 |
+
|
| 328 |
+
### Quantization modes / 量子化モード
|
| 329 |
+
|
| 330 |
+
The current implementation supports three quantization modes:
|
| 331 |
+
|
| 332 |
+
- **Block-wise quantization (default)**: Divides weight matrices into blocks of configurable size (default: 64) and calculates separate scale factors for each block. Provides the best precision but requires more memory for scale storage.
|
| 333 |
+
- **Channel-wise quantization**: Calculates scale factors per output channel (row). Balances precision and memory usage.
|
| 334 |
+
- **Tensor-wise quantization**: Uses a single scale factor for the entire weight tensor. Lowest memory usage but may have reduced precision for some weights.
|
| 335 |
+
|
| 336 |
+
The implementation automatically falls back to simpler modes when block-wise quantization is not feasible (e.g., when weight dimensions are not divisible by block size).
|
| 337 |
+
|
| 338 |
+
<details>
|
| 339 |
+
<summary>日本語</summary>
|
| 340 |
+
|
| 341 |
+
現在の実装では3つの量子化モードをサポートしています:
|
| 342 |
+
|
| 343 |
+
- **ブロック単位量子化(デフォルト)**:重み行列を設定可能なサイズのブロック(デフォルト:64)に分割し、各ブロックに対して個別のスケール係数を計算します。最高の精度を提供しますが、スケール保存により追加メモリが必要です。
|
| 344 |
+
- **チャネル単位量子化**:出力チャネル(行)ごとにスケール係数を計算します。精度とメモリ使用量のバランスを取ります。
|
| 345 |
+
- **テンソル単位量子化**:重みテンソル全体に対して単一のスケール係数を使用します。最も少ないメモリ使用量ですが、一部の重みで精度が低下する場合があります。
|
| 346 |
+
|
| 347 |
+
実装では、ブロック単位量子化が実行不可能な場合(重み次元がブロックサイズで割り切れない場合など)、自動的により単純なモードにフォールバックします。
|
| 348 |
+
|
| 349 |
+
</details>
|
| 350 |
+
|
| 351 |
+
## PyTorch Dynamo optimization for model training / モデルの学習におけるPyTorch Dynamoの最適化
|
| 352 |
+
|
| 353 |
+
The PyTorch Dynamo options are now available to optimize the training process. PyTorch Dynamo is a Python-level JIT compiler designed to make unmodified PyTorch programs faster by using TorchInductor, a deep learning compiler. This integration allows for potential speedups in training while maintaining model accuracy.
|
| 354 |
+
|
| 355 |
+
[PR #215](https://github.com/kohya-ss/musubi-tuner/pull/215) added this feature.
|
| 356 |
+
|
| 357 |
+
Specify the `--dynamo_backend` option to enable Dynamo optimization with one of the available backends from the `DynamoBackend` enum.
|
| 358 |
+
|
| 359 |
+
Additional options allow for fine-tuning the Dynamo behavior:
|
| 360 |
+
- `--dynamo_mode`: Controls the optimization strategy
|
| 361 |
+
- `--dynamo_fullgraph`: Enables fullgraph mode for potentially better optimization
|
| 362 |
+
- `--dynamo_dynamic`: Enables dynamic shape handling
|
| 363 |
+
|
| 364 |
+
The `--dynamo_dynamic` option has been reported to have many problems based on the validation in PR #215.
|
| 365 |
+
|
| 366 |
+
### Available options:
|
| 367 |
+
|
| 368 |
+
```
|
| 369 |
+
--dynamo_backend {NO, INDUCTOR, NVFUSER, CUDAGRAPHS, CUDAGRAPHS_FALLBACK, etc.}
|
| 370 |
+
Specifies the Dynamo backend to use (default is NO, which disables Dynamo)
|
| 371 |
+
|
| 372 |
+
--dynamo_mode {default, reduce-overhead, max-autotune}
|
| 373 |
+
Specifies the optimization mode (default is 'default')
|
| 374 |
+
- 'default': Standard optimization
|
| 375 |
+
- 'reduce-overhead': Focuses on reducing compilation overhead
|
| 376 |
+
- 'max-autotune': Performs extensive autotuning for potentially better performance
|
| 377 |
+
|
| 378 |
+
--dynamo_fullgraph
|
| 379 |
+
Flag to enable fullgraph mode, which attempts to capture and optimize the entire model graph
|
| 380 |
+
|
| 381 |
+
--dynamo_dynamic
|
| 382 |
+
Flag to enable dynamic shape handling for models with variable input shapes
|
| 383 |
+
```
|
| 384 |
+
|
| 385 |
+
### Usage example:
|
| 386 |
+
|
| 387 |
+
```bash
|
| 388 |
+
python src/musubi_tuner/hv_train_network.py --dynamo_backend INDUCTOR --dynamo_mode default
|
| 389 |
+
```
|
| 390 |
+
|
| 391 |
+
For more aggressive optimization:
|
| 392 |
+
```bash
|
| 393 |
+
python src/musubi_tuner/hv_train_network.py --dynamo_backend INDUCTOR --dynamo_mode max-autotune --dynamo_fullgraph
|
| 394 |
+
```
|
| 395 |
+
|
| 396 |
+
Note: The best combination of options may depend on your specific model and hardware. Experimentation may be necessary to find the optimal configuration.
|
| 397 |
+
|
| 398 |
+
<details>
|
| 399 |
+
<summary>日本語</summary>
|
| 400 |
+
PyTorch Dynamoオプションが学習プロセスを最適化するために追加されました。PyTorch Dynamoは、TorchInductor(ディープラーニングコンパイラ)を使用して、変更を加えることなくPyTorchプログラムを高速化するためのPythonレベルのJITコンパイラです。この統合により、モデルの精度を維持しながら学習の高速化が期待できます。
|
| 401 |
+
|
| 402 |
+
[PR #215](https://github.com/kohya-ss/musubi-tuner/pull/215) で追加されました。
|
| 403 |
+
|
| 404 |
+
`--dynamo_backend`オプションを指定して、`DynamoBackend`列挙型から利用可能なバックエンドの一つを選択することで、Dynamo最適化を有効にします。
|
| 405 |
+
|
| 406 |
+
追加のオプションにより、Dynamoの動作を微調整できます:
|
| 407 |
+
- `--dynamo_mode`:最適化戦略を制御します
|
| 408 |
+
- `--dynamo_fullgraph`:より良い最適化の可能性のためにフルグラフモードを有効にします
|
| 409 |
+
- `--dynamo_dynamic`:動的形状処理を有効にします
|
| 410 |
+
|
| 411 |
+
PR #215での検証によると、`--dynamo_dynamic`には問題が多いことが報告されています。
|
| 412 |
+
|
| 413 |
+
__利用可能なオプション:__
|
| 414 |
+
|
| 415 |
+
```
|
| 416 |
+
--dynamo_backend {NO, INDUCTOR, NVFUSER, CUDAGRAPHS, CUDAGRAPHS_FALLBACK, など}
|
| 417 |
+
使用するDynamoバックエンドを指定します(デフォルトはNOで、Dynamoを無効にします)
|
| 418 |
+
|
| 419 |
+
--dynamo_mode {default, reduce-overhead, max-autotune}
|
| 420 |
+
最適化モードを指定します(デフォルトは 'default')
|
| 421 |
+
- 'default':標準的な最適化
|
| 422 |
+
- 'reduce-overhead':コンパイルのオーバーヘッド削減に焦点を当てる
|
| 423 |
+
- 'max-autotune':より良いパフォーマンスのために広範な自動調整を実行
|
| 424 |
+
|
| 425 |
+
--dynamo_fullgraph
|
| 426 |
+
フルグラフモードを有効にするフラグ。モデルグラフ全体をキャプチャして最適化しようとします
|
| 427 |
+
|
| 428 |
+
--dynamo_dynamic
|
| 429 |
+
可変入力形状を持つモデルのための動的形状処理を有効にするフラグ
|
| 430 |
+
```
|
| 431 |
+
|
| 432 |
+
__使用例:__
|
| 433 |
+
|
| 434 |
+
```bash
|
| 435 |
+
python src/musubi_tuner/hv_train_network.py --dynamo_backend INDUCTOR --dynamo_mode default
|
| 436 |
+
```
|
| 437 |
+
|
| 438 |
+
より積極的な最適化の場合:
|
| 439 |
+
```bash
|
| 440 |
+
python src/musubi_tuner/hv_train_network.py --dynamo_backend INDUCTOR --dynamo_mode max-autotune --dynamo_fullgraph
|
| 441 |
+
```
|
| 442 |
+
|
| 443 |
+
注意:最適なオプションの組み合わせは、特定のモデルとハードウェアに依存する場合があります。最適な構成を見つけるために実験が必要かもしれません。
|
| 444 |
+
</details>
|
| 445 |
+
|
| 446 |
+
## MagCache
|
| 447 |
+
|
| 448 |
+
The following is quoted from the [MagCache github repository](https://github.com/Zehong-Ma/MagCache) "Magnitude-aware Cache (MagCache) for Video Diffusion Models":
|
| 449 |
+
|
| 450 |
+
> We introduce Magnitude-aware Cache (MagCache), a training-free caching approach that estimates and leverages the fluctuating differences among model outputs across timesteps based on the robust magnitude observations, thereby accelerating the inference. MagCache works well for Video Diffusion Models, Image Diffusion models.
|
| 451 |
+
|
| 452 |
+
We have implemented the MagCache feature in Musubi Tuner. Some of the code is based on the MagCache repository. It is available for `fpack_generate_video.py` for now.
|
| 453 |
+
|
| 454 |
+
### Usage
|
| 455 |
+
|
| 456 |
+
1. Calibrate the mag ratios
|
| 457 |
+
- Run the inference script as normal, but with the `--magcache_calibration` option to calibrate the mag ratios. You will get a following output:
|
| 458 |
+
|
| 459 |
+
```
|
| 460 |
+
INFO:musubi_tuner.fpack_generate_video:Copy and paste following values to --magcache_mag_ratios argument to use them:
|
| 461 |
+
1.00000,1.26562,1.08594,1.02344,1.00781,1.01562,1.01562,1.03125,1.04688,1.00781,1.03125,1.00000,1.01562,1.01562,1.02344,1.01562,0.98438,1.05469,0.98438,0.97266,1.03125,0.96875,0.93359,0.95703,0.77734
|
| 462 |
+
```
|
| 463 |
+
- It is recommended to run the calibration with your custom prompt and model.
|
| 464 |
+
    - If you run inference on a multi-section video, you will get mag ratios for each section. You can use the values from one of the sections or average them.
|
| 465 |
+
|
| 466 |
+
2. Use the mag ratios
|
| 467 |
+
- Run the inference script with the `--magcache_mag_ratios` option to use the mag ratios. For example:
|
| 468 |
+
|
| 469 |
+
```bash
|
| 470 |
+
python fpack_generate_video.py --magcache_mag_ratios 1.00000,1.26562,1.08594,1.02344,1.00781,1.01562,1.01562,1.03125,1.04688,1.00781,1.03125,1.00000,1.01562,1.01562,1.02344,1.01562,0.98438,1.05469,0.98438,0.97266,1.03125,0.96875,0.93359,0.95703,0.77734
|
| 471 |
+
```
|
| 472 |
+
|
| 473 |
+
- Specify `--magcache_mag_ratios 0` to use the default mag ratios from the MagCache repository.
|
| 474 |
+
    - It is recommended to use the same number of steps as the calibration. If the number of steps differs, the mag ratios are interpolated to match the specified steps.
|
| 475 |
+
- You can also specify the `--magcache_retention_ratio`, `--magcache_threshold`, and `--magcache_k` options to control the MagCache behavior. The default values are 0.2, 0.24, and 6, respectively (same as the MagCache repository).
|
| 476 |
+
|
| 477 |
+
```bash
|
| 478 |
+
python fpack_generate_video.py --magcache_retention_ratio 0.2 --magcache_threshold 0.24 --magcache_k 6
|
| 479 |
+
```
|
| 480 |
+
|
| 481 |
+
- The `--magcache_retention_ratio` option controls the ratio of the steps not to cache. For example, if you set it to 0.2, the first 20% of the steps will not be cached. The default value is 0.2.
|
| 482 |
+
- The `--magcache_threshold` option controls the threshold whether to use the cached output or not. If the accumulated error is less than the threshold, the cached output will be used. The default value is 0.24.
|
| 483 |
+
- The error is calculated by the accumulated error multiplied by the mag ratio.
|
| 484 |
+
- The `--magcache_k` option controls the number of steps to use for the cache. The default value is 6, which means the consecutive 6 steps will be used for the cache. The default value 6 is recommended for 50 steps, so you may want to lower it for smaller number of steps.
|
| 485 |
+
|
| 486 |
+
### Generated video example
|
| 487 |
+
|
| 488 |
+
Using F1-model, without MagCache, approximately 90 seconds are required to generate single section video with 25 steps (without VAE decoding) in my environment.
|
| 489 |
+
|
| 490 |
+
https://github.com/user-attachments/assets/30b8d05e-9bd6-42bf-997f-5ba5b3dde876
|
| 491 |
+
|
| 492 |
+
With MagCache, default settings, approximately 30 seconds are required to generate with the same settings.
|
| 493 |
+
|
| 494 |
+
https://github.com/user-attachments/assets/080076ea-4088-443c-8138-4eeb00694ec5
|
| 495 |
+
|
| 496 |
+
With MagCache, `--magcache_retention_ratio 0.2 --magcache_threshold 0.12 --magcache_k 3`, approximately 35 seconds are required to generate with the same settings.
|
| 497 |
+
|
| 498 |
+
https://github.com/user-attachments/assets/27d6c7ff-e3db-4c52-8668-9a887441acef
|
| 499 |
+
|
| 500 |
+
<details>
|
| 501 |
+
<summary>日本語</summary>
|
| 502 |
+
|
| 503 |
+
以下は、[MagCache githubリポジトリ](https://github.com/Zehong-Ma/MagCache) "Magnitude-aware Cache (MagCache) for Video Diffusion Models"からの引用の拙訳です:
|
| 504 |
+
|
| 505 |
+
> Magnitude-aware Cache (MagCache)は、トレーニング不要のキャッシングアプローチで、堅牢なマグニチュード観測に基づいてタイムステップ間のモデル出力の変動差を推定および活用し、推論を加速します。MagCacheは、ビデオ拡散モデル、画像拡散モデルに適しています。
|
| 506 |
+
|
| 507 |
+
Musubi TunerにMagCache機能を実装しました。一部のコードはMagCacheリポジトリのコードを基にしています。現在は`fpack_generate_video.py`でのみ利用可能です。
|
| 508 |
+
|
| 509 |
+
### 使用方法
|
| 510 |
+
|
| 511 |
+
1. mag_ratiosのキャリブレーション
|
| 512 |
+
- `--magcache_calibration`オプションを指定して、それ以外は通常通り推論スクリプトを実行し、mag ratiosをキャリブレーションします。以下のような出力が得られます:
|
| 513 |
+
|
| 514 |
+
```
|
| 515 |
+
INFO:musubi_tuner.fpack_generate_video:Copy and paste following values to --magcache_mag_ratios argument to use them:
|
| 516 |
+
1.00000,1.26562,1.08594,1.02344,1.00781,1.01562,1.01562,1.03125,1.04688,1.00781,1.03125,1.00000,1.01562,1.01562,1.02344,1.01562,0.98438,1.05469,0.98438,0.97266,1.03125,0.96875,0.93359,0.95703,0.77734
|
| 517 |
+
```
|
| 518 |
+
- カスタムプロンプトとモデルでキャリブレーションを実行することをお勧めします。
|
| 519 |
+
- 複数セクションビデオを推論する場合、各セクションのmag ratiosが出力されます。どれか一つ、またはそれらを平均した値を使ってください。
|
| 520 |
+
|
| 521 |
+
2. mag ratiosの使用
|
| 522 |
+
- `--magcache_mag_ratios`オプションでmag ratiosを指定して推論スクリプトを実行します。例:
|
| 523 |
+
|
| 524 |
+
```bash
|
| 525 |
+
python fpack_generate_video.py --magcache_mag_ratios 1.00000,1.26562,1.08594,1.02344,1.00781,1.01562,1.01562,1.03125,1.04688,1.00781,1.03125,1.00000,1.01562,1.01562,1.02344,1.01562,0.98438,1.05469,0.98438,0.97266,1.03125,0.96875,0.93359,0.95703,0.77734
|
| 526 |
+
```
|
| 527 |
+
|
| 528 |
+
- `--magcache_mag_ratios 0`を指定すると、MagCacheリポジトリのデフォルトのmag ratiosが使用されます。
|
| 529 |
+
- mag ratiosの数はキャリブレーションした時と同じステップ数を指定することをお勧めします。ステップ数が異なる場合、mag ratiosは指定されたステップ数に合うように補間されます。
|
| 530 |
+
- `--magcache_retention_ratio`, `--magcache_threshold`, `--magcache_k`オプションを指定してMagCacheの動作を制御できます。デフォルト値は0.2、0.24、6です(MagCacheリポジトリと同じです)。
|
| 531 |
+
|
| 532 |
+
```bash
|
| 533 |
+
python fpack_generate_video.py --magcache_retention_ratio 0.2 --magcache_threshold 0.24 --magcache_k 6
|
| 534 |
+
```
|
| 535 |
+
|
| 536 |
+
- `--magcache_retention_ratio`オプションは、キャッシュしないステップの割合を制御します。例えば、0.2に設定すると、最初の20%のステップはキャッシュされません。デフォルト値は0.2です。
|
| 537 |
+
- `--magcache_threshold`オプションは、キャッシュされた出力を使用するかどうかの閾値を制御します。累積誤差がこの閾値未満の場合、キャッシュされた出力が使用されます。デフォルト値は0.24です。
|
| 538 |
+
- 誤差は、累積誤差にmag ratioを掛けたものとして計算されます。
|
| 539 |
+
- `--magcache_k`オプションは、キャッシュに使用するステップ数を制御します。デフォルト値は6で、これは連続する6ステップがキャッシュに使用されることを意味します。デフォルト値6は恐らく50ステップの場合の推奨値のため、ステップ数が少ない場合は減らすことを検討してください。
|
| 540 |
+
|
| 541 |
+
生成サンプルは英語での説明を参照してください。
|
| 542 |
+
|
| 543 |
+
</details>
|
| 544 |
+
|
| 545 |
+
## Style-Friendly SNR Sampler
|
| 546 |
+
|
| 547 |
+
This sampler is based on the paper [Style-Friendly SNR Sampler for Style-Driven Generation](https://arxiv.org/abs/2411.14793). The paper argues that stylistic features in diffusion models are predominantly learned at high noise levels. This sampler biases the noise level (timestep) sampling towards these higher noise levels, which can significantly improve the model's ability to learn and reproduce specific styles.
|
| 548 |
+
|
| 549 |
+
This feature is enabled by specifying `--timestep_sampling`.
|
| 550 |
+
|
| 551 |
+
<details>
|
| 552 |
+
<summary>日本語</summary>
|
| 553 |
+
|
| 554 |
+
このサンプラーは、論文「[Style-Friendly SNR Sampler for Style-Driven Generation](https://arxiv.org/abs/2411.14793)」に基づいています。この論文では、拡散モデルにおけるスタイル特徴は、主にノイズレベルが高い領域で学習されると主張しています。このサンプラーは、ノイズレベル(タイムステップ)のサンプリングを意図的に高ノイズレベル側に偏らせることで、モデルが特定のスタイルを学習・再現する能力を大幅に向上させることができます。
|
| 555 |
+
|
| 556 |
+
この機能は `--timestep_sampling` を指定することで有効になります。
|
| 557 |
+
</details>
|
| 558 |
+
|
| 559 |
+
### `logsnr` Sampler
|
| 560 |
+
|
| 561 |
+
This is a direct implementation of the sampler proposed in the paper. It samples the log-SNR value from a normal distribution. By setting a low mean and a large standard deviation, it focuses the training on high-noise levels crucial for style learning.
|
| 562 |
+
|
| 563 |
+
To use this, specify `logsnr` for `--timestep_sampling`. You can also configure the mean and standard deviation of the log-SNR distribution with `--logit_mean` and `--logit_std`.
|
| 564 |
+
|
| 565 |
+
The paper recommends `logit_mean=-6.0` and `logit_std` of 2.0 or 3.0.
|
| 566 |
+
|
| 567 |
+
```bash
|
| 568 |
+
accelerate launch ... \
|
| 569 |
+
--timestep_sampling logsnr \
|
| 570 |
+
--logit_mean -6.0 \
|
| 571 |
+
--logit_std 2.0
|
| 572 |
+
```
|
| 573 |
+
|
| 574 |
+
Following is the distribution of the logsnr sampler:
|
| 575 |
+
|
| 576 |
+

|
| 577 |
+
|
| 578 |
+
<details>
|
| 579 |
+
<summary>日本語</summary>
|
| 580 |
+
|
| 581 |
+
論文で提案された通りのサンプラーの実装です。log-SNR値を正規分布からサンプリングします。低い平均値と大きな標準偏差を設定することで、スタイルの学習に不可欠な高ノイズレベル領域に学習を集中させます。
|
| 582 |
+
|
| 583 |
+
使用するには、`--timestep_sampling` に `logsnr` を指定します。また、`--logit_mean` と `--logit_std` でlog-SNR分布の平均と標準偏差を設定できます。
|
| 584 |
+
|
| 585 |
+
論文では `logit_mean=-6.0`、`logit_std` は2.0または3.0が推奨されています。
|
| 586 |
+
|
| 587 |
+
</details>
|
| 588 |
+
|
| 589 |
+
|
| 590 |
+
### `qinglong_flux` and `qinglong_qwen` Sampler (Hybrid Sampler)
|
| 591 |
+
|
| 592 |
+
This is a hybrid sampling method that combines three different samplers to balance style learning, model stability, and detail preservation. It is an experimental feature inspired by the Style-Friendly SNR Sampler. It was proposed by sdbds (Qing Long) in PR [#407](https://github.com/kohya-ss/musubi-tuner/pull/407).
|
| 593 |
+
|
| 594 |
+
In each training step, one of the following samplers is chosen for each sample in the batch based on a predefined ratio:
|
| 595 |
+
|
| 596 |
+
1. **flux_shift or qwen_shift (80%)**: The standard sampler for high-resolution models. Focuses on overall stability.
|
| 597 |
+
2. **logsnr (7.5%)**: The Style-Friendly sampler. Focuses on style learning.
|
| 598 |
+
3. **logsnr2 (12.5%)**: A sampler that focuses on low-noise regions (high log-SNR values). Aims to improve the learning of fine details.
|
| 599 |
+
|
| 600 |
+
To use this, specify `qinglong_flux` or `qinglong_qwen` for `--timestep_sampling`.
|
| 601 |
+
|
| 602 |
+
```bash
|
| 603 |
+
accelerate launch ... \
|
| 604 |
+
--timestep_sampling qinglong_flux \
|
| 605 |
+
--logit_mean -6.0 \
|
| 606 |
+
--logit_std 2.0
|
| 607 |
+
```
|
| 608 |
+
|
| 609 |
+
Following is the distribution of the qinglong flux sampler:
|
| 610 |
+
|
| 611 |
+

|
| 612 |
+
|
| 613 |
+
<details>
|
| 614 |
+
<summary>日本語</summary>
|
| 615 |
+
|
| 616 |
+
これは、スタイルの学習、モデルの安定性、ディテールの再現性のバランスを取るために、3つの異なるサンプラーを組み合わせたハイブリッドサンプリング手法です。Style-Friendly SNR Samplerにインスパイアされた実験的な機能です。PR [#407](https://github.com/kohya-ss/musubi-tuner/pull/407) で sdbds (Qing Long) 氏により提案されました。
|
| 617 |
+
|
| 618 |
+
各学習ステップにおいて、バッチ内の各サンプルに対して、あらかじめ定義された比率に基づき以下のいずれかのサンプラーが選択されます。
|
| 619 |
+
|
| 620 |
+
1. **flux_shift または qwen_shift (80%)**: 高解像度モデル向けの標準的なサンプラー。全体的な安定性を重視します。
|
| 621 |
+
2. **logsnr (7.5%)**: Style-Friendlyサンプラー。スタイルの学習を重視します。
|
| 622 |
+
3. **logsnr2 (12.5%)**: 低ノイズ領域(高いlog-SNR値)に焦点を当てたサンプラー。細部のディテール学習を向上させることを目的とします。
|
| 623 |
+
|
| 624 |
+
使用するには、`--timestep_sampling` に `qinglong_flux` または `qinglong_qwen` を指定します。
|
| 625 |
+
|
| 626 |
+
</details>
|
| 627 |
+
|
| 628 |
+
## Specify time step range for training / 学習時のタイムステップ範囲の指定
|
| 629 |
+
|
| 630 |
+
You can specify the range of timesteps for training. This is useful for focusing the training on a specific part of the diffusion process.
|
| 631 |
+
|
| 632 |
+
- `--min_timestep`: Specifies the minimum timestep for training (0-999, default: 0).
|
| 633 |
+
- `--max_timestep`: Specifies the maximum timestep for training (1-1000, default: 1000).
|
| 634 |
+
- `--preserve_distribution_shape`: If specified, it constrains timestep sampling to the `[min_timestep, max_timestep]` range using rejection sampling, which preserves the original distribution shape. By default, the `[0, 1]` range is scaled, which can distort the distribution. This option is only effective when `timestep_sampling` is not 'sigma'.
|
| 635 |
+
|
| 636 |
+
<details>
|
| 637 |
+
<summary>日本語</summary>
|
| 638 |
+
|
| 639 |
+
学習時のタイムステップの範囲を指定できます。これにより、拡散プロセスの特定の部分に学習を集中させることができます。
|
| 640 |
+
|
| 641 |
+
- `--min_timestep`: 学習時の最小タイムステップを指定します(0-999、デフォルト: 0)。
|
| 642 |
+
- `--max_timestep`: 学習時の最大タイムステップを指定します(1-1000、デフォルト: 1000)。
|
| 643 |
+
- `--preserve_distribution_shape`: 指定すると、タイムステップのサンプリングを棄却サンプリング(条件に合わないものを捨てる)を用いて `[min_timestep, max_timestep]` の範囲に制約し、元の分布形状を保持します。デフォルトでは、`[0, 1]` の範囲がスケーリングされるため、分布が歪む可能性があります。このオプションは `timestep_sampling` が 'sigma' 以外の場合にのみ有効です。
|
| 644 |
+
</details>
|
| 645 |
+
|
| 646 |
+
### Example / 記述例
|
| 647 |
+
|
| 648 |
+
To train only on the latter half of the timesteps (more detailed part) / タイムステップの後半(より詳細な部分)のみを学習する場合:
|
| 649 |
+
|
| 650 |
+
```bash
|
| 651 |
+
--min_timestep 500 --max_timestep 1000
|
| 652 |
+
```
|
| 653 |
+
|
| 654 |
+
To train only on the first half of the timesteps (more structural part) / タイムステップの前半(より構造的な部分)のみを学習する場合:
|
| 655 |
+
|
| 656 |
+
```bash
|
| 657 |
+
--min_timestep 0 --max_timestep 500
|
| 658 |
+
```
|
| 659 |
+
|
| 660 |
+
To train on a specific range while preserving the sampling distribution shape / サンプリング分布の形状を維持しつつ特定の範囲で学習する場合:
|
| 661 |
+
|
| 662 |
+
```bash
|
| 663 |
+
--min_timestep 200 --max_timestep 800 --preserve_distribution_shape
|
| 664 |
+
```
|
| 665 |
+
|
| 666 |
+
### Actual distribution shape / 実際の分布形状
|
| 667 |
+
|
| 668 |
+
You can visualize the distribution shape of the timesteps with `--show_timesteps image` (or console) option. The distribution shape is determined by the `--min_timestep`, `--max_timestep`, and `--preserve_distribution_shape` options.
|
| 669 |
+
|
| 670 |
+
In the following examples, the discrete flow shift is set to 3.0.
|
| 671 |
+
|
| 672 |
+
When `--min_timestep` and `--max_timestep` are not specified, the distribution shape is as follows:
|
| 673 |
+
|
| 674 |
+

|
| 675 |
+
|
| 676 |
+
When `--min_timestep 500` and `--max_timestep 1000` are specified, and `--preserve_distribution_shape` is not specified, the distribution shape is as follows:
|
| 677 |
+
|
| 678 |
+

|
| 679 |
+
|
| 680 |
+
When `--min_timestep 500` and `--max_timestep 1000` are specified, and `--preserve_distribution_shape` is specified, the distribution shape is as follows:
|
| 681 |
+
|
| 682 |
+

|
| 683 |
+
|
| 684 |
+
<details>
|
| 685 |
+
<summary>日本語</summary>
|
| 686 |
+
|
| 687 |
+
タイムステップの分布形状は、`--show_timesteps image`(またはconsole)オプションで確認できます。分布形状は、`--min_timestep`、`--max_timestep`、および `--preserve_distribution_shape` オプションによって決まります。
|
| 688 |
+
|
| 689 |
+
上の図はそれぞれ、離散フローシフトが3.0のとき、
|
| 690 |
+
|
| 691 |
+
1. `--min_timestep` と `--max_timestep` が指定されていない場合
|
| 692 |
+
2. `--min_timestep 500` と `--max_timestep 1000` が指定され、`--preserve_distribution_shape` が指定されていない場合
|
| 693 |
+
3. `--min_timestep 500` と `--max_timestep 1000` が指定され、`--preserve_distribution_shape` が指定された場合
|
| 694 |
+
|
| 695 |
+
の分布形状を示しています。
|
| 696 |
+
</details>
|
| 697 |
+
|
| 698 |
+
## Timestep Bucketing for Uniform Sampling / 均一なサンプリングのためのTimestep Bucketing
|
| 699 |
+
|
| 700 |
+
This feature is experimental.
|
| 701 |
+
|
| 702 |
+
When training with a small dataset or for a few epochs, the random sampling of timesteps can be biased, potentially leading to unstable training. To mitigate this, timestep bucketing ensures a more uniform distribution of timesteps throughout the training process.
|
| 703 |
+
|
| 704 |
+
This feature works as follows:
|
| 705 |
+
|
| 706 |
+
1. At the beginning of each epoch, it prepares a pool of timesteps equal to the number of items in the dataset for that epoch. These timesteps are calculated as follows:
|
| 707 |
+
- A specified number of buckets is created. Each bucket represents an equal interval of the `[0, 1]` range (e.g., with 5 buckets, the ranges are `[0, 0.2]`, `[0.2, 0.4]`, ... `[0.8, 1.0]`).
|
| 708 |
+
- Each bucket is filled with an equal number of randomly generated timesteps within its range.
|
| 709 |
+
- The number of timesteps in each bucket is calculated as "number of dataset items ÷ number of buckets".
|
| 710 |
+
|
| 711 |
+
2. All timesteps from all buckets are then combined and shuffled.
|
| 712 |
+
3. During training, instead of generating a random timestep for each item, one is drawn from this pre-shuffled pool.
|
| 713 |
+
|
| 714 |
+
This ensures that the model sees a balanced distribution of timesteps in each epoch, which can improve training stability, especially for LoRA training or when using small datasets.
|
| 715 |
+
|
| 716 |
+
This feature is enabled by specifying `--num_timestep_buckets`.
|
| 717 |
+
|
| 718 |
+
<details>
|
| 719 |
+
<summary>日本語</summary>
|
| 720 |
+
|
| 721 |
+
この機能は実験的なものです。
|
| 722 |
+
|
| 723 |
+
データセットが小さい場合や学習エポック数が少ない場合、タイムステップの乱数に偏りが生じることで、学習が不安定になる可能性があります。Timestep Bucketing機能は、この問題を軽減するための機能で、学習プロセス全体でタイムステップがより均一に分布するよう調整します。
|
| 724 |
+
|
| 725 |
+
この機能は以下のように動作します:
|
| 726 |
+
|
| 727 |
+
1. 各エポックの開始時に、あらかじめそのエポックのデータセットの件数と同じ数の、タイムステップを準備します。これらのタイムステップは以下のように計算されます。
|
| 728 |
+
|
| 729 |
+
- 指定された数のバケットを準備します。各バケットは `[0, 1]` の範囲を等分した区間を表します(例:5バケットの場合、`[0, 0.2]`、`[0.2, 0.4]` ... `[0.8, 1.0]`)。
|
| 730 |
+
- 各バケットに、その範囲内でランダムに生成されたタイムステップを配置します。
|
| 731 |
+
- それぞれのバケットのタイムステップの件数は、「データセットの件数÷バケット数」で計算されます。
|
| 732 |
+
|
| 733 |
+
2. すべてのバケットのタイムステップが結合され、シャッフルされます。
|
| 734 |
+
3. 学習時には、アイテムごとにランダムなタイムステップを生成する代わりに、この事前にシャッフルされたプールからタイムステップが取り出されます。
|
| 735 |
+
|
| 736 |
+
これにより、各エポックでモデルがバランスの取れたタイムステップの分布を使用することになり、特にLoRAの学習や小規模なデータセットを使用する際の学習の安定性が向上します。
|
| 737 |
+
|
| 738 |
+
この機能は `--num_timestep_buckets` を指定することで有効になります。
|
| 739 |
+
|
| 740 |
+
</details>
|
| 741 |
+
|
| 742 |
+
### How to use / 使用方法
|
| 743 |
+
|
| 744 |
+
Specify the number of buckets with the `--num_timestep_buckets` option. A value of 2 or more enables this feature. If not specified, it is disabled.
|
| 745 |
+
|
| 746 |
+
Community research is needed to determine the optimal value, but starting with a value between `4` and `10` may be a good idea.
|
| 747 |
+
|
| 748 |
+
<details>
|
| 749 |
+
<summary>日本語</summary>
|
| 750 |
+
|
| 751 |
+
`--num_timestep_buckets` オプションでバケット数を指定します。2以上の値を指定するとこの機能が有効になります。指定しない場合は無効です。
|
| 752 |
+
|
| 753 |
+
最適な値に関してはコミュニティの検証が必要ですが、`4` から `10` 程度の値から始めると良いと思われます。
|
| 754 |
+
|
| 755 |
+
</details>
|
| 756 |
+
|
| 757 |
+
### Example / 記述例
|
| 758 |
+
|
| 759 |
+
```bash
|
| 760 |
+
accelerate launch ... \
|
| 761 |
+
--num_timestep_buckets 5
|
| 762 |
+
```
|
| 763 |
+
|
| 764 |
+
### Notes / 注意点
|
| 765 |
+
|
| 766 |
+
- This feature may not work as expected when training with both high and low noise models simultaneously in `wan_train_network.py` (`--dit_high_noise` option) or when `--preserve_distribution_shape` is specified. Because the way timesteps are handled will differ in these cases.
|
| 767 |
+
|
| 768 |
+
Specifically, instead of selecting from pre-configured timestep buckets, the process involves determining buckets on-demand and generating random timesteps within the range each bucket covers. Therefore, the uniform sampling effect may not be achieved, but some improvement can be expected compared to completely random generation (within the `[0, 1]` range).
|
| 769 |
+
|
| 770 |
+
<details>
|
| 771 |
+
<summary>日本語</summary>
|
| 772 |
+
|
| 773 |
+
- `wan_train_network.py` でhigh/lowノイズモデルを同時に学習する場合(`--dit_high_noise` オプション)、および、`--preserve_distribution_shape` を指定した場合、タイムステップの扱いが異なるため、この機能は期待通りに動作しない可能性があります。
|
| 774 |
+
|
| 775 |
+
具体的には、あらかじめ設定されたタイムステップのバケットから選択されるのではなく、都度、バケットの決定→範囲内でのランダムなタイムステップの生成が行われます。このため、均一なサンプリングの効果が得られない可能性がありますが、完全なランダム(`[0, 1]` の範囲での生成)に比べると、多少の改善が見込まれます。
|
| 776 |
+
|
| 777 |
+
</details>
|
| 778 |
+
|
| 779 |
+
## Schedule Free Optimizer / スケジュールフリーオプティマイザ
|
| 780 |
+
|
| 781 |
+
[Schedule Free Optimizer](https://github.com/facebookresearch/schedule_free) is an optimizer that does not require a learning rate schedule.
|
| 782 |
+
|
| 783 |
+
The library is optional, so you can install it with `pip install schedulefree`.
|
| 784 |
+
|
| 785 |
+
Specify the optimizer with the `--optimizer_type` argument, using the format `package_name.ClassName`, for example: `--optimizer_type schedulefree.AdamWScheduleFree`.
|
| 786 |
+
|
| 787 |
+
You can specify multiple arguments for the optimizer using the `--optimizer_args` argument in the form `arg_name=value` (e.g., `--optimizer_args "weight_decay=0.01" "betas=(0.9,0.95)"`).
|
| 788 |
+
|
| 789 |
+
<details>
|
| 790 |
+
<summary>日本語</summary>
|
| 791 |
+
|
| 792 |
+
[Schedule Free Optimizer](https://github.com/facebookresearch/schedule_free)は、学習率スケジュールを必要としないオプティマイザです。
|
| 793 |
+
|
| 794 |
+
ライブラリはオプションのため、`pip install schedulefree` でインストールしてください。
|
| 795 |
+
|
| 796 |
+
`--optimizer_type`引数に、` --optimizer_type schedulefree.AdamWScheduleFree`のように、`パッケージ名.クラス名`の形式で指定します。オプティマイザへの引数は、`--optimizer_args`に`引数名=値`の形で複数指定できます(例:`--optimizer_args "weight_decay=0.01" "betas=(0.9,0.95)"`)。
|
| 797 |
+
|
| 798 |
+
</details>
|
| 799 |
+
|
| 800 |
+
## Custom LR Scheduler / カスタムLRスケジューラ
|
| 801 |
+
|
| 802 |
+
### Rex
|
| 803 |
+
|
| 804 |
+
The Rex scheduler was added in [PR #513](https://github.com/kohya-ss/musubi-tuner/pull/513). It is based on the paper [REX: Revisiting Budgeted Training with an Improved Schedule](https://arxiv.org/abs/2107.04197), and the implementation is based on the repository by [IvanVassi](https://github.com/IvanVassi/REX_LR).
|
| 805 |
+
|
| 806 |
+
It has two parameters, `rex_alpha` and `rex_beta`, with default values of 0.1 and 0.9, respectively. These parameters are based on the defaults in IvanVassi's repository. The values proposed in the paper are 0.5 and 0.5. You can also use `--lr_warmup_steps` (default is 0) and `--lr_scheduler_min_lr_ratio` (default is 0.01).
|
| 807 |
+
|
| 808 |
+
It is similar to the Polynomial Scheduler with power less than 1, but Rex has a more gradual decrease in learning rate. For the specific LR curve, refer to the explanation in PR #513.
|
| 809 |
+
|
| 810 |
+
It is enabled by specifying `--lr_scheduler rex`. You can specify the parameters with `--lr_scheduler_args`.
|
| 811 |
+
|
| 812 |
+
```bash
|
| 813 |
+
--lr_scheduler rex --lr_scheduler_args "rex_alpha=0.1" "rex_beta=0.9"
|
| 814 |
+
```
|
| 815 |
+
|
| 816 |
+
<details>
|
| 817 |
+
<summary>日本語</summary>
|
| 818 |
+
|
| 819 |
+
Rexスケジューラは [PR #513](https://github.com/kohya-ss/musubi-tuner/pull/513) で追加されました。論文 [REX: Revisiting Budgeted Training with an Improved Schedule](https://arxiv.org/abs/2107.04197) に基づいたもので、実装は [IvanVassi](https://github.com/IvanVassi/REX_LR) 氏のリポジトリを元にしています。
|
| 820 |
+
|
| 821 |
+
`rex_alpha`と`rex_beta`の2つのパラメータを持ち、デフォルト値はそれぞれ0.1と0.9です。これらのパラメータはIvanVassi氏のリポジトリのデフォルト値に基づいています。論文で提唱されている値はそれぞれ0.5/0.5です。また、`--lr_warmup_steps` (デフォルト値は0)および `--lr_scheduler_min_lr_ratio` (デフォルト値は0.01)も使用できます。
|
| 822 |
+
|
| 823 |
+
powerを1未満に設定した Polynomial Scheduler に似ていますが、Rexは学習率の減少がより緩やかです。具体的なLRのカーブはPR #513の説明を参照してください。
|
| 824 |
+
|
| 825 |
+
`--lr_scheduler rex`を指定することで有効になります。`--lr_scheduler_args`でパラメータを指定できます。
|
| 826 |
+
|
| 827 |
+
```bash
|
| 828 |
+
--lr_scheduler rex --lr_scheduler_args "rex_alpha=0.1" "rex_beta=0.9"
|
| 829 |
+
```
|
| 830 |
+
|
| 831 |
+
</details>
|
docs/dataset_config.md
ADDED
|
@@ -0,0 +1,724 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
> 📝 Click on the language section to expand / 言語をクリックして展開
|
| 2 |
+
|
| 3 |
+
## Dataset Configuration
|
| 4 |
+
|
| 5 |
+
Please create a TOML file for dataset configuration.
|
| 6 |
+
|
| 7 |
+
Image and video datasets are supported. The configuration file can include multiple datasets, either image or video datasets, with caption text files or metadata JSONL files.
|
| 8 |
+
|
| 9 |
+
The cache directory must be different for each dataset.
|
| 10 |
+
|
| 11 |
+
Each video is extracted frame by frame without additional processing and used for training. It is recommended to use videos with a frame rate of 24fps for HunyuanVideo, 16fps for Wan2.1 and 30fps for FramePack. You can check the videos that will be trained using `--debug_mode video` when caching latent (see [here](/README.md#latent-caching)).
|
| 12 |
+
<details>
|
| 13 |
+
<summary>日本語</summary>
|
| 14 |
+
|
| 15 |
+
データセットの設定を行うためのTOMLファイルを作成してください。
|
| 16 |
+
|
| 17 |
+
画像データセットと動画データセットがサポートされています。設定ファイルには、画像または動画データセットを複数含めることができます。キャプションテキストファイルまたはメタデータJSONLファイルを使用できます。
|
| 18 |
+
|
| 19 |
+
キャッシュディレクトリは、各データセットごとに異なるディレクトリである必要があります。
|
| 20 |
+
|
| 21 |
+
動画は追加のプロセスなしでフレームごとに抽出され、学習に用いられます。そのため、HunyuanVideoは24fps、Wan2.1は16fps、FramePackは30fpsのフレームレートの動画を使用することをお勧めします。latentキャッシュ時の`--debug_mode video`を使用すると、学習される動画を確認できます([こちら](/README.ja.md#latentの事前キャッシュ)を参照)。
|
| 22 |
+
</details>
|
| 23 |
+
|
| 24 |
+
### Sample for Image Dataset with Caption Text Files
|
| 25 |
+
|
| 26 |
+
```toml
|
| 27 |
+
# resolution, caption_extension, batch_size, num_repeats, enable_bucket, bucket_no_upscale should be set in either general or datasets
|
| 28 |
+
# otherwise, the default values will be used for each item
|
| 29 |
+
|
| 30 |
+
# general configurations
|
| 31 |
+
[general]
|
| 32 |
+
resolution = [960, 544]
|
| 33 |
+
caption_extension = ".txt"
|
| 34 |
+
batch_size = 1
|
| 35 |
+
enable_bucket = true
|
| 36 |
+
bucket_no_upscale = false
|
| 37 |
+
|
| 38 |
+
[[datasets]]
|
| 39 |
+
image_directory = "/path/to/image_dir"
|
| 40 |
+
cache_directory = "/path/to/cache_directory"
|
| 41 |
+
num_repeats = 1 # optional, default is 1. Number of times to repeat the dataset. Useful to balance the multiple datasets with different sizes.
|
| 42 |
+
# multiple_target = true # optional, default is false. Set to true for Qwen-Image-Layered training.
|
| 43 |
+
|
| 44 |
+
# other datasets can be added here. each dataset can have different configurations
|
| 45 |
+
```
|
| 46 |
+
|
| 47 |
+
`image_directory` is the directory containing images. The captions are stored in text files with the same filename as the image, but with the extension specified by `caption_extension` (for example, `image1.jpg` and `image1.txt`).
|
| 48 |
+
|
| 49 |
+
`cache_directory` is optional, default is None to use the same directory as the image directory. However, we recommend to set the cache directory to avoid accidental sharing of the cache files between different datasets.
|
| 50 |
+
|
| 51 |
+
`num_repeats` is also available. It is optional, default is 1 (no repeat). It repeats the images (or videos) that many times to expand the dataset. For example, if `num_repeats = 2` and there are 20 images in the dataset, each image will be duplicated twice (with the same caption) to have a total of 40 images. It is useful to balance the multiple datasets with different sizes.
|
| 52 |
+
|
| 53 |
+
For Qwen-Image-Layered training, set `multiple_target = true`. Also, in the `image_directory`, for each "image to be trained + segmentation (layer) results" combination, store the following (if `caption_extension` is `.txt`):
|
| 54 |
+
|
| 55 |
+
|Item|Example|Note|
|
| 56 |
+
|---|---|---|
|
| 57 |
+
|Caption file|`image1.txt`| |
|
| 58 |
+
|Image to be trained (image to be layered)|`image1.png`| |
|
| 59 |
+
|Segmentation (layer) result images|`image1_1.png`, `image1_2.png`, ...|Alpha channel required|
|
| 60 |
+
|
| 61 |
+
The next combination would be stored as `/path/to/layer_images/image2.txt` for caption, and `/path/to/layer_images/image2.png`, `/path/to/layer_images/image2_0.png`, `/path/to/layer_images/image2_1.png` for images.
|
| 62 |
+
|
| 63 |
+
<details>
|
| 64 |
+
<summary>日本語</summary>
|
| 65 |
+
|
| 66 |
+
`image_directory`は画像を含むディレクトリのパスです。キャプションは、画像と同じファイル名で、`caption_extension`で指定した拡張子のテキストファイルに格納してください(例:`image1.jpg`と`image1.txt`)。
|
| 67 |
+
|
| 68 |
+
`cache_directory` はオプションです。デフォルトは画像ディレクトリと同じディレクトリに設定されます。ただし、異なるデータセット間でキャッシュファイルが共有されるのを防ぐために、明示的に別のキャッシュディレクトリを設定することをお勧めします。
|
| 69 |
+
|
| 70 |
+
`num_repeats` はオプションで、デフォルトは 1 です(繰り返しなし)。画像(や動画)を、その回数だけ単純に繰り返してデータセットを拡張します。たとえば`num_repeats = 2`としたとき、画像20枚のデータセットなら、各画像が2枚ずつ(同一のキャプションで)計40枚存在した場合と同じになります。異なるデータ数のデータセット間でバランスを取るために使用可能です。
|
| 71 |
+
|
| 72 |
+
resolution, caption_extension, batch_size, num_repeats, enable_bucket, bucket_no_upscale は general または datasets のどちらかに設定してください。省略時は各項目のデフォルト値が使用されます。
|
| 73 |
+
|
| 74 |
+
`[[datasets]]`以下を追加することで、他のデータセットを追加できます。各データセットには異なる設定を持てます。
|
| 75 |
+
|
| 76 |
+
Qwen-Image-Layeredの学習の場合、`multiple_target = true`を設定してください。また、`image_directory`内に、それぞれの「学習する画像+分割結果」組み合わせごとに、以下を格納してください(`caption_extension`が`.txt`の場合)。
|
| 77 |
+
|
| 78 |
+
|項目|例|備考|
|
| 79 |
+
|---|---|---|
|
| 80 |
+
|キャプションファイル|`image1.txt`| |
|
| 81 |
+
|学習する画像(分割対象の画像)|`image1.png`| |
|
| 82 |
+
|分割結果のレイヤー画像群|`image1_1.png`, `image1_2.png`, ...|アルファチャンネル必須|
|
| 83 |
+
|
| 84 |
+
次の組み合わせは、`/path/to/layer_images/image2.txt`に対して、`/path/to/layer_images/image2.png`, `/path/to/layer_images/image2_0.png`, `/path/to/layer_images/image2_1.png`のように格納します。
|
| 85 |
+
|
| 86 |
+
</details>
|
| 87 |
+
|
| 88 |
+
### Sample for Image Dataset with Metadata JSONL File
|
| 89 |
+
|
| 90 |
+
```toml
|
| 91 |
+
# resolution, batch_size, num_repeats, enable_bucket, bucket_no_upscale should be set in either general or datasets
|
| 92 |
+
# caption_extension is not required for metadata jsonl file
|
| 93 |
+
# cache_directory is required for each dataset with metadata jsonl file
|
| 94 |
+
|
| 95 |
+
# general configurations
|
| 96 |
+
[general]
|
| 97 |
+
resolution = [960, 544]
|
| 98 |
+
batch_size = 1
|
| 99 |
+
enable_bucket = true
|
| 100 |
+
bucket_no_upscale = false
|
| 101 |
+
|
| 102 |
+
[[datasets]]
|
| 103 |
+
image_jsonl_file = "/path/to/metadata.jsonl"
|
| 104 |
+
cache_directory = "/path/to/cache_directory" # required for metadata jsonl file
|
| 105 |
+
num_repeats = 1 # optional, default is 1. Same as above.
|
| 106 |
+
# multiple_target = true # optional, default is false. Set to true for Qwen-Image-Layered training.
|
| 107 |
+
|
| 108 |
+
# other datasets can be added here. each dataset can have different configurations
|
| 109 |
+
```
|
| 110 |
+
|
| 111 |
+
JSONL file format for metadata:
|
| 112 |
+
|
| 113 |
+
```json
|
| 114 |
+
{"image_path": "/path/to/image1.jpg", "caption": "A caption for image1"}
|
| 115 |
+
{"image_path": "/path/to/image2.jpg", "caption": "A caption for image2"}
|
| 116 |
+
```
|
| 117 |
+
|
| 118 |
+
For Qwen-Image-Layered training, set `multiple_target = true`. Also, in the metadata JSONL file, for each "image to be trained + segmentation (layer) results" combination, specify the image paths with numbered attributes like `image_path_0`, `image_path_1`, etc.
|
| 119 |
+
|
| 120 |
+
```json
|
| 121 |
+
{"image_path_0": "/path/to/image1_base.png", "image_path_1": "/path/to/image1_layer1.png", "image_path_2": "/path/to/image1_layer2.png", "caption": "A caption for image1"}
|
| 122 |
+
{"image_path_0": "/path/to/image2_base.png", "image_path_1": "/path/to/image2_layer1.png", "image_path_2": "/path/to/image2_layer2.png", "caption": "A caption for image2"}
|
| 123 |
+
```
|
| 124 |
+
|
| 125 |
+
<details>
|
| 126 |
+
<summary>日本語</summary>
|
| 127 |
+
|
| 128 |
+
resolution, batch_size, num_repeats, enable_bucket, bucket_no_upscale は general または datasets のどちらかに設定してください。省略時は各項目のデフォルト値が使用されます。
|
| 129 |
+
|
| 130 |
+
metadata jsonl ファイルを使用する場合、caption_extension は必要ありません。また、cache_directory は必須です。
|
| 131 |
+
|
| 132 |
+
キャプションによるデータセットと同様に、複数のデータセットを追加できます。各データセットには異なる設定を持てます。
|
| 133 |
+
|
| 134 |
+
Qwen-Image-Layeredの学習の場合、`multiple_target = true`を設定してください。また、metadata jsonl ファイル内で、各画像に対して複数のターゲット画像を指定する場合は、`image_path_0`, `image_path_1`のように数字を付与してください。
|
| 135 |
+
|
| 136 |
+
</details>
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
### Sample for Video Dataset with Caption Text Files
|
| 140 |
+
|
| 141 |
+
```toml
|
| 142 |
+
# Common parameters (resolution, caption_extension, batch_size, num_repeats, enable_bucket, bucket_no_upscale)
|
| 143 |
+
# can be set in either general or datasets sections
|
| 144 |
+
# Video-specific parameters (target_frames, frame_extraction, frame_stride, frame_sample, max_frames, source_fps)
|
| 145 |
+
# must be set in each datasets section
|
| 146 |
+
|
| 147 |
+
# general configurations
|
| 148 |
+
[general]
|
| 149 |
+
resolution = [960, 544]
|
| 150 |
+
caption_extension = ".txt"
|
| 151 |
+
batch_size = 1
|
| 152 |
+
enable_bucket = true
|
| 153 |
+
bucket_no_upscale = false
|
| 154 |
+
|
| 155 |
+
[[datasets]]
|
| 156 |
+
video_directory = "/path/to/video_dir"
|
| 157 |
+
cache_directory = "/path/to/cache_directory" # recommended to set cache directory
|
| 158 |
+
target_frames = [1, 25, 45]
|
| 159 |
+
frame_extraction = "head"
|
| 160 |
+
source_fps = 30.0 # optional, source fps for videos in the directory, decimal number
|
| 161 |
+
|
| 162 |
+
[[datasets]]
|
| 163 |
+
video_directory = "/path/to/video_dir2"
|
| 164 |
+
cache_directory = "/path/to/cache_directory2" # recommended to set cache directory
|
| 165 |
+
frame_extraction = "full"
|
| 166 |
+
max_frames = 45
|
| 167 |
+
|
| 168 |
+
# other datasets can be added here. each dataset can have different configurations
|
| 169 |
+
```
|
| 170 |
+
|
| 171 |
+
`video_directory` is the directory containing videos. The captions are stored in text files with the same filename as the video, but with the extension specified by `caption_extension` (for example, `video1.mp4` and `video1.txt`).
|
| 172 |
+
|
| 173 |
+
__In HunyuanVideo and Wan2.1, the number of `target_frames` must be "N\*4+1" (N=0,1,2,...).__ Otherwise, it will be truncated to the nearest "N*4+1".
|
| 174 |
+
|
| 175 |
+
In FramePack, it is recommended to set `frame_extraction` to `full` and `max_frames` to a sufficiently large value, as it can handle longer videos. However, if the video is too long, an Out of Memory error may occur during VAE encoding. The videos in FramePack are trimmed to "N * latent_window_size * 4 + 1" frames (for example, 37, 73, 109... if `latent_window_size` is 9).
|
| 176 |
+
|
| 177 |
+
If the `source_fps` is specified, the videos in the directory are considered to be at this frame rate, and some frames will be skipped to match the model's frame rate (24 for HunyuanVideo and 16 for Wan2.1). __The value must be a decimal number, for example, `30.0` instead of `30`.__ The skipping is done automatically and does not consider the content of the images. Please check if the converted data is correct using `--debug_mode video`.
|
| 178 |
+
|
| 179 |
+
If `source_fps` is not specified (default), all frames of the video will be used regardless of the video's frame rate.
|
| 180 |
+
|
| 181 |
+
<details>
|
| 182 |
+
<summary>日本語</summary>
|
| 183 |
+
|
| 184 |
+
共通パラメータ(resolution, caption_extension, batch_size, num_repeats, enable_bucket, bucket_no_upscale)は、generalまたはdatasetsのいずれかに設定できます。
|
| 185 |
+
動画固有のパラメータ(target_frames, frame_extraction, frame_stride, frame_sample, max_frames, source_fps)は、各datasetsセクションに設定する必要があります。
|
| 186 |
+
|
| 187 |
+
`video_directory`は動画を含むディレクトリのパスです。キャプションは、動画と同じファイル名で、`caption_extension`で指定した拡張子のテキストファイルに格納してください(例:`video1.mp4`と`video1.txt`)。
|
| 188 |
+
|
| 189 |
+
__HunyuanVideoおよびWan2.1では、target_framesの数値は「N\*4+1」である必要があります。__ これ以外の値の場合は、最も近いN\*4+1の値に切り捨てられます。
|
| 190 |
+
|
| 191 |
+
FramePackでも同様ですが、FramePackでは動画が長くても学習可能なため、 `frame_extraction`に`full` を指定し、`max_frames`を十分に大きな値に設定することをお勧めします。ただし、あまりにも長すぎるとVAEのencodeでOut of Memoryエラーが発生する可能性があります。FramePackの動画は、「N * latent_window_size * 4 + 1」フレームにトリミングされます(latent_window_sizeが9の場合、37、73、109……)。
|
| 192 |
+
|
| 193 |
+
`source_fps`を指定した場合、ディレクトリ内の動画をこのフレームレートとみなして、モデルのフレームレートにあうようにいくつかのフレームをスキップします(HunyuanVideoは24、Wan2.1は16)。__小数点を含む数値で指定してください。__ 例:`30`ではなく`30.0`。スキップは機械的に行われ、画像の内容は考慮しません。変換後のデータが正しいか、`--debug_mode video`で確認してください。
|
| 194 |
+
|
| 195 |
+
`source_fps`を指定しない場合、動画のフレームは(動画自体のフレームレートに関係なく)すべて使用されます。
|
| 196 |
+
|
| 197 |
+
他の注意事項は画像データセットと同様です。
|
| 198 |
+
</details>
|
| 199 |
+
|
| 200 |
+
### Sample for Video Dataset with Metadata JSONL File
|
| 201 |
+
|
| 202 |
+
```toml
|
| 203 |
+
# Common parameters (resolution, caption_extension, batch_size, num_repeats, enable_bucket, bucket_no_upscale)
|
| 204 |
+
# can be set in either general or datasets sections
|
| 205 |
+
# Video-specific parameters (target_frames, frame_extraction, frame_stride, frame_sample, max_frames, source_fps)
|
| 206 |
+
# must be set in each datasets section
|
| 207 |
+
|
| 208 |
+
# caption_extension is not required for metadata jsonl file
|
| 209 |
+
# cache_directory is required for each dataset with metadata jsonl file
|
| 210 |
+
|
| 211 |
+
# general configurations
|
| 212 |
+
[general]
|
| 213 |
+
resolution = [960, 544]
|
| 214 |
+
batch_size = 1
|
| 215 |
+
enable_bucket = true
|
| 216 |
+
bucket_no_upscale = false
|
| 217 |
+
|
| 218 |
+
[[datasets]]
|
| 219 |
+
video_jsonl_file = "/path/to/metadata.jsonl"
|
| 220 |
+
target_frames = [1, 25, 45]
|
| 221 |
+
frame_extraction = "head"
|
| 222 |
+
cache_directory = "/path/to/cache_directory_head"
|
| 223 |
+
source_fps = 30.0 # optional, source fps for videos in the jsonl file
|
| 224 |
+
# same metadata jsonl file can be used for multiple datasets
|
| 225 |
+
[[datasets]]
|
| 226 |
+
video_jsonl_file = "/path/to/metadata.jsonl"
|
| 227 |
+
target_frames = [1]
|
| 228 |
+
frame_stride = 10
|
| 229 |
+
cache_directory = "/path/to/cache_directory_stride"
|
| 230 |
+
|
| 231 |
+
# other datasets can be added here. each dataset can have different configurations
|
| 232 |
+
```
|
| 233 |
+
|
| 234 |
+
JSONL file format for metadata:
|
| 235 |
+
|
| 236 |
+
```json
|
| 237 |
+
{"video_path": "/path/to/video1.mp4", "caption": "A caption for video1"}
|
| 238 |
+
{"video_path": "/path/to/video2.mp4", "caption": "A caption for video2"}
|
| 239 |
+
```
|
| 240 |
+
|
| 241 |
+
`video_path` can be a directory containing multiple images.
|
| 242 |
+
|
| 243 |
+
<details>
|
| 244 |
+
<summary>日本語</summary>
|
| 245 |
+
metadata jsonl ファイルを使用する場合、caption_extension は必要ありません。また、cache_directory は必須です。
|
| 246 |
+
|
| 247 |
+
`video_path`は、複数の画像を含むディレクトリのパスでも構いません。
|
| 248 |
+
|
| 249 |
+
他の注意事項は今までのデータセットと同様です。
|
| 250 |
+
</details>
|
| 251 |
+
|
| 252 |
+
### frame_extraction Options
|
| 253 |
+
|
| 254 |
+
- `head`: Extract the first N frames from the video.
|
| 255 |
+
- `chunk`: Extract frames by splitting the video into chunks of N frames.
|
| 256 |
+
- `slide`: Extract frames from the video with a stride of `frame_stride`.
|
| 257 |
+
- `uniform`: Extract `frame_sample` samples uniformly from the video.
|
| 258 |
+
- `full`: Extract all frames from the video.
|
| 259 |
+
|
| 260 |
+
In the case of `full`, the entire video is used, but it is trimmed to "N*4+1" frames. It is also trimmed to the `max_frames` if it exceeds that value. To avoid Out of Memory errors, please set `max_frames`.
|
| 261 |
+
|
| 262 |
+
The frame extraction methods other than `full` are recommended when the video contains repeated actions. `full` is recommended when each video represents a single complete motion.
|
| 263 |
+
|
| 264 |
+
For example, consider a video with 40 frames. The following diagrams illustrate each extraction:
|
| 265 |
+
|
| 266 |
+
<details>
|
| 267 |
+
<summary>日本語</summary>
|
| 268 |
+
|
| 269 |
+
- `head`: 動画から最初のNフレームを抽出します。
|
| 270 |
+
- `chunk`: 動画をNフレームずつに分割してフレームを抽出します。
|
| 271 |
+
- `slide`: `frame_stride`に指定したフレームごとに動画からNフレームを抽出します。
|
| 272 |
+
- `uniform`: 動画から一定間隔で、`frame_sample`個のNフレームを抽出します。
|
| 273 |
+
- `full`: 動画から全てのフレームを抽出します。
|
| 274 |
+
|
| 275 |
+
`full`の場合、各動画の全体を用いますが、「N*4+1」のフレーム数にトリミングされます。また`max_frames`を超える場合もその値にトリミングされます。Out of Memoryエラーを避けるために、`max_frames`を設定してください。
|
| 276 |
+
|
| 277 |
+
`full`以外の抽出方法は、動画が特定の動作を繰り返している場合にお勧めします。`full`はそれぞれの動画がひとつの完結したモーションの場合にお勧めします。
|
| 278 |
+
|
| 279 |
+
例えば、40フレームの動画を例とした抽出について、以下の図で説明します。
|
| 280 |
+
</details>
|
| 281 |
+
|
| 282 |
+
```
|
| 283 |
+
Original Video, 40 frames: x = frame, o = no frame
|
| 284 |
+
oooooooooooooooooooooooooooooooooooooooo
|
| 285 |
+
|
| 286 |
+
head, target_frames = [1, 13, 25] -> extract head frames:
|
| 287 |
+
xooooooooooooooooooooooooooooooooooooooo
|
| 288 |
+
xxxxxxxxxxxxxooooooooooooooooooooooooooo
|
| 289 |
+
xxxxxxxxxxxxxxxxxxxxxxxxxooooooooooooooo
|
| 290 |
+
|
| 291 |
+
chunk, target_frames = [13, 25] -> extract frames by splitting into chunks, into 13 and 25 frames:
|
| 292 |
+
xxxxxxxxxxxxxooooooooooooooooooooooooooo
|
| 293 |
+
oooooooooooooxxxxxxxxxxxxxoooooooooooooo
|
| 294 |
+
ooooooooooooooooooooooooooxxxxxxxxxxxxxo
|
| 295 |
+
xxxxxxxxxxxxxxxxxxxxxxxxxooooooooooooooo
|
| 296 |
+
|
| 297 |
+
NOTE: Please do not include 1 in target_frames if you are using the frame_extraction "chunk". It will cause all frames to be extracted.
|
| 298 |
+
注: frame_extraction "chunk" を使用する場合、target_frames に 1 を含めないでください。全てのフレームが抽出されてしまいます。
|
| 299 |
+
|
| 300 |
+
slide, target_frames = [1, 13, 25], frame_stride = 10 -> extract N frames with a stride of 10:
|
| 301 |
+
xooooooooooooooooooooooooooooooooooooooo
|
| 302 |
+
ooooooooooxooooooooooooooooooooooooooooo
|
| 303 |
+
ooooooooooooooooooooxooooooooooooooooooo
|
| 304 |
+
ooooooooooooooooooooooooooooooxooooooooo
|
| 305 |
+
xxxxxxxxxxxxxooooooooooooooooooooooooooo
|
| 306 |
+
ooooooooooxxxxxxxxxxxxxooooooooooooooooo
|
| 307 |
+
ooooooooooooooooooooxxxxxxxxxxxxxooooooo
|
| 308 |
+
xxxxxxxxxxxxxxxxxxxxxxxxxooooooooooooooo
|
| 309 |
+
ooooooooooxxxxxxxxxxxxxxxxxxxxxxxxxooooo
|
| 310 |
+
|
| 311 |
+
uniform, target_frames = [1, 13, 25], frame_sample = 4 -> extract `frame_sample` samples uniformly, N frames each:
|
| 312 |
+
xooooooooooooooooooooooooooooooooooooooo
|
| 313 |
+
oooooooooooooxoooooooooooooooooooooooooo
|
| 314 |
+
oooooooooooooooooooooooooxoooooooooooooo
|
| 315 |
+
ooooooooooooooooooooooooooooooooooooooox
|
| 316 |
+
xxxxxxxxxxxxxooooooooooooooooooooooooooo
|
| 317 |
+
oooooooooxxxxxxxxxxxxxoooooooooooooooooo
|
| 318 |
+
ooooooooooooooooooxxxxxxxxxxxxxooooooooo
|
| 319 |
+
oooooooooooooooooooooooooooxxxxxxxxxxxxx
|
| 320 |
+
xxxxxxxxxxxxxxxxxxxxxxxxxooooooooooooooo
|
| 321 |
+
oooooxxxxxxxxxxxxxxxxxxxxxxxxxoooooooooo
|
| 322 |
+
ooooooooooxxxxxxxxxxxxxxxxxxxxxxxxxooooo
|
| 323 |
+
oooooooooooooooxxxxxxxxxxxxxxxxxxxxxxxxx
|
| 324 |
+
|
| 325 |
+
Three Original Videos, 20, 25, 35 frames: x = frame, o = no frame
|
| 326 |
+
|
| 327 |
+
full, max_frames = 31 -> extract all frames (trimmed to the maximum length):
|
| 328 |
+
video1: xxxxxxxxxxxxxxxxx (trimmed to 17 frames)
|
| 329 |
+
video2: xxxxxxxxxxxxxxxxxxxxxxxxx (25 frames)
|
| 330 |
+
video3: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx (trimmed to 31 frames)
|
| 331 |
+
```
|
| 332 |
+
|
| 333 |
+
### Sample for Image Dataset with Control Images
|
| 334 |
+
|
| 335 |
+
The dataset with control images. This is used for the one frame training for FramePack, or for FLUX.1 Kontext, FLUX.2 and Qwen-Image-Edit training.
|
| 336 |
+
|
| 337 |
+
The dataset configuration with caption text files is similar to the image dataset, but with an additional `control_directory` parameter.
|
| 338 |
+
|
| 339 |
+
The control images are used from the `control_directory` with the same filename (or different extension) as the image, for example, `image_dir/image1.jpg` and `control_dir/image1.png`. The images in `image_directory` should be the target images (the images to be generated during inference, the changed images). The `control_directory` should contain the starting images for inference. The captions should be stored in `image_directory`.
|
| 340 |
+
|
| 341 |
+
If multiple control images are specified, the filenames of the control images should be numbered (excluding the extension). For example, specify `image_dir/image1.jpg` and `control_dir/image1_0.png`, `control_dir/image1_1.png`. You can also specify the numbers with four digits, such as `image1_0000.png`, `image1_0001.png`.
|
| 342 |
+
|
| 343 |
+
The metadata JSONL file format is the same as the image dataset, but with an additional `control_path` parameter.
|
| 344 |
+
|
| 345 |
+
```json
|
| 346 |
+
{"image_path": "/path/to/image1.jpg", "control_path": "/path/to/control1.png", "caption": "A caption for image1"}
|
| 347 |
+
{"image_path": "/path/to/image2.jpg", "control_path": "/path/to/control2.png", "caption": "A caption for image2"}
|
| 348 |
+
|
| 349 |
+
If multiple control images are specified, the attribute names should be `control_path_0`, `control_path_1`, etc.
|
| 350 |
+
|
| 351 |
+
```json
|
| 352 |
+
{"image_path": "/path/to/image1.jpg", "control_path_0": "/path/to/control1_0.png", "control_path_1": "/path/to/control1_1.png", "caption": "A caption for image1"}
|
| 353 |
+
{"image_path": "/path/to/image2.jpg", "control_path_0": "/path/to/control2_0.png", "control_path_1": "/path/to/control2_1.png", "caption": "A caption for image2"}
|
| 354 |
+
```
|
| 355 |
+
|
| 356 |
+
The control images can also have an alpha channel. In this case, the alpha channel of the image is used as a mask for the latent. This is only for the one frame training of FramePack.
|
| 357 |
+
|
| 358 |
+
<details>
|
| 359 |
+
<summary>日本語</summary>
|
| 360 |
+
|
| 361 |
+
制御画像を持つデータセットです。現時点ではFramePackの単一フレーム学習、FLUX.1 Kontext、FLUX.2、Qwen-Image-Editの学習に使用します。
|
| 362 |
+
|
| 363 |
+
キャプションファイルを用いる場合は`control_directory`を追加で指定してください。制御画像は、画像と同じファイル名(または拡張子のみが異なるファイル名)の、`control_directory`にある画像が使用されます(例:`image_dir/image1.jpg`と`control_dir/image1.png`)。`image_directory`の画像は学習対象の画像(推論時に生成する画像、変化後の画像)としてください。`control_directory`には推論時の開始画像を格納してください。キャプションは`image_directory`へ格納してください。
|
| 364 |
+
|
| 365 |
+
複数枚の制御画像が指定可能です。この場合、制御画像のファイル名(拡張子を除く)へ数字を付与してください。例えば、`image_dir/image1.jpg`と`control_dir/image1_0.png`, `control_dir/image1_1.png`のように指定します。`image1_0000.png`, `image1_0001.png`のように数字を4桁で指定することもできます。
|
| 366 |
+
|
| 367 |
+
メタデータJSONLファイルを使用する場合は、`control_path`を追加してください。複数枚の制御画像を指定する場合は、`control_path_0`, `control_path_1`のように数字を付与してください。
|
| 368 |
+
|
| 369 |
+
FramePackの単一フレーム学習では、制御画像はアルファチャンネルを持つこともできます。この場合、画像のアルファチャンネルはlatentへのマスクとして使用されます。
|
| 370 |
+
|
| 371 |
+
</details>
|
| 372 |
+
|
| 373 |
+
### Resizing Control Images for Image Dataset / 画像データセットでの制御画像のリサイズ
|
| 374 |
+
|
| 375 |
+
By default, the control images are resized to the same size as the target images. You can change the resizing method with the following options:
|
| 376 |
+
|
| 377 |
+
- `no_resize_control`: Do not resize the control images. They will be cropped to match the rounding unit of each architecture (for example, 16 pixels).
|
| 378 |
+
- `control_resolution`: Resize the control images to the specified resolution. For example, specify `control_resolution = [1024, 1024]`. Aspect Ratio Bucketing will be applied.
|
| 379 |
+
|
| 380 |
+
```toml
|
| 381 |
+
[[datasets]]
|
| 382 |
+
# Image directory or metadata jsonl file as above
|
| 383 |
+
image_directory = "/path/to/image_dir"
|
| 384 |
+
control_directory = "/path/to/control_dir"
|
| 385 |
+
control_resolution = [1024, 1024]
|
| 386 |
+
no_resize_control = false
|
| 387 |
+
```
|
| 388 |
+
|
| 389 |
+
If both are specified, `control_resolution` is treated as the maximum resolution. That is, if the total number of pixels of the control image exceeds that of `control_resolution`, it will be resized to `control_resolution`.
|
| 390 |
+
|
| 391 |
+
The recommended resizing method for control images may vary depending on the architecture. Please refer to the section for each architecture.
|
| 392 |
+
|
| 393 |
+
The previous options `flux_kontext_no_resize_control` and `qwen_image_edit_no_resize_control` are still available, but it is recommended to use `no_resize_control`.
|
| 394 |
+
|
| 395 |
+
The `qwen_image_edit_control_resolution` is also available, but it is recommended to use `control_resolution`.
|
| 396 |
+
|
| 397 |
+
|
| 398 |
+
**The technical details of `no_resize_control`:**
|
| 399 |
+
|
| 400 |
+
When this option is specified, the control image is trimmed to a multiple of 16 pixels (depending on the architecture) and converted to latent and passed to the model.
|
| 401 |
+
|
| 402 |
+
Each element in the batch must have the same resolution, which is adjusted by advanced Aspect Ratio Bucketing (buckets are divided by the resolution of the target image and also the resolution of the control image).
|
| 403 |
+
|
| 404 |
+
<details>
|
| 405 |
+
<summary>日本語</summary>
|
| 406 |
+
|
| 407 |
+
デフォルトでは、制御画像はターゲット画像と同じサイズにリサイズされます。以下のオプションで、リサイズ方式を変更できます。
|
| 408 |
+
|
| 409 |
+
- `no_resize_control`: 制御画像をリサイズしません。アーキテクチャごとの丸め単位(16ピクセルなど)に合わせてトリミングされます。
|
| 410 |
+
- `control_resolution`: 制御画像を指定した解像度にリサイズします。例えば、`control_resolution = [1024, 1024]`と指定します。Aspect Ratio Bucketingが適用されます。
|
| 411 |
+
|
| 412 |
+
両方が同時に指定されると、`control_resolution`は最大解像度として扱われます。つまり、制御画像の総ピクセル数が`control_resolution`の総ピクセル数を超える場合、`control_resolution`にリサイズされます。
|
| 413 |
+
|
| 414 |
+
アーキテクチャにより、推奨の制御画像のリサイズ方法は異なります。各アーキテクチャの節を参照してください。
|
| 415 |
+
|
| 416 |
+
以前のオプション`flux_kontext_no_resize_control`と`qwen_image_edit_no_resize_control`は使用可能ですが、`no_resize_control`を使用することを推奨します。
|
| 417 |
+
|
| 418 |
+
`qwen_image_edit_control_resolution`も使用可能ですが、`control_resolution`を使用することを推奨します。
|
| 419 |
+
|
| 420 |
+
**`no_resize_control`の技術的な詳細:**
|
| 421 |
+
|
| 422 |
+
このオプションが指定された場合、制御画像は16ピクセルの倍数(アーキテクチャに依存)にトリミングされ、latentに変換されてモデルに渡されます。
|
| 423 |
+
|
| 424 |
+
バッチ内の各要素は同じ解像度である必要がありますが、ターゲット画像の解像度と制御画像の解像度の両方でバケットが分割される高度なAspect Ratio Bucketingによって調整されます。
|
| 425 |
+
|
| 426 |
+
</details>
|
| 427 |
+
|
| 428 |
+
### Sample for Video Dataset with Control Images
|
| 429 |
+
|
| 430 |
+
The dataset with control videos is used for training ControlNet models.
|
| 431 |
+
|
| 432 |
+
The dataset configuration with caption text files is similar to the video dataset, but with an additional `control_directory` parameter.
|
| 433 |
+
|
| 434 |
+
The control video for a video is used from the `control_directory` with the same filename (or different extension) as the video, for example, `video_dir/video1.mp4` and `control_dir/video1.mp4` or `control_dir/video1.mov`. The control video can also be a directory without an extension, for example, `video_dir/video1.mp4` and `control_dir/video1`.
|
| 435 |
+
|
| 436 |
+
```toml
|
| 437 |
+
[[datasets]]
|
| 438 |
+
video_directory = "/path/to/video_dir"
|
| 439 |
+
control_directory = "/path/to/control_dir" # required for dataset with control videos
|
| 440 |
+
cache_directory = "/path/to/cache_directory" # recommended to set cache directory
|
| 441 |
+
target_frames = [1, 25, 45]
|
| 442 |
+
frame_extraction = "head"
|
| 443 |
+
```
|
| 444 |
+
|
| 445 |
+
The dataset configuration with metadata JSONL file is same as the video dataset, but metadata JSONL file must include the control video paths. The control video path can be a directory containing multiple images.
|
| 446 |
+
|
| 447 |
+
```json
|
| 448 |
+
{"video_path": "/path/to/video1.mp4", "control_path": "/path/to/control1.mp4", "caption": "A caption for video1"}
|
| 449 |
+
{"video_path": "/path/to/video2.mp4", "control_path": "/path/to/control2.mp4", "caption": "A caption for video2"}
|
| 450 |
+
```
|
| 451 |
+
|
| 452 |
+
<details>
|
| 453 |
+
<summary>日本語</summary>
|
| 454 |
+
|
| 455 |
+
制御動画を持つデータセットです。ControlNetモデルの学習に使用します。
|
| 456 |
+
|
| 457 |
+
キャプションを用いる場合のデータセット設定は動画データセットと似ていますが、`control_directory`パラメータが追加されています。上にある例を参照してください。ある動画に対する制御用動画として、動画と同じファイル名(または拡張子のみが異なるファイル名)の、`control_directory`にある動画が使用されます(例:`video_dir/video1.mp4`と`control_dir/video1.mp4`または`control_dir/video1.mov`)。また、拡張子なしのディレクトリ内の、複数枚の画像を制御用動画として使用することもできます(例:`video_dir/video1.mp4`と`control_dir/video1`)。
|
| 458 |
+
|
| 459 |
+
データセット設定でメタデータJSONLファイルを使用する場合は、動画と制御用動画のパスを含める必要があります。制御用動画のパスは、複数枚の画像を含むディレクトリのパスでも構いません。
|
| 460 |
+
|
| 461 |
+
</details>
|
| 462 |
+
|
| 463 |
+
## Architecture-specific Settings / アーキテクチャ固有の設定
|
| 464 |
+
|
| 465 |
+
The dataset configuration is shared across all architectures. However, some architectures may require additional settings or have specific requirements for the dataset.
|
| 466 |
+
|
| 467 |
+
### FramePack
|
| 468 |
+
|
| 469 |
+
For FramePack, you can set the latent window size for training. It is recommended to set it to 9 for FramePack training. The default value is 9, so you can usually omit this setting.
|
| 470 |
+
|
| 471 |
+
```toml
|
| 472 |
+
[[datasets]]
|
| 473 |
+
fp_latent_window_size = 9
|
| 474 |
+
```
|
| 475 |
+
|
| 476 |
+
<details>
|
| 477 |
+
<summary>日本語</summary>
|
| 478 |
+
|
| 479 |
+
学習時のlatent window sizeを指定できます。FramePackの学習においては、9を指定することを推奨します。省略時は9が使用されますので、通常は省略して構いません。
|
| 480 |
+
|
| 481 |
+
</details>
|
| 482 |
+
|
| 483 |
+
### FramePack One Frame Training
|
| 484 |
+
|
| 485 |
+
For the default one frame training of FramePack, you need to set the following parameters in the dataset configuration:
|
| 486 |
+
|
| 487 |
+
```toml
|
| 488 |
+
[[datasets]]
|
| 489 |
+
fp_1f_clean_indices = [0]
|
| 490 |
+
fp_1f_target_index = 9
|
| 491 |
+
fp_1f_no_post = false
|
| 492 |
+
```
|
| 493 |
+
|
| 494 |
+
**Advanced Settings:**
|
| 495 |
+
|
| 496 |
+
**Note that these parameters are still experimental, and the optimal values are not yet known.** The parameters may also change in the future.
|
| 497 |
+
|
| 498 |
+
`fp_1f_clean_indices` sets the `clean_indices` value passed to the FramePack model. You can specify multiple indices. `fp_1f_target_index` sets the index of the frame to be trained (generated). `fp_1f_no_post` sets whether to add a zero value as `clean_latent_post`, default is `false` (add zero value).
|
| 499 |
+
|
| 500 |
+
The number of control images should match the number of indices specified in `fp_1f_clean_indices`.
|
| 501 |
+
|
| 502 |
+
The default values mean that the first image (control image) is at index `0`, and the target image (the changed image) is at index `9`.
|
| 503 |
+
|
| 504 |
+
For training with 1f-mc, set `fp_1f_clean_indices` to `[0, 1]` and `fp_1f_target_index` to `9` (or another value). This allows you to use multiple control images to train a single generated image. The control images will be two in this case.
|
| 505 |
+
|
| 506 |
+
```toml
|
| 507 |
+
[[datasets]]
|
| 508 |
+
fp_1f_clean_indices = [0, 1]
|
| 509 |
+
fp_1f_target_index = 9
|
| 510 |
+
fp_1f_no_post = false
|
| 511 |
+
```
|
| 512 |
+
|
| 513 |
+
For training with kisekaeichi, set `fp_1f_clean_indices` to `[0, 10]` and `fp_1f_target_index` to `1` (or another value). This allows you to use the starting image (the image just before the generation section) and the image following the generation section (equivalent to `clean_latent_post`) to train the first image of the generated video. The control images will be two in this case. `fp_1f_no_post` should be set to `true`.
|
| 514 |
+
|
| 515 |
+
```toml
|
| 516 |
+
[[datasets]]
|
| 517 |
+
fp_1f_clean_indices = [0, 10]
|
| 518 |
+
fp_1f_target_index = 1
|
| 519 |
+
fp_1f_no_post = true
|
| 520 |
+
```
|
| 521 |
+
|
| 522 |
+
With `fp_1f_clean_indices` and `fp_1f_target_index`, you can specify any number of control images and any index of the target image for training.
|
| 523 |
+
|
| 524 |
+
If you set `fp_1f_no_post` to `false`, the `clean_latent_post_index` will be `1 + fp_latent_window_size`.
|
| 525 |
+
|
| 526 |
+
You can also set the `no_2x` and `no_4x` options for cache scripts to disable the clean latents 2x and 4x.
|
| 527 |
+
|
| 528 |
+
The 2x indices are `1 + fp_latent_window_size + 1` for two indices (usually `11, 12`), and the 4x indices are `1 + fp_latent_window_size + 1 + 2` for sixteen indices (usually `13, 14, ..., 28`), regardless of `fp_1f_no_post` and `no_2x`, `no_4x` settings.
|
| 529 |
+
|
| 530 |
+
<details>
|
| 531 |
+
<summary>日本語</summary>
|
| 532 |
+
|
| 533 |
+
※ **以下のパラメータは研究中で最適値はまだ不明です。** またパラメータ自体も変更される可能性があります。
|
| 534 |
+
|
| 535 |
+
デフォルトの1フレーム学習を行う場合、`fp_1f_clean_indices`に`[0]`を、`fp_1f_target_index`に`9`(または5から15程度の値)を、`fp_1f_no_post`に`false`を設定してください。(記述例は英語版ドキュメントを参照、以降同じ。)
|
| 536 |
+
|
| 537 |
+
**より高度な設定:**
|
| 538 |
+
|
| 539 |
+
`fp_1f_clean_indices`は、FramePackモデルに渡される `clean_indices` の値を設定します。複数指定が可能です。`fp_1f_target_index`は、学習(生成)対象のフレームのインデックスを設定します。`fp_1f_no_post`は、`clean_latent_post` をゼロ値で追加するかどうかを設定します(デフォルトは`false`で、ゼロ値で追加します)。
|
| 540 |
+
|
| 541 |
+
制御画像の枚数は`fp_1f_clean_indices`に指定したインデックスの数とあわせてください。
|
| 542 |
+
|
| 543 |
+
デフォルトの1フレーム学習では、開始画像(制御画像)1枚をインデックス`0`、生成対象の画像(変化後の画像)をインデックス`9`に設定しています。
|
| 544 |
+
|
| 545 |
+
1f-mcの学習を行う場合は、`fp_1f_clean_indices`に `[0, 1]`を、`fp_1f_target_index`に`9`を設定してください。これにより動画の先頭の2枚の制御画像を使用して、後続の1枚の生成画像を学習します。制御画像は2枚になります。
|
| 546 |
+
|
| 547 |
+
kisekaeichiの学習を行う場合は、`fp_1f_clean_indices`に `[0, 10]`を、`fp_1f_target_index`に`1`(または他の値)を設定してください。これは、開始画像(生成セクションの直前の画像)(`clean_latent_pre`に相当)と、生成セクションに続く1枚の画像(`clean_latent_post`に相当)を使用して、生成動画の先頭の画像(`target_index=1`)を学習します。制御画像は2枚になります。`fp_1f_no_post`は`true`に設定してください。
|
| 548 |
+
|
| 549 |
+
`fp_1f_clean_indices`と`fp_1f_target_index`を応用することで、任意の枚数の制御画像を、任意のインデックスを指定して学習することが可能です。
|
| 550 |
+
|
| 551 |
+
`fp_1f_no_post`を`false`に設定すると、`clean_latent_post_index`は `1 + fp_latent_window_size` になります。
|
| 552 |
+
|
| 553 |
+
推論時の `no_2x`、`no_4x`に対応する設定は、キャッシュスクリプトの引数で行えます。なお、2xのindexは `1 + fp_latent_window_size + 1` からの2個(通常は`11, 12`)、4xのindexは `1 + fp_latent_window_size + 1 + 2` からの16個になります(通常は`13, 14, ..., 28`)です。これらの値は`fp_1f_no_post`や`no_2x`, `no_4x`の設定に関わらず、常に同じです。
|
| 554 |
+
|
| 555 |
+
</details>
|
| 556 |
+
|
| 557 |
+
### FLUX.1 Kontext [dev]
|
| 558 |
+
|
| 559 |
+
The FLUX.1 Kontext dataset configuration uses an image dataset with control images. However, only one control image can be used.
|
| 560 |
+
|
| 561 |
+
`fp_1f_*` settings are not used in FLUX.1 Kontext. Masks are also not used.
|
| 562 |
+
|
| 563 |
+
If you set `no_resize_control`, it disables resizing of the control image.
|
| 564 |
+
|
| 565 |
+
Since FLUX.1 Kontext assumes a fixed [resolution of control images](https://github.com/black-forest-labs/flux/blob/1371b2bc70ac80e1078446308dd5b9a2ebc68c87/src/flux/util.py#L584), it may be better to prepare the control images in advance to match these resolutions and use `no_resize_control`.
|
| 566 |
+
|
| 567 |
+
<details>
|
| 568 |
+
<summary>日本語</summary>
|
| 569 |
+
|
| 570 |
+
FLUX.1 Kontextのデータセット設定は、制御画像を持つ画像データセットを使用します。ただし、制御画像は1枚しか使用できません。
|
| 571 |
+
|
| 572 |
+
`fp_1f_*`の設定はFLUX.1 Kontextでは使用しません。またマスクも使用されません。
|
| 573 |
+
|
| 574 |
+
また、`no_resize_control`を設定すると、制御画像のリサイズを無効にします。
|
| 575 |
+
|
| 576 |
+
FLUX.1 Kontextは[制御画像の固定解像度](https://github.com/black-forest-labs/flux/blob/1371b2bc70ac80e1078446308dd5b9a2ebc68c87/src/flux/util.py#L584)を想定しているため、これらの解像度にあわせて制御画像を事前に用意し、`no_resize_control`を使用する方が良い場合があります。
|
| 577 |
+
|
| 578 |
+
</details>
|
| 579 |
+
|
| 580 |
+
### Qwen-Image-Edit and Qwen-Image-Edit-2509/2511
|
| 581 |
+
|
| 582 |
+
The Qwen-Image-Edit dataset configuration uses an image dataset with control images. Multiple control images can be used, but only one control image can be used for the standard model (not `2509` or `2511`).
|
| 583 |
+
|
| 584 |
+
By default, the control image is resized to the same resolution (and aspect ratio) as the image.
|
| 585 |
+
|
| 586 |
+
If you set `no_resize_control`, it disables resizing of the control image. For example, if the image is 960x544 and the control image is 512x512, the control image will remain 512x512.
|
| 587 |
+
|
| 588 |
+
Also, you can specify the resolution of the control image separately from the training image resolution by using `control_resolution`. If you want to resize the control images the same as the official code, specify [1024,1024]. **We strongly recommend specifying this value.**
|
| 589 |
+
|
| 590 |
+
`no_resize_control` can be specified together with `control_resolution`.
|
| 591 |
+
|
| 592 |
+
If `no_resize_control` or `control_resolution` is specified, each control image can have a different resolution. The control image is resized according to the specified settings.
|
| 593 |
+
|
| 594 |
+
```toml
|
| 595 |
+
[[datasets]]
|
| 596 |
+
no_resize_control = false # optional, default is false. Disable resizing of control image
|
| 597 |
+
control_resolution = [1024, 1024] # optional, default is None. Specify the resolution of the control image.
|
| 598 |
+
```
|
| 599 |
+
|
| 600 |
+
`fp_1f_*` settings are not used in Qwen-Image-Edit.
|
| 601 |
+
|
| 602 |
+
<details>
|
| 603 |
+
<summary>日本語</summary>
|
| 604 |
+
|
| 605 |
+
Qwen-Image-Editのデータセット設定は、制御画像を持つ画像データセットを使用します。複数枚の制御画像も使用可能ですが、無印(`2509`または`2511`でない)モデルでは1枚のみ使用可能です。
|
| 606 |
+
|
| 607 |
+
デフォルトでは、制御画像は画像と同じ解像度(およびアスペクト比)にリサイズされます。
|
| 608 |
+
|
| 609 |
+
`no_resize_control`を設定すると、制御画像のリサイズを無効にします。たとえば、画像が960x544で制御画像が512x512の場合、制御画像は512x512のままになります。
|
| 610 |
+
|
| 611 |
+
また、`control_resolution`を使用することで、制御画像の解像度を学習画像の解像度と異なる値に指定できます。公式のコードと同じように制御画像をリサイズしたい場合は、[1024, 1024]を指定してください。**この値の指定を強く推奨します。**
|
| 612 |
+
|
| 613 |
+
`no_resize_control`と `control_resolution`は同時に指定できます。
|
| 614 |
+
|
| 615 |
+
`no_resize_control`または`control_resolution`が指定された場合、各制御画像は異なる解像度を持つことができます。制御画像は指定された設定に従ってリサイズされます。
|
| 616 |
+
|
| 617 |
+
```toml
|
| 618 |
+
[[datasets]]
|
| 619 |
+
no_resize_control = false # オプション、デフォルトはfalse。制御画像のリサイズを無効にします
|
| 620 |
+
control_resolution = [1024, 1024] # オプション、デフォルトはNone。制御画像の解像度を指定します
|
| 621 |
+
```
|
| 622 |
+
|
| 623 |
+
`fp_1f_*`の設定はQwen-Image-Editでは使用しません。
|
| 624 |
+
|
| 625 |
+
</details>
|
| 626 |
+
|
| 627 |
+
### FLUX.2
|
| 628 |
+
|
| 629 |
+
The FLUX.2 dataset configuration uses an image dataset with control images (it can also be trained without control images). Multiple control images can be used.
|
| 630 |
+
|
| 631 |
+
`fp_1f_*` settings are not used in FLUX.2.
|
| 632 |
+
|
| 633 |
+
If you set `no_resize_control`, it disables resizing of the control images. If you want to follow the official FLUX.2 inference settings, please specify this option.
|
| 634 |
+
|
| 635 |
+
You can specify the resolution of the control images separately from the training image resolution by using `control_resolution`. If you want to follow the official FLUX.2 inference settings, specify [2024, 2024] (note that it is not 2048) when there is one control image, and [1024, 1024] when there are multiple control images, together with the `no_resize_control` option.
|
| 636 |
+
|
| 637 |
+
<details>
|
| 638 |
+
<summary>日本語</summary>
|
| 639 |
+
|
| 640 |
+
FLUX.2のデータセット設定は、制御画像を持つ画像データセットを使用します(制御画像なしでも学習できます)。複数枚の制御画像が使用可能です。
|
| 641 |
+
|
| 642 |
+
`fp_1f_*`の設定はFLUX.2では使用しません。
|
| 643 |
+
|
| 644 |
+
`no_resize_control`を設定すると、制御画像のリサイズを無効にします。FLUX.2公式の推論時設定に準拠する場合は、このオプションを指定してください。
|
| 645 |
+
|
| 646 |
+
`control_resolution`を使用して、制御画像の解像度を学習画像の解像度と異なる値に指定できます。FLUX.2公式の推論時設定に準拠する場合は、`no_resize_control`オプションと同時に、制御画像が1枚の場合は`[2024, 2024]`(2048ではないので注意)、制御画像が複数の場合は`[1024, 1024]`を指定してください。
|
| 647 |
+
|
| 648 |
+
</details>
|
| 649 |
+
|
| 650 |
+
## Specifications
|
| 651 |
+
|
| 652 |
+
```toml
|
| 653 |
+
# general configurations
|
| 654 |
+
[general]
|
| 655 |
+
resolution = [960, 544] # optional, [W, H], default is [960, 544]. This is the default resolution for all datasets
|
| 656 |
+
caption_extension = ".txt" # optional, default is None. This is the default caption extension for all datasets
|
| 657 |
+
batch_size = 1 # optional, default is 1. This is the default batch size for all datasets
|
| 658 |
+
num_repeats = 1 # optional, default is 1. Number of times to repeat the dataset. Useful to balance the multiple datasets with different sizes.
|
| 659 |
+
enable_bucket = true # optional, default is false. Enable bucketing for datasets
|
| 660 |
+
bucket_no_upscale = false # optional, default is false. Disable upscaling for bucketing. Ignored if enable_bucket is false
|
| 661 |
+
|
| 662 |
+
### Image Dataset
|
| 663 |
+
|
| 664 |
+
# sample image dataset with caption text files
|
| 665 |
+
[[datasets]]
|
| 666 |
+
image_directory = "/path/to/image_dir"
|
| 667 |
+
caption_extension = ".txt" # required for caption text files, if general caption extension is not set
|
| 668 |
+
resolution = [960, 544] # required if general resolution is not set
|
| 669 |
+
batch_size = 4 # optional, overwrite the default batch size
|
| 670 |
+
num_repeats = 1 # optional, overwrite the default num_repeats
|
| 671 |
+
enable_bucket = false # optional, overwrite the default bucketing setting
|
| 672 |
+
bucket_no_upscale = true # optional, overwrite the default bucketing setting
|
| 673 |
+
cache_directory = "/path/to/cache_directory" # optional, default is None to use the same directory as the image directory. NOTE: caching is always enabled
|
| 674 |
+
control_directory = "/path/to/control_dir" # optional, required for dataset with control images
|
| 675 |
+
|
| 676 |
+
# sample image dataset with metadata **jsonl** file
|
| 677 |
+
[[datasets]]
|
| 678 |
+
image_jsonl_file = "/path/to/metadata.jsonl" # includes pairs of image files and captions
|
| 679 |
+
resolution = [960, 544] # required if general resolution is not set
|
| 680 |
+
cache_directory = "/path/to/cache_directory" # required for metadata jsonl file
|
| 681 |
+
# caption_extension is not required for metadata jsonl file
|
| 682 |
+
# batch_size, num_repeats, enable_bucket, bucket_no_upscale are also available for metadata jsonl file
|
| 683 |
+
|
| 684 |
+
### Video Dataset
|
| 685 |
+
|
| 686 |
+
# sample video dataset with caption text files
|
| 687 |
+
[[datasets]]
|
| 688 |
+
video_directory = "/path/to/video_dir"
|
| 689 |
+
caption_extension = ".txt" # required for caption text files, if general caption extension is not set
|
| 690 |
+
resolution = [960, 544] # required if general resolution is not set
|
| 691 |
+
|
| 692 |
+
control_directory = "/path/to/control_dir" # optional, required for dataset with control images
|
| 693 |
+
|
| 694 |
+
# following configurations must be set in each [[datasets]] section for video datasets
|
| 695 |
+
|
| 696 |
+
target_frames = [1, 25, 79] # required for video dataset. list of video lengths to extract frames. each element must be N*4+1 (N=0,1,2,...)
|
| 697 |
+
|
| 698 |
+
# NOTE: Please do not include 1 in target_frames if you are using the frame_extraction "chunk". It will cause all frames to be extracted.
|
| 699 |
+
|
| 700 |
+
frame_extraction = "head" # optional, "head" or "chunk", "full", "slide", "uniform". Default is "head"
|
| 701 |
+
frame_stride = 1 # optional, default is 1, available for "slide" frame extraction
|
| 702 |
+
frame_sample = 4 # optional, default is 1 (same as "head"), available for "uniform" frame extraction
|
| 703 |
+
max_frames = 129 # optional, default is 129. Maximum number of frames to extract, available for "full" frame extraction
|
| 704 |
+
# batch_size, num_repeats, enable_bucket, bucket_no_upscale, cache_directory are also available for video dataset
|
| 705 |
+
|
| 706 |
+
# sample video dataset with metadata jsonl file
|
| 707 |
+
[[datasets]]
|
| 708 |
+
video_jsonl_file = "/path/to/metadata.jsonl" # includes pairs of video files and captions
|
| 709 |
+
|
| 710 |
+
target_frames = [1, 79]
|
| 711 |
+
|
| 712 |
+
cache_directory = "/path/to/cache_directory" # required for metadata jsonl file
|
| 713 |
+
# frame_extraction, frame_stride, frame_sample, max_frames are also available for metadata jsonl file
|
| 714 |
+
```
|
| 715 |
+
|
| 716 |
+
<!--
|
| 717 |
+
# sample image dataset with lance
|
| 718 |
+
[[datasets]]
|
| 719 |
+
image_lance_dataset = "/path/to/lance_dataset"
|
| 720 |
+
resolution = [960, 544] # required if general resolution is not set
|
| 721 |
+
# batch_size, enable_bucket, bucket_no_upscale, cache_directory are also available for lance dataset
|
| 722 |
+
-->
|
| 723 |
+
|
| 724 |
+
Metadata in .json file format will be supported in the near future.
|
docs/flux_2.md
ADDED
|
@@ -0,0 +1,285 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# FLUX.2
|
| 2 |
+
|
| 3 |
+
## Overview / 概要
|
| 4 |
+
|
| 5 |
+
This document describes the usage of the [FLUX.2](https://huggingface.co/black-forest-labs/FLUX.2-dev) \[dev\] architecture within the Musubi Tuner framework. FLUX.2-dev is an image generation and editing model that can take reference images as input.
|
| 6 |
+
|
| 7 |
+
This feature is experimental.
|
| 8 |
+
|
| 9 |
+
Latent pre-caching, training, and inference options can be found in the `--help` output. Many options are shared with HunyuanVideo, so refer to the [HunyuanVideo documentation](./hunyuan_video.md) as needed.
|
| 10 |
+
|
| 11 |
+
<details>
|
| 12 |
+
<summary>日本語</summary>
|
| 13 |
+
|
| 14 |
+
</details>
|
| 15 |
+
|
| 16 |
+
## Download the model / モデルのダウンロード
|
| 17 |
+
|
| 18 |
+
You need to download the DiT, AE, Text Encoder models.
|
| 19 |
+
|
| 20 |
+
### FLUX.2 [dev]
|
| 21 |
+
|
| 22 |
+
- **DiT, AE**: Download from the [black-forest-labs/FLUX.2-dev](https://huggingface.co/black-forest-labs/FLUX.2-dev) repository. Use `flux2-dev.safetensors` and `ae.safetensors`. The weights in the subfolder are in Diffusers format and cannot be used.
|
| 23 |
+
- **Text Encoder (Mistral 3)**: Download all the split files from the [black-forest-labs/FLUX.2-dev](https://huggingface.co/black-forest-labs/FLUX.2-dev) repository and specify the first file (e.g., `00001-of-00010.safetensors`) in the arguments.
|
| 24 |
+
|
| 25 |
+
<details>
|
| 26 |
+
<summary>日本語</summary>
|
| 27 |
+
|
| 28 |
+
DiT, AE, Text Encoder のモデルをダウンロードする必要があります。
|
| 29 |
+
|
| 30 |
+
- **DiT, AE**: [black-forest-labs/FLUX.2-dev](https://huggingface.co/black-forest-labs/FLUX.2-dev) リポジトリからダウンロードしてください。`flux2-dev.safetensors` および `ae.safetensors` を使用してください。サブフォルダ内の重みはDiffusers形式なので使用できません。
|
| 31 |
+
- **Text Encoder (Mistral 3)**: [black-forest-labs/FLUX.2-dev](https://huggingface.co/black-forest-labs/FLUX.2-dev) リポジトリから分割されたすべてのファイルをダウンロードし、最初のファイル(例:`00001-of-00010.safetensors`)を引数で指定してください。
|
| 32 |
+
</details>
|
| 33 |
+
|
| 34 |
+
### FLUX.2 [klein] 4B / base 4B
|
| 35 |
+
|
| 36 |
+
- **DiT 4B**: Download from the [black-forest-labs/FLUX.2-klein-4B](https://huggingface.co/black-forest-labs/FLUX.2-klein-4B) repository. Use `flux2-klein-4b.safetensors`.
|
| 37 |
+
- **DiT base 4B**: Download from the [black-forest-labs/FLUX.2-klein-base-4B](https://huggingface.co/black-forest-labs/FLUX.2-klein-base-4B) repository. Use `flux2-klein-base-4b.safetensors`.
|
| 38 |
+
- **AE**: Download from the [black-forest-labs/FLUX.2](https://huggingface.co/black-forest-labs/FLUX.2-dev) repository. Use `ae.safetensors`. `vae/diffusion_pytorch_model.safetensors` in the subfolder is in Diffusers format and cannot be used.
|
| 39 |
+
- **Qwen3 4B Text Encoder**: Download all the split files from the [black-forest-labs/FLUX.2-klein-4B](https://huggingface.co/black-forest-labs/FLUX.2-klein-4B) repository and specify the first file (e.g., `00001-of-00002.safetensors`) in the arguments.
|
| 40 |
+
|
| 41 |
+
If you already have the weights for Qwen3 4B used in Z-Image, you can use them as is. Refer to the [Z-Image documentation](./zimage.md#download-the-model--モデルのダウンロード) for details.
|
| 42 |
+
|
| 43 |
+
<details>
|
| 44 |
+
<summary>日本語</summary>
|
| 45 |
+
|
| 46 |
+
- **DiT 4B**: [black-forest-labs/FLUX.2-klein-4B](https://huggingface.co/black-forest-labs/FLUX.2-klein-4B) リポジトリからダウンロードしてください。`flux2-klein-4b.safetensors` を使用してください。
|
| 47 |
+
- **DiT base 4B**: [black-forest-labs/FLUX.2-klein-base-4B](https://huggingface.co/black-forest-labs/FLUX.2-klein-base-4B) リポジトリからダウンロードしてください。`flux2-klein-base-4b.safetensors` を使用してください。
|
| 48 |
+
- **AE**: [black-forest-labs/FLUX.2](https://huggingface.co/black-forest-labs/FLUX.2-dev) リポジトリからダウンロードしてください。`ae.safetensors` を使用してください。サブフォルダ内の `vae/diffusion_pytorch_model.safetensors` はDiffusers形式なので使用できません。
|
| 49 |
+
- **Qwen3 4B Text Encoder**: [black-forest-labs/FLUX.2-klein-4B](https://huggingface.co/black-forest-labs/FLUX.2-klein-4B) リポジトリから分割されたすべてのファイルをダウンロードし、最初のファイル(例:`00001-of-00002.safetensors`)を引数で指定してください。
|
| 50 |
+
|
| 51 |
+
Qwen3 4Bの重みは、すでにZ-Imageで用いているものがあればそのまま使用可能です。[Z-Imageのドキュメント](./zimage.md#download-the-model--モデルのダウンロード)を参照してください。
|
| 52 |
+
|
| 53 |
+
</details>
|
| 54 |
+
|
| 55 |
+
### FLUX.2 [klein] 9B / base 9B
|
| 56 |
+
|
| 57 |
+
- **DiT 9B**: Download from the [black-forest-labs/FLUX.2-klein-9B](https://huggingface.co/black-forest-labs/FLUX.2-klein-9B) repository. Use `flux2-klein-9b.safetensors`.
|
| 58 |
+
- **DiT base 9B**: Download from the [black-forest-labs/FLUX.2-klein-base-9B](https://huggingface.co/black-forest-labs/FLUX.2-klein-base-9B) repository. Use `flux2-klein-base-9b.safetensors`.
|
| 59 |
+
- **AE**: Download from the [black-forest-labs/FLUX.2](https://huggingface.co/black-forest-labs/FLUX.2-dev) repository. Use `ae.safetensors`. `vae/diffusion_pytorch_model.safetensors` in the subfolder is in Diffusers format and cannot be used.
|
| 60 |
+
- **Qwen3 8B Text Encoder**: Download all the split files from the [black-forest-labs/FLUX.2-klein-9B](https://huggingface.co/black-forest-labs/FLUX.2-klein-9B) repository and specify the first file (e.g., `00001-of-00004.safetensors`) in the arguments.
|
| 61 |
+
|
| 62 |
+
<details>
|
| 63 |
+
<summary>日本語</summary>
|
| 64 |
+
|
| 65 |
+
- **DiT 9B**: [black-forest-labs/FLUX.2-klein-9B](https://huggingface.co/black-forest-labs/FLUX.2-klein-9B) リポジトリからダウンロードしてください。`flux2-klein-9b.safetensors` を使用してください。
|
| 66 |
+
- **DiT base 9B**: [black-forest-labs/FLUX.2-klein-base-9B](https://huggingface.co/black-forest-labs/FLUX.2-klein-base-9B) リポジトリからダウンロードしてください。`flux2-klein-base-9b.safetensors` を使用してください。
|
| 67 |
+
- **AE**: [black-forest-labs/FLUX.2](https://huggingface.co/black-forest-labs/FLUX.2-dev) リポジトリからダウンロードしてください。`ae.safetensors` を使用してください。サブフォルダ内の `vae/diffusion_pytorch_model.safetensors` はDiffusers形式なので使用できません。
|
| 68 |
+
- **Qwen3 8B Text Encoder**: [black-forest-labs/FLUX.2-klein-9B](https://huggingface.co/black-forest-labs/FLUX.2-klein-9B) リポジトリから分割されたすべてのファイルをダウンロードし、最初のファイル(例:`00001-of-00004.safetensors`)を引数で指定してください。
|
| 69 |
+
</details>
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
## Specifying Model Version / モデルバージョンの指定
|
| 73 |
+
|
| 74 |
+
When specifying the model version in various scripts, use the following options:
|
| 75 |
+
|
| 76 |
+
|type|version|sampling guidance scale|num sampling steps|
|
| 77 |
+
|----|--------|----|----|
|
| 78 |
+
|flux.2-dev|`--model_version dev`|4.0|50|
|
| 79 |
+
|flux.2-klein-4b|`--model_version klein-4b`|1.0|4|
|
| 80 |
+
|flux.2-klein-base-4b|`--model_version klein-base-4b`|4.0|50|
|
| 81 |
+
|flux.2-klein-9b|`--model_version klein-9b`|1.0|4|
|
| 82 |
+
|flux.2-klein-base-9b|`--model_version klein-base-9b`|4.0|50|
|
| 83 |
+
|
| 84 |
+
For model training, it is recommended to use klein base 4B or 9B. The dev and klein 4B/9B are distilled models primarily intended for inference.
|
| 85 |
+
|
| 86 |
+
<details>
|
| 87 |
+
<summary>日本語</summary>
|
| 88 |
+
|
| 89 |
+
それぞれのスクリプトでモデルバージョンを指定する際には、英語版の文章を参考にして`--model_version`オプションを使用してください。
|
| 90 |
+
|
| 91 |
+
モデルの学習を行う場合は、klein base 4Bまたは9Bを使用することをお勧めします。dev、およびklein 4B/9Bは蒸留モデルであり、主に推論用です。
|
| 92 |
+
|
| 93 |
+
</details>
|
| 94 |
+
|
| 95 |
+
## Pre-caching / 事前キャッシング
|
| 96 |
+
|
| 97 |
+
### Latent Pre-caching / latentの事前キャッシング
|
| 98 |
+
|
| 99 |
+
Latent pre-caching uses a dedicated script for FLUX.2.
|
| 100 |
+
|
| 101 |
+
```bash
|
| 102 |
+
python src/musubi_tuner/flux_2_cache_latents.py \
|
| 103 |
+
--dataset_config path/to/toml \
|
| 104 |
+
--vae path/to/ae_model \
|
| 105 |
+
--model_version dev
|
| 106 |
+
```
|
| 107 |
+
|
| 108 |
+
- Note that the `--vae` argument is required, not `--ae`.
|
| 109 |
+
- Uses `flux_2_cache_latents.py`.
|
| 110 |
+
- The dataset must be an image dataset.
|
| 111 |
+
- Use the `--model_version` option for Flux.2 Klein training (if omitted, defaults to `dev`).
|
| 112 |
+
- The `control_images` in the dataset config is used as the reference image. See [Dataset Config](./dataset_config.md#flux1-kontext-dev) for details.
|
| 113 |
+
- `--vae_dtype` option is available to specify the VAE weight data type. Default is `float32`, `bfloat16` can also be specified. Specifying `bfloat16` reduces VRAM usage.
|
| 114 |
+
|
| 115 |
+
<details>
|
| 116 |
+
<summary>日本語</summary>
|
| 117 |
+
|
| 118 |
+
latentの事前キャッシングはFLUX.2専用のスクリプトを使用します。
|
| 119 |
+
|
| 120 |
+
- `flux_2_cache_latents.py`を使用します。
|
| 121 |
+
- `--ae`ではなく、`--vae`引数を指定してください。
|
| 122 |
+
- データセットは画像データセットである必要があります。
|
| 123 |
+
- データセット設定の`control_images`が参照画像として使用されます。詳細は[データセット設定](./dataset_config.md#flux1-kontext-dev)を参照してください。
|
| 124 |
+
- `--vae_dtype`オプションは、VAEの重みデータ型を指定するためのオプションです。デフォルトは`float32`で、`bfloat16`も指定可能です。`bfloat16`を指定するとVRAM使用量が削減されます。
|
| 125 |
+
|
| 126 |
+
</details>
|
| 127 |
+
|
| 128 |
+
### Text Encoder Output Pre-caching / テキストエンコーダー出力の事前キャッシング
|
| 129 |
+
|
| 130 |
+
Text encoder output pre-caching also uses a dedicated script.
|
| 131 |
+
|
| 132 |
+
```bash
|
| 133 |
+
python src/musubi_tuner/flux_2_cache_text_encoder_outputs.py \
|
| 134 |
+
--dataset_config path/to/toml \
|
| 135 |
+
--text_encoder path/to/text_encoder \
|
| 136 |
+
--batch_size 16 \
|
| 137 |
+
--model_version dev
|
| 138 |
+
```
|
| 139 |
+
|
| 140 |
+
- Uses `flux_2_cache_text_encoder_outputs.py`.
|
| 141 |
+
- Requires `--text_encoder` argument
|
| 142 |
+
- Use the `--model_version` option for Flux.2 Klein training (if omitted, defaults to `dev`).
|
| 143 |
+
- Use `--fp8_text_encoder` option to run the Text Encoder in fp8 mode for VRAM savings.
|
| 144 |
+
- The larger the batch size, the more VRAM is required. Adjust `--batch_size` according to your VRAM capacity.
|
| 145 |
+
|
| 146 |
+
<details>
|
| 147 |
+
<summary>日本語</summary>
|
| 148 |
+
|
| 149 |
+
テキストエンコーダー出力の事前キャッシングも専用のスクリプトを使用します。
|
| 150 |
+
|
| 151 |
+
- `flux_2_cache_text_encoder_outputs.py`を使用します。
|
| 152 |
+
- テキストエンコーダーをfp8モードで実行するための`--fp8_text_encoder`オプションを使用します。
|
| 153 |
+
- バッチサイズが大きいほど、より多くのVRAMが必要です。VRAM容量に応じて`--batch_size`を調整してください。
|
| 154 |
+
|
| 155 |
+
</details>
|
| 156 |
+
|
| 157 |
+
## Training / 学習
|
| 158 |
+
|
| 159 |
+
Training uses a dedicated script `flux_2_train_network.py`.
|
| 160 |
+
|
| 161 |
+
```bash
|
| 162 |
+
accelerate launch --num_cpu_threads_per_process 1 --mixed_precision bf16 src/musubi_tuner/flux_2_train_network.py \
|
| 163 |
+
--model_version dev \
|
| 164 |
+
--dit path/to/dit_model \
|
| 165 |
+
--vae path/to/ae_model \
|
| 166 |
+
--text_encoder path/to/text_encoder \
|
| 167 |
+
--dataset_config path/to/toml \
|
| 168 |
+
--sdpa --mixed_precision bf16 \
|
| 169 |
+
--timestep_sampling flux2_shift --weighting_scheme none \
|
| 170 |
+
--optimizer_type adamw8bit --learning_rate 1e-4 --gradient_checkpointing \
|
| 171 |
+
--max_data_loader_n_workers 2 --persistent_data_loader_workers \
|
| 172 |
+
--network_module networks.lora_flux_2 --network_dim 32 \
|
| 173 |
+
--max_train_epochs 16 --save_every_n_epochs 1 --seed 42 \
|
| 174 |
+
--output_dir path/to/output_dir --output_name name-of-lora
|
| 175 |
+
```
|
| 176 |
+
|
| 177 |
+
- Uses `flux_2_train_network.py`.
|
| 178 |
+
- **Requires** specifying `--vae` (not `--ae`), `--text_encoder`
|
| 179 |
+
- **Requires** specifying `--network_module networks.lora_flux_2`.
|
| 180 |
+
- `--mixed_precision bf16` is recommended for FLUX.2 training.
|
| 181 |
+
- `--timestep_sampling flux2_shift` is recommended for FLUX.2.
|
| 182 |
+
- Use the `--model_version` option for Flux.2 Klein training (if omitted, defaults to `dev`).
|
| 183 |
+
- Memory saving options like `--fp8_base --fp8_scaled` (for DiT, specify both) and `--fp8_text_encoder` (for Text Encoder) are available. `--fp8_scaled` is recommended when using `--fp8_base` for DiT.
|
| 184 |
+
- `--gradient_checkpointing` and `--gradient_checkpointing_cpu_offload` are available for memory savings. See [HunyuanVideo documentation](./hunyuan_video.md#memory-optimization) for details.
|
| 185 |
+
- `--vae_dtype` option is available to specify the VAE weight data type. Default is `float32`, `bfloat16` can also be specified.
|
| 186 |
+
- Instead of `--sdpa`, `--xformers` and `--flash_attn` can also be used. Make sure the related libraries are installed.
|
| 187 |
+
|
| 188 |
+
`--fp8_text_encoder` option is not available for dev (Mistral 3).
|
| 189 |
+
|
| 190 |
+
Some blocks can be offloaded to CPU for memory savings using the `--blocks_to_swap` option. See [HunyuanVideo documentation](./hunyuan_video.md#memory-optimization) for details.
|
| 191 |
+
|
| 192 |
+
In FLUX.2, since DoubleStreamBlock uses more memory than SingleStreamBlock and the number of each block varies by model, the actual number of offloaded blocks is automatically adjusted (double block + single block * 2 = number of swap blocks).
|
| 193 |
+
|
| 194 |
+
The maximum values of `blocks_to_swap` per model when combined with the `--fp8_base --fp8_scaled` options are as follows:
|
| 195 |
+
|
| 196 |
+
|Model Type|Maximum blocks_to_swap|
|
| 197 |
+
|----|----|
|
| 198 |
+
|flux.2-dev|29|
|
| 199 |
+
|flux.2-klein-4b|13|
|
| 200 |
+
|flux.2-klein-9b|16|
|
| 201 |
+
|
| 202 |
+
<details>
|
| 203 |
+
<summary>日本語</summary>
|
| 204 |
+
|
| 205 |
+
FLUX.2の学習は専用のスクリプト`flux_2_train_network.py`を使用します。
|
| 206 |
+
|
| 207 |
+
- `flux_2_train_network.py`を使用します。
|
| 208 |
+
- `--vae`(`--ae`ではありません)、`--text_encoder` を指定する必要があります。
|
| 209 |
+
- `--network_module networks.lora_flux_2`を指定する必要があります。
|
| 210 |
+
- FLUX.2の学習には`--mixed_precision bf16`を推奨します。
|
| 211 |
+
- FLUX.2には`--timestep_sampling flux2_shift`を推奨します。
|
| 212 |
+
- `--fp8_base --fp8_scaled`(DiT用、両方指定してください)や`--fp8_text_encoder`(テキストエンコーダー用)などのメモリ節約オプションが利用可能です。`--fp8_base`をDiTに使用する場合は、`--fp8_scaled`を推奨します。
|
| 213 |
+
- メモリ節約のために`--gradient_checkpointing`が利用可能です。
|
| 214 |
+
- `--vae_dtype`オプションは、VAEの重みデータ型を指定するためのオプションです。デフォルトは`float32`で、`bfloat16`も指定可能です。
|
| 215 |
+
- `--sdpa`の代わりに`--xformers`および`--flash_attn`を使用することも可能です。関連するライブラリがインストールされていることを確認してください。
|
| 216 |
+
|
| 217 |
+
`--fp8_text_encoder`オプションはdev(Mistral 3)では使用できません。
|
| 218 |
+
|
| 219 |
+
一部のブロックをメモリ節約のためにCPUにオフロードする`--blocks_to_swap`オプションも利用可能です。詳細は[HunyuanVideoのドキュメント](./hunyuan_video.md#memory-optimization)を参照してください。
|
| 220 |
+
|
| 221 |
+
FLUX.2ではDoubleStreamBlockのメモリ使用量がSingleStreamBlockよりも大きいのと、それぞれのブロック数がモデルごとに異なるため、実際にオフロードされるブロック数は自動調整されます(double block + single block * 2 = swap block数)。
|
| 222 |
+
|
| 223 |
+
`--fp8_base --fp8_scaled`オプションと組み合わせたときの、モデルごとの`blocks_to_swap`の最大値は以下の通りです。
|
| 224 |
+
|
| 225 |
+
|モデル種類|blocks_to_swapの最大値|
|
| 226 |
+
|----|----|
|
| 227 |
+
|flux.2-dev|29|
|
| 228 |
+
|flux.2-klein-4b|13|
|
| 229 |
+
|flux.2-klein-9b|16|
|
| 230 |
+
|
| 231 |
+
</details>
|
| 232 |
+
|
| 233 |
+
## Inference / 推論
|
| 234 |
+
|
| 235 |
+
Inference uses a dedicated script `flux_2_generate_image.py`.
|
| 236 |
+
|
| 237 |
+
```bash
|
| 238 |
+
python src/musubi_tuner/flux_2_generate_image.py \
|
| 239 |
+
--model_version dev \
|
| 240 |
+
--dit path/to/dit_model \
|
| 241 |
+
--vae path/to/ae_model \
|
| 242 |
+
--text_encoder path/to/text_encoder \
|
| 243 |
+
--control_image_path path/to/control_image.jpg \
|
| 244 |
+
--prompt "A cat" \
|
| 245 |
+
--image_size 1024 1024 --infer_steps 50 \
|
| 246 |
+
--fp8_scaled \
|
| 247 |
+
--save_path path/to/save/dir --output_type images \
|
| 248 |
+
--seed 1234 --lora_multiplier 1.0 --lora_weight path/to/lora.safetensors
|
| 249 |
+
```
|
| 250 |
+
|
| 251 |
+
- Uses `flux_2_generate_image.py`.
|
| 252 |
+
- **Requires** specifying `--vae`, `--text_encoder`
|
| 253 |
+
- **Requires** specifying `--control_image_path` for the reference image.
|
| 254 |
+
- Use the `--model_version` option for Flux.2 Klein inference (if omitted, defaults to `dev`).
|
| 255 |
+
- `--no_resize_control`: By default, the control image is resized to the recommended resolution for FLUX.2. If you specify this option, this resizing is skipped, and the image is used as-is.
|
| 256 |
+
|
| 257 |
+
This feature is not officially supported by FLUX.2, but it is available for experimental use.
|
| 258 |
+
|
| 259 |
+
- `--image_size` is the size of the generated image, height and width are specified in that order.
|
| 260 |
+
- `--prompt`: Prompt for generation.
|
| 261 |
+
- `--fp8_scaled` option is available for DiT to reduce memory usage. Quality may be slightly lower. `--fp8_text_encoder` option is available to reduce memory usage of Text Encoder. `--fp8` alone is also an option for DiT but `--fp8_scaled` potentially offers better quality.
|
| 262 |
+
- LoRA loading options (`--lora_weight`, `--lora_multiplier`, `--include_patterns`, `--exclude_patterns`) are available. `--lycoris` is also supported.
|
| 263 |
+
- `--embedded_cfg_scale` (default 2.5) controls the distilled guidance scale.
|
| 264 |
+
- `--save_merged_model` option is available to save the DiT model after merging LoRA weights. Inference is skipped if this is specified.
|
| 265 |
+
|
| 266 |
+
<details>
|
| 267 |
+
<summary>日本語</summary>
|
| 268 |
+
|
| 269 |
+
FLUX.2の推論は専用のスクリプト`flux_2_generate_image.py`を使用します。
|
| 270 |
+
|
| 271 |
+
- `flux_2_generate_image.py`を使用します。
|
| 272 |
+
- `--vae`、`--text_encoder` を指定する必要があります。
|
| 273 |
+
- `--control_image_path`を指定する必要があります(参照画像)。
|
| 274 |
+
- `--no_resize_control`: デフォルトでは、参照画像はFLUX.2の推奨解像度にリサイズされます。このオプションを指定すると、このリサイズはスキップされ、画像はそのままのサイズで使用されます。
|
| 275 |
+
|
| 276 |
+
この機能はFLUX.2では公式にサポートされていませんが、実験的に使用可能です。
|
| 277 |
+
|
| 278 |
+
- `--image_size`は生成する画像のサイズで、高さと幅をその順番で指定します。
|
| 279 |
+
- `--prompt`: 生成用のプロンプトです。
|
| 280 |
+
- DiTのメモリ使用量を削減するために、`--fp8_scaled`オプションを指定可能です。品質はやや低下する可能性があります。またText Encoderのメモリ使用量を削減するために、`--fp8_text_encoder`オプションを指定可能です。DiT用に`--fp8`単独のオプションも用意されていますが、`--fp8_scaled`の方が品質が良い可能性があります。
|
| 281 |
+
- LoRAの読み込みオプション(`--lora_weight`、`--lora_multiplier`、`--include_patterns`、`--exclude_patterns`)が利用可能です。LyCORISもサポートされています。
|
| 282 |
+
- `--embedded_cfg_scale`(デフォルト2.5)は、蒸留されたガイダンススケールを制御します。
|
| 283 |
+
- `--save_merged_model`オプションは、LoRAの重みをマージした後にDiTモデルを保存するためのオプションです。これを指定すると推論はスキップされます。
|
| 284 |
+
|
| 285 |
+
</details>
|
docs/flux_kontext.md
ADDED
|
@@ -0,0 +1,190 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# FLUX.1 Kontext
|
| 2 |
+
|
| 3 |
+
## Overview / 概要
|
| 4 |
+
|
| 5 |
+
This document describes the usage of the [FLUX.1 Kontext](https://github.com/black-forest-labs/flux) \[dev\] architecture within the Musubi Tuner framework. FLUX.1 Kontext is an image generation model that can take a reference image as input.
|
| 6 |
+
|
| 7 |
+
This feature is experimental.
|
| 8 |
+
|
| 9 |
+
Latent pre-caching, training, and inference options can be found in the `--help` output. Many options are shared with HunyuanVideo, so refer to the [HunyuanVideo documentation](./hunyuan_video.md) as needed.
|
| 10 |
+
|
| 11 |
+
<details>
|
| 12 |
+
<summary>日本語</summary>
|
| 13 |
+
|
| 14 |
+
このドキュメントは、Musubi Tunerフレームワーク内での[FLUX.1 Kontext](https://github.com/black-forest-labs/flux) \[dev\] アーキテクチャの使用法について説明しています。FLUX.1 Kontextは、参照画像をコンテキストとして入力できる画像生成モデルです。
|
| 15 |
+
|
| 16 |
+
この機能は実験的なものです。
|
| 17 |
+
|
| 18 |
+
事前キャッシング、学習、推論のオプションは`--help`で確認してください。HunyuanVideoと共通のオプションが多くありますので、必要に応じて[HunyuanVideoのドキュメント](./hunyuan_video.md)も参照してください。
|
| 19 |
+
|
| 20 |
+
</details>
|
| 21 |
+
|
| 22 |
+
## Download the model / モデルのダウンロード
|
| 23 |
+
|
| 24 |
+
You need to download the DiT, AE, Text Encoder 1 (T5-XXL), and Text Encoder 2 (CLIP-L) models.
|
| 25 |
+
|
| 26 |
+
- **DiT, AE**: Download from the [black-forest-labs/FLUX.1-kontext](https://huggingface.co/black-forest-labs/FLUX.1-Kontext-dev) repository. Use `flux1-kontext-dev.safetensors` and `ae.safetensors`. The weights in the subfolder are in Diffusers format and cannot be used.
|
| 27 |
+
- **Text Encoder 1 (T5-XXL), Text Encoder 2 (CLIP-L)**: Download from the [ComfyUI FLUX Text Encoders](https://huggingface.co/comfyanonymous/flux_text_encoders) repository. Please use `t5xxl_fp16.safetensors` for T5-XXL. Thanks to ComfyUI for providing these models.
|
| 28 |
+
|
| 29 |
+
<details>
|
| 30 |
+
<summary>日本語</summary>
|
| 31 |
+
|
| 32 |
+
DiT, AE, Text Encoder 1 (T5-XXL), Text Encoder 2 (CLIP-L) のモデルをダウンロードする必要があります。
|
| 33 |
+
|
| 34 |
+
- **DiT, AE**: [black-forest-labs/FLUX.1-kontext](https://huggingface.co/black-forest-labs/FLUX.1-Kontext-dev) リポジトリからダウンロードしてください。`flux1-kontext-dev.safetensors` および `ae.safetensors` を使用してください。サブフォルダ内の重みはDiffusers形式なので使用できません。
|
| 35 |
+
- **Text Encoder 1 (T5-XXL), Text Encoder 2 (CLIP-L)**: [ComfyUIのFLUX Text Encoders](https://huggingface.co/comfyanonymous/flux_text_encoders) リポジトリからダウンロードしてください。T5-XXLには`t5xxl_fp16.safetensors`を使用してください。これらのモデルをご提供いただいたComfyUIに感謝します。
|
| 36 |
+
</details>
|
| 37 |
+
|
| 38 |
+
## Pre-caching / 事前キャッシング
|
| 39 |
+
|
| 40 |
+
### Latent Pre-caching / latentの事前キャッシング
|
| 41 |
+
|
| 42 |
+
Latent pre-caching uses a dedicated script for FLUX.1 Kontext.
|
| 43 |
+
|
| 44 |
+
```bash
|
| 45 |
+
python src/musubi_tuner/flux_kontext_cache_latents.py \
|
| 46 |
+
--dataset_config path/to/toml \
|
| 47 |
+
--vae path/to/ae_model
|
| 48 |
+
```
|
| 49 |
+
|
| 50 |
+
- Note that the `--vae` argument is required, not `--ae`.
|
| 51 |
+
- Uses `flux_kontext_cache_latents.py`.
|
| 52 |
+
- The dataset must be an image dataset.
|
| 53 |
+
- The `control_images` in the dataset config is used as the reference image. See [Dataset Config](./dataset_config.md#flux1-kontext-dev) for details.
|
| 54 |
+
|
| 55 |
+
<details>
|
| 56 |
+
<summary>日本語</summary>
|
| 57 |
+
|
| 58 |
+
latentの事前キャッシングはFLUX.1 Kontext専用のスクリプトを使用します。
|
| 59 |
+
|
| 60 |
+
- `flux_kontext_cache_latents.py`を使用します。
|
| 61 |
+
- `--ae`ではなく、`--vae`引数を指定してください。
|
| 62 |
+
- データセットは画像データセットである必要があります。
|
| 63 |
+
- データセット設定の`control_images`が参照画像として使用されます。詳細は[データセット設定](./dataset_config.md#flux1-kontext-dev)を参照してください。
|
| 64 |
+
|
| 65 |
+
</details>
|
| 66 |
+
|
| 67 |
+
### Text Encoder Output Pre-caching / テキストエンコーダー出力の事前キャッシング
|
| 68 |
+
|
| 69 |
+
Text encoder output pre-caching also uses a dedicated script.
|
| 70 |
+
|
| 71 |
+
```bash
|
| 72 |
+
python src/musubi_tuner/flux_kontext_cache_text_encoder_outputs.py \
|
| 73 |
+
--dataset_config path/to/toml \
|
| 74 |
+
--text_encoder1 path/to/text_encoder1 \
|
| 75 |
+
--text_encoder2 path/to/text_encoder2 \
|
| 76 |
+
--batch_size 16
|
| 77 |
+
```
|
| 78 |
+
|
| 79 |
+
- Uses `flux_kontext_cache_text_encoder_outputs.py`.
|
| 80 |
+
- Requires both `--text_encoder1` (T5) and `--text_encoder2` (CLIP) arguments.
|
| 81 |
+
- Use `--fp8_t5` option to run the T5 Text Encoder in fp8 mode for VRAM savings.
|
| 82 |
+
- The larger the batch size, the more VRAM is required. Adjust `--batch_size` according to your VRAM capacity.
|
| 83 |
+
|
| 84 |
+
<details>
|
| 85 |
+
<summary>日本語</summary>
|
| 86 |
+
|
| 87 |
+
テキストエンコーダー出力の事前キャッシングも専用のスクリプトを使用します。
|
| 88 |
+
|
| 89 |
+
- `flux_kontext_cache_text_encoder_outputs.py`を使用します。
|
| 90 |
+
- T5とCLIPの両方の引数が必要です。
|
| 91 |
+
- T5テキストエンコーダーをfp8モードで実行するための`--fp8_t5`オプションを使用します。
|
| 92 |
+
- バッチサイズが大きいほど、より多くのVRAMが必要です。VRAM容量に応じて`--batch_size`を調整してください。
|
| 93 |
+
|
| 94 |
+
</details>
|
| 95 |
+
|
| 96 |
+
## Training / 学習
|
| 97 |
+
|
| 98 |
+
Training uses a dedicated script `flux_kontext_train_network.py`.
|
| 99 |
+
|
| 100 |
+
```bash
|
| 101 |
+
accelerate launch --num_cpu_threads_per_process 1 --mixed_precision bf16 src/musubi_tuner/flux_kontext_train_network.py \
|
| 102 |
+
--dit path/to/dit_model \
|
| 103 |
+
--vae path/to/ae_model \
|
| 104 |
+
--text_encoder1 path/to/text_encoder1 \
|
| 105 |
+
--text_encoder2 path/to/text_encoder2 \
|
| 106 |
+
--dataset_config path/to/toml \
|
| 107 |
+
--sdpa --mixed_precision bf16 \
|
| 108 |
+
--timestep_sampling flux_shift --weighting_scheme none \
|
| 109 |
+
--optimizer_type adamw8bit --learning_rate 1e-4 --gradient_checkpointing \
|
| 110 |
+
--max_data_loader_n_workers 2 --persistent_data_loader_workers \
|
| 111 |
+
--network_module networks.lora_flux --network_dim 32 \
|
| 112 |
+
--max_train_epochs 16 --save_every_n_epochs 1 --seed 42 \
|
| 113 |
+
--output_dir path/to/output_dir --output_name name-of-lora
|
| 114 |
+
```
|
| 115 |
+
|
| 116 |
+
- Uses `flux_kontext_train_network.py`.
|
| 117 |
+
- **Requires** specifying `--vae` (not `--ae`), `--text_encoder1`, and `--text_encoder2`.
|
| 118 |
+
- **Requires** specifying `--network_module networks.lora_flux`.
|
| 119 |
+
- `--mixed_precision bf16` is recommended for FLUX.1 Kontext training.
|
| 120 |
+
- `--timestep_sampling flux_shift` is recommended for FLUX.1 Kontext.
|
| 121 |
+
- Memory saving options like `--fp8` (for DiT) and `--fp8_t5` (for Text Encoder 1) are available. `--fp8_scaled` is recommended when using `--fp8` for DiT.
|
| 122 |
+
- `--gradient_checkpointing` and `--gradient_checkpointing_cpu_offload` are available for memory savings. See [HunyuanVideo documentation](./hunyuan_video.md#memory-optimization) for details.
|
| 123 |
+
|
| 124 |
+
<details>
|
| 125 |
+
<summary>日本語</summary>
|
| 126 |
+
|
| 127 |
+
FLUX.1 Kontextの学習は専用のスクリプト`flux_kontext_train_network.py`を使用します。
|
| 128 |
+
|
| 129 |
+
- `flux_kontext_train_network.py`を使用します。
|
| 130 |
+
- `--vae`(`--ae`ではありません)、`--text_encoder1`、`--text_encoder2`を指定する必要があります。
|
| 131 |
+
- `--network_module networks.lora_flux`を指定する必要があります。
|
| 132 |
+
- FLUX.1 Kontextの学習には`--mixed_precision bf16`を推奨します。
|
| 133 |
+
- FLUX.1 Kontextには`--timestep_sampling flux_shift`を推奨します。
|
| 134 |
+
- `--fp8`(DiT用)や`--fp8_t5`(テキストエンコーダー1用)などのメモリ節約オプションが利用可能です。`--fp8_scaled`を使用することをお勧めします。
|
| 135 |
+
- メモリ節約のために`--gradient_checkpointing`が利用可能です。
|
| 136 |
+
|
| 137 |
+
</details>
|
| 138 |
+
|
| 139 |
+
## Inference / 推論
|
| 140 |
+
|
| 141 |
+
Inference uses a dedicated script `flux_kontext_generate_image.py`.
|
| 142 |
+
|
| 143 |
+
```bash
|
| 144 |
+
python src/musubi_tuner/flux_kontext_generate_image.py \
|
| 145 |
+
--dit path/to/dit_model \
|
| 146 |
+
--vae path/to/ae_model \
|
| 147 |
+
--text_encoder1 path/to/text_encoder1 \
|
| 148 |
+
--text_encoder2 path/to/text_encoder2 \
|
| 149 |
+
--control_image_path path/to/control_image.jpg \
|
| 150 |
+
--prompt "A cat" \
|
| 151 |
+
--image_size 1024 1024 --infer_steps 25 \
|
| 152 |
+
--attn_mode sdpa --fp8_scaled \
|
| 153 |
+
--save_path path/to/save/dir --output_type images \
|
| 154 |
+
--seed 1234 --lora_multiplier 1.0 --lora_weight path/to/lora.safetensors
|
| 155 |
+
```
|
| 156 |
+
|
| 157 |
+
- Uses `flux_kontext_generate_image.py`.
|
| 158 |
+
- **Requires** specifying `--vae`, `--text_encoder1`, and `--text_encoder2`.
|
| 159 |
+
- **Requires** specifying `--control_image_path` for the reference image.
|
| 160 |
+
- `--no_resize_control`: By default, the control image is resized to the recommended resolution for FLUX.1 Kontext. If you specify this option, this resizing is skipped, and the image is used as-is.
|
| 161 |
+
|
| 162 |
+
This feature is not officially supported by FLUX.1 Kontext, but it is available for experimental use.
|
| 163 |
+
|
| 164 |
+
- `--image_size` is the size of the generated image, height and width are specified in that order.
|
| 165 |
+
- `--prompt`: Prompt for generation.
|
| 166 |
+
- `--fp8_scaled` option is available for DiT to reduce memory usage. Quality may be slightly lower. `--fp8_t5` option is available to reduce memory usage of Text Encoder 1. `--fp8` alone is also an option for DiT but `--fp8_scaled` potentially offers better quality.
|
| 167 |
+
- LoRA loading options (`--lora_weight`, `--lora_multiplier`, `--include_patterns`, `--exclude_patterns`) are available. `--lycoris` is also supported.
|
| 168 |
+
- `--embedded_cfg_scale` (default 2.5) controls the distilled guidance scale.
|
| 169 |
+
- `--save_merged_model` option is available to save the DiT model after merging LoRA weights. Inference is skipped if this is specified.
|
| 170 |
+
|
| 171 |
+
<details>
|
| 172 |
+
<summary>日本語</summary>
|
| 173 |
+
|
| 174 |
+
FLUX.1 Kontextの推論は専用のスクリプト`flux_kontext_generate_image.py`を使用します。
|
| 175 |
+
|
| 176 |
+
- `flux_kontext_generate_image.py`を使用します。
|
| 177 |
+
- `--vae`、`--text_encoder1`、`--text_encoder2`を指定する必要があります。
|
| 178 |
+
- `--control_image_path`を指定する必要があります(参照画像)。
|
| 179 |
+
- `--no_resize_control`: デフォルトでは、参照画像はFLUX.1 Kontextの推奨解像度にリサイズされます。このオプションを指定すると、このリサイズはスキップされ、画像はそのままのサイズで使用されます。
|
| 180 |
+
|
| 181 |
+
この機能はFLUX.1 Kontextでは公式にサポートされていませんが、実験的に使用可能です。
|
| 182 |
+
|
| 183 |
+
- `--image_size`は生成する画像のサイズで、高さと幅をその順番で指定します。
|
| 184 |
+
- `--prompt`: 生成用のプロンプトです。
|
| 185 |
+
- DiTのメモリ使用量を削減するために、`--fp8_scaled`オプションを指定可能です。品質はやや低下する可能性があります。またText Encoder 1のメモリ使用量を削減するために、`--fp8_t5`オプションを指定可能です。DiT用に`--fp8`単独のオプションも用意されていますが、`--fp8_scaled`の方が品質が良い可能性があります。
|
| 186 |
+
- LoRAの読み込みオプション(`--lora_weight`、`--lora_multiplier`、`--include_patterns`、`--exclude_patterns`)が利用可能です。LyCORISもサポートされています。
|
| 187 |
+
- `--embedded_cfg_scale`(デフォルト2.5)は、蒸留されたガイダンススケールを制御します。
|
| 188 |
+
- `--save_merged_model`オプションは、LoRAの重みをマージした後にDiTモデルを保存するためのオプションです。これを指定すると推論はスキップされます。
|
| 189 |
+
|
| 190 |
+
</details>
|
docs/framepack.md
ADDED
|
@@ -0,0 +1,618 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# FramePack
|
| 2 |
+
|
| 3 |
+
## Overview / 概要
|
| 4 |
+
|
| 5 |
+
This document describes the usage of the [FramePack](https://github.com/lllyasviel/FramePack) architecture within the Musubi Tuner framework. FramePack is a novel video generation architecture developed by lllyasviel.
|
| 6 |
+
|
| 7 |
+
Key differences from HunyuanVideo:
|
| 8 |
+
- FramePack only supports Image-to-Video (I2V) generation. Text-to-Video (T2V) is not supported.
|
| 9 |
+
- It utilizes a different DiT model architecture and requires an additional Image Encoder. VAE is same as HunyuanVideo. Text Encoders seem to be the same as HunyuanVideo but we employ the original FramePack method to utilize them.
|
| 10 |
+
- Caching and training scripts are specific to FramePack (`fpack_*.py`).
|
| 11 |
+
- Due to its progressive generation nature, VRAM usage can be significantly lower, especially for longer videos, compared to other architectures.
|
| 12 |
+
|
| 13 |
+
The official documentation does not provide detailed explanations on how to train the model, but it is based on the FramePack implementation and paper.
|
| 14 |
+
|
| 15 |
+
This feature is experimental.
|
| 16 |
+
|
| 17 |
+
For one-frame inference and training, see [here](./framepack_1f.md).
|
| 18 |
+
|
| 19 |
+
Latent pre-caching, training, and inference options can be found in the `--help` output. Many options are shared with HunyuanVideo, so refer to the [HunyuanVideo documentation](./hunyuan_video.md) as needed.
|
| 20 |
+
|
| 21 |
+
<details>
|
| 22 |
+
<summary>日本語</summary>
|
| 23 |
+
|
| 24 |
+
このドキュメントは、Musubi Tunerフレームワーク内での[FramePack](https://github.com/lllyasviel/FramePack) アーキテクチャの使用法について説明しています。FramePackは、lllyasviel氏によって開発された新しいビデオ生成アーキテクチャです。
|
| 25 |
+
|
| 26 |
+
HunyuanVideoとの主な違いは次のとおりです。
|
| 27 |
+
- FramePackは、画像からビデオ(I2V)生成のみをサポートしています。テキストからビデオ(T2V)はサポートされていません。
|
| 28 |
+
- 異なるDiTモデルアーキテクチャを使用し、追加の画像エンコーダーが必要です。VAEはHunyuanVideoと同じです。テキストエンコーダーはHunyuanVideoと同じと思われますが、FramePack公式と同じ方法で推論を行っています。
|
| 29 |
+
- キャッシングと学習スクリプトはFramePack専用(`fpack_*.py`)です。
|
| 30 |
+
- セクションずつ生成するため、他のアーキテクチャと比較して、特に長いビデオの場合、VRAM使用量が大幅に少なくなる可能性があります。
|
| 31 |
+
|
| 32 |
+
学習方法について公式からは詳細な説明はありませんが、FramePackの実装と論文を参考にしています。
|
| 33 |
+
|
| 34 |
+
この機能は実験的なものです。
|
| 35 |
+
|
| 36 |
+
1フレーム推論、学習については[こちら](./framepack_1f.md)を参照してください。
|
| 37 |
+
|
| 38 |
+
事前キャッシング、学習、推論のオプションは`--help`で確認してください。HunyuanVideoと共通のオプションが多くありますので、必要に応じて[HunyuanVideoのドキュメント](./hunyuan_video.md)も参照してください。
|
| 39 |
+
|
| 40 |
+
</details>
|
| 41 |
+
|
| 42 |
+
## Download the model / モデルのダウンロード
|
| 43 |
+
|
| 44 |
+
You need to download the DiT, VAE, Text Encoder 1 (LLaMA), Text Encoder 2 (CLIP), and Image Encoder (SigLIP) models specifically for FramePack. Several download options are available for each component.
|
| 45 |
+
|
| 46 |
+
**Note:** The weights are publicly available on the following page: [maybleMyers/framepack_h1111](https://huggingface.co/maybleMyers/framepack_h1111) (except for FramePack-F1). Thank you maybleMyers!
|
| 47 |
+
|
| 48 |
+
### DiT Model
|
| 49 |
+
|
| 50 |
+
Choose one of the following methods:
|
| 51 |
+
|
| 52 |
+
1. **From lllyasviel's Hugging Face repo:** Download the three `.safetensors` files (starting with `diffusion_pytorch_model-00001-of-00003.safetensors`) from [lllyasviel/FramePackI2V_HY](https://huggingface.co/lllyasviel/FramePackI2V_HY). Specify the path to the first file (`...-00001-of-00003.safetensors`) as the `--dit` argument. For FramePack-F1, download from [lllyasviel/FramePack_F1_I2V_HY_20250503](https://huggingface.co/lllyasviel/FramePack_F1_I2V_HY_20250503).
|
| 53 |
+
|
| 54 |
+
2. **From local FramePack installation:** If you have cloned and run the official FramePack repository, the model might be downloaded locally. Specify the path to the snapshot directory, e.g., `path/to/FramePack/hf_download/hub/models--lllyasviel--FramePackI2V_HY/snapshots/<hex-uuid-folder>`. FramePack-F1 is also available in the same way.
|
| 55 |
+
|
| 56 |
+
3. **From Kijai's Hugging Face repo:** Download the single file `FramePackI2V_HY_bf16.safetensors` from [Kijai/HunyuanVideo_comfy](https://huggingface.co/Kijai/HunyuanVideo_comfy/blob/main/FramePackI2V_HY_bf16.safetensors). Specify the path to this file as the `--dit` argument. No FramePack-F1 model is available here currently.
|
| 57 |
+
|
| 58 |
+
### VAE Model
|
| 59 |
+
|
| 60 |
+
Choose one of the following methods:
|
| 61 |
+
|
| 62 |
+
1. **Use official HunyuanVideo VAE:** Follow the instructions in the main [README.md](../README.md#model-download).
|
| 63 |
+
2. **From hunyuanvideo-community Hugging Face repo:** Download `vae/diffusion_pytorch_model.safetensors` from [hunyuanvideo-community/HunyuanVideo](https://huggingface.co/hunyuanvideo-community/HunyuanVideo).
|
| 64 |
+
3. **From local FramePack installation:** If you have cloned and run the official FramePack repository, the VAE might be downloaded locally within the HunyuanVideo community model snapshot. Specify the path to the snapshot directory, e.g., `path/to/FramePack/hf_download/hub/models--hunyuanvideo-community--HunyuanVideo/snapshots/<hex-uuid-folder>`.
|
| 65 |
+
|
| 66 |
+
### Text Encoder 1 (LLaMA) Model

Choose one of the following methods:

1. **From Comfy-Org Hugging Face repo:** Download `split_files/text_encoders/llava_llama3_fp16.safetensors` from [Comfy-Org/HunyuanVideo_repackaged](https://huggingface.co/Comfy-Org/HunyuanVideo_repackaged).

2. **From hunyuanvideo-community Hugging Face repo:** Download the four `.safetensors` files (starting with `text_encoder/model-00001-of-00004.safetensors`) from [hunyuanvideo-community/HunyuanVideo](https://huggingface.co/hunyuanvideo-community/HunyuanVideo). Specify the path to the first file (`...-00001-of-00004.safetensors`) as the `--text_encoder1` argument.

3. **From local FramePack installation:** (Same as VAE) Specify the path to the HunyuanVideo community model snapshot directory, e.g., `path/to/FramePack/hf_download/hub/models--hunyuanvideo-community--HunyuanVideo/snapshots/<hex-uuid-folder>`.
|
| 73 |
+
|
| 74 |
+
### Text Encoder 2 (CLIP) Model

Choose one of the following methods:

1. **From Comfy-Org Hugging Face repo:** Download `split_files/text_encoders/clip_l.safetensors` from [Comfy-Org/HunyuanVideo_repackaged](https://huggingface.co/Comfy-Org/HunyuanVideo_repackaged).

2. **From hunyuanvideo-community Hugging Face repo:** Download `text_encoder_2/model.safetensors` from [hunyuanvideo-community/HunyuanVideo](https://huggingface.co/hunyuanvideo-community/HunyuanVideo).

3. **From local FramePack installation:** (Same as VAE) Specify the path to the HunyuanVideo community model snapshot directory, e.g., `path/to/FramePack/hf_download/hub/models--hunyuanvideo-community--HunyuanVideo/snapshots/<hex-uuid-folder>`.
|
| 81 |
+
|
| 82 |
+
### Image Encoder (SigLIP) Model

Choose one of the following methods:

1. **From Comfy-Org Hugging Face repo:** Download `sigclip_vision_patch14_384.safetensors` from [Comfy-Org/sigclip_vision_384](https://huggingface.co/Comfy-Org/sigclip_vision_384).

2. **From lllyasviel's Hugging Face repo:** Download `image_encoder/model.safetensors` from [lllyasviel/flux_redux_bfl](https://huggingface.co/lllyasviel/flux_redux_bfl).

3. **From local FramePack installation:** If you have cloned and run the official FramePack repository, the model might be downloaded locally. Specify the path to the snapshot directory, e.g., `path/to/FramePack/hf_download/hub/models--lllyasviel--flux_redux_bfl/snapshots/<hex-uuid-folder>`.
|
| 89 |
+
|
| 90 |
+
<details>
|
| 91 |
+
<summary>日本語</summary>
|
| 92 |
+
|
| 93 |
+
※以下のページに重みが一括で公開されています(FramePack-F1を除く)。maybleMyers 氏に感謝いたします。: https://huggingface.co/maybleMyers/framepack_h1111
|
| 94 |
+
|
| 95 |
+
DiT、VAE、テキストエンコーダー1(LLaMA)、テキストエンコーダー2(CLIP)、および画像エンコーダー(SigLIP)モデルは複数の方法でダウンロードできます。英語の説明を参考にして、ダウンロードしてください。
|
| 96 |
+
|
| 97 |
+
FramePack公式のリポジトリをクローンして実行した場合、モデルはローカルにダウンロードされている可能性があります。スナップショットディレクトリへのパスを指定してください。例:`path/to/FramePack/hf_download/hub/models--lllyasviel--flux_redux_bfl/snapshots/<hex-uuid-folder>`
|
| 98 |
+
|
| 99 |
+
HunyuanVideoの推論をComfyUIですでに行っている場合、いくつかのモデルはすでにダウンロードされている可能性があります。
|
| 100 |
+
</details>
|
| 101 |
+
|
| 102 |
+
## Pre-caching / 事前キャッシング
|
| 103 |
+
|
| 104 |
+
The default resolution for FramePack is 640x640. See [the source code](../src/musubi_tuner/frame_pack/bucket_tools.py) for the default resolution of each bucket.
|
| 105 |
+
|
| 106 |
+
The dataset for training must be a video dataset. Image datasets are not supported. You can train on videos of any length. Specify `frame_extraction` as `full` and set `max_frames` to a sufficiently large value. However, if the video is too long, you may run out of VRAM during VAE encoding.
|
| 107 |
+
|
| 108 |
+
### Latent Pre-caching / latentの事前キャッシング
|
| 109 |
+
|
| 110 |
+
Latent pre-caching uses a dedicated script for FramePack. You **must** provide the Image Encoder model.
|
| 111 |
+
|
| 112 |
+
```bash
python src/musubi_tuner/fpack_cache_latents.py \
    --dataset_config path/to/toml \
    --vae path/to/vae_model.safetensors \
    --image_encoder path/to/image_encoder_model.safetensors \
    --vae_chunk_size 32
```
|
| 119 |
+
|
| 120 |
+
Key differences from HunyuanVideo caching:
|
| 121 |
+
- Uses `fpack_cache_latents.py`.
|
| 122 |
+
- Requires the `--image_encoder` argument pointing to the downloaded SigLIP model.
|
| 123 |
+
- The script generates multiple cache files per video, each corresponding to a different section, with the section index appended to the filename (e.g., `..._frame_pos-0000-count_...` becomes `..._frame_pos-0000-0000-count_...`, `..._frame_pos-0000-0001-count_...`, etc.).
|
| 124 |
+
- Image embeddings are calculated using the Image Encoder and stored in the cache files alongside the latents.
|
| 125 |
+
|
| 126 |
+
For VRAM savings during VAE decoding, consider using `--vae_chunk_size` and `--vae_spatial_tile_sample_min_size`. If VRAM is overflowing and using shared memory, it is recommended to set `--vae_chunk_size` to 16 or 8 to lower Conv3D chunk size. If VRAM is still an issue, consider specifying `--vae_spatial_tile_sample_min_size` to 64 or 32. This option enables tiling during VAE encoding and decoding. `--vae_tiling` option is also available to enable tiling with the default tile size of 32.
|
| 127 |
+
|
| 128 |
+
Note that the quality may be slightly lower when using tiling. Chunking does not affect quality.
|
| 129 |
+
|
| 130 |
+
Specifying `--f1` is required for FramePack-F1 training. For one-frame training, specify `--one_frame`. If you change the presence of these options, please overwrite the existing cache without specifying `--skip_existing`.
|
| 131 |
+
|
| 132 |
+
`--one_frame_no_2x` and `--one_frame_no_4x` options are available for one-frame training, described in the next section.
|
| 133 |
+
|
| 134 |
+
**FramePack-F1 support:**
|
| 135 |
+
You can apply the FramePack-F1 sampling method by specifying `--f1` during caching. The training script also requires specifying `--f1` to change the options during sample generation.
|
| 136 |
+
|
| 137 |
+
By default, the sampling method used is Inverted anti-drifting (the same as during inference with the original FramePack model, using the latent and index in reverse order), described in the paper. You can switch to FramePack-F1 sampling (Vanilla sampling, using the temporally ordered latent and index) by specifying `--f1`.
|
| 138 |
+
|
| 139 |
+
<details>
|
| 140 |
+
<summary>日本語</summary>
|
| 141 |
+
|
| 142 |
+
FramePackのデフォルト解像度は640x640です。各バケットのデフォルト解像度については、[ソースコード](../src/musubi_tuner/frame_pack/bucket_tools.py)を参照してください。
|
| 143 |
+
|
| 144 |
+
画像データセットでの学習は行えません。また動画の長さによらず学習可能です。 `frame_extraction` に `full` を指定して、`max_frames` に十分に大きな値を指定してください。ただし、あまりにも長いとVAEのencodeでVRAMが不足する可能性があります。
|
| 145 |
+
|
| 146 |
+
latentの事前キャッシングはFramePack専用のスクリプトを使用します。画像エンコーダーモデルを指定する必要があります。
|
| 147 |
+
|
| 148 |
+
HunyuanVideoのキャッシングとの主な違いは次のとおりです。
|
| 149 |
+
- `fpack_cache_latents.py`を使用します。
|
| 150 |
+
- ダウンロードしたSigLIPモデルを指す`--image_encoder`引数が必要です。
|
| 151 |
+
- スクリプトは、各ビデオに対して複数のキャッシュファイルを生成します。各ファイルは異なるセクションに対応し、セクションインデックスがファイル名に追加されます(例:`..._frame_pos-0000-count_...`は`..._frame_pos-0000-0000-count_...`、`..._frame_pos-0000-0001-count_...`などになります)。
|
| 152 |
+
- 画像埋め込みは画像エンコーダーを使用して計算され、latentとともにキャッシュファイルに保存されます。
|
| 153 |
+
|
| 154 |
+
VAEのdecode時のVRAM節約のために、`--vae_chunk_size`と`--vae_spatial_tile_sample_min_size`を使用することを検討してください。VRAMがあふれて共有メモリを使用している場合には、`--vae_chunk_size`を16、8などに設定してConv3Dチャンクを有効にすることをお勧めします。VRAMがまだ不足する場合は、`--vae_spatial_tile_sample_min_size`を64、32などに指定してください。このオプションはVAEのエンコードとデコード時にタイリングを有効にします。`--vae_tiling`オプションも利用可能で、デフォルトのタイルサイズ32でタイル処理を有効にします。
|
| 155 |
+
|
| 156 |
+
タイリングを有効にすると品質はわずかに低下する可能性があります。チャンク処理は品質に影響しません。
|
| 157 |
+
|
| 158 |
+
FramePack-F1の学習を行う場合は`--f1`を指定してください。これらのオプションの有無を変更する場合には、`--skip_existing`を指定せずに既存のキャッシュを上書きしてください。
|
| 159 |
+
|
| 160 |
+
**FramePack-F1のサポート:**
|
| 161 |
+
キャッシュ時のオプションに`--f1`を指定することで、FramePack-F1のサンプリング方法を適用できます。学習スクリプトについても`--f1`を指定してサンプル生成時のオプションを変更する必要があります。
|
| 162 |
+
|
| 163 |
+
デフォルトでは、論文のサンプリング方法 Inverted anti-drifting (無印のFramePackの推論時と同じ、逆順の latent と index を使用)を使用します。`--f1`を指定すると FramePack-F1 の Vanilla sampling (時間順の latent と index を使用)に変更できます。
|
| 164 |
+
</details>
|
| 165 |
+
|
| 166 |
+
### Text Encoder Output Pre-caching / テキストエンコーダー出力の事前キャッシング
|
| 167 |
+
|
| 168 |
+
Text encoder output pre-caching also uses a dedicated script.
|
| 169 |
+
|
| 170 |
+
```bash
python src/musubi_tuner/fpack_cache_text_encoder_outputs.py \
    --dataset_config path/to/toml \
    --text_encoder1 path/to/text_encoder1 \
    --text_encoder2 path/to/text_encoder2 \
    --batch_size 16
```
|
| 177 |
+
|
| 178 |
+
Key differences from HunyuanVideo caching:
|
| 179 |
+
- Uses `fpack_cache_text_encoder_outputs.py`.
|
| 180 |
+
- Requires both `--text_encoder1` (LLaMA) and `--text_encoder2` (CLIP) arguments.
|
| 181 |
+
- Uses `--fp8_llm` option to run the LLaMA Text Encoder 1 in fp8 mode for VRAM savings (similar to `--fp8_t5` in Wan2.1).
|
| 182 |
+
- Saves LLaMA embeddings, attention mask, and CLIP pooler output to the cache file.
|
| 183 |
+
|
| 184 |
+
<details>
|
| 185 |
+
<summary>日本語</summary>
|
| 186 |
+
|
| 187 |
+
テキストエンコーダー出力の事前キャッシングも専用のスクリプトを使用します。
|
| 188 |
+
|
| 189 |
+
HunyuanVideoのキャッシングとの主な違いは次のとおりです。
|
| 190 |
+
- `fpack_cache_text_encoder_outputs.py`を使用します。
|
| 191 |
+
- LLaMAとCLIPの両方の引数が必要です。
|
| 192 |
+
- LLaMAテキストエンコーダー1をfp8モードで実行するための`--fp8_llm`オプションを使用します(Wan2.1の`--fp8_t5`に似ています)。
|
| 193 |
+
- LLaMAの埋め込み、アテンションマスク、CLIPのプーラー出力をキャッシュファイルに保存します。
|
| 194 |
+
|
| 195 |
+
</details>
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
## Training / 学習
|
| 199 |
+
|
| 200 |
+
### Training
|
| 201 |
+
|
| 202 |
+
Training uses a dedicated script `fpack_train_network.py`. Remember FramePack only supports I2V training.
|
| 203 |
+
|
| 204 |
+
```bash
accelerate launch --num_cpu_threads_per_process 1 --mixed_precision bf16 src/musubi_tuner/fpack_train_network.py \
    --dit path/to/dit_model \
    --vae path/to/vae_model.safetensors \
    --text_encoder1 path/to/text_encoder1 \
    --text_encoder2 path/to/text_encoder2 \
    --image_encoder path/to/image_encoder_model.safetensors \
    --dataset_config path/to/toml \
    --sdpa --mixed_precision bf16 \
    --optimizer_type adamw8bit --learning_rate 2e-4 --gradient_checkpointing \
    --timestep_sampling shift --weighting_scheme none --discrete_flow_shift 3.0 \
    --max_data_loader_n_workers 2 --persistent_data_loader_workers \
    --network_module networks.lora_framepack --network_dim 32 \
    --max_train_epochs 16 --save_every_n_epochs 1 --seed 42 \
    --output_dir path/to/output_dir --output_name name-of-lora
```
|
| 220 |
+
|
| 221 |
+
If you use the command prompt (Windows, not PowerShell), you may need to write them in a single line, or use `^` instead of `\` at the end of each line to continue the command.
|
| 222 |
+
|
| 223 |
+
The maximum value for `--blocks_to_swap` is 36. The default resolution for FramePack is 640x640, which requires around 17GB of VRAM. If you run out of VRAM, consider lowering the dataset resolution.
|
| 224 |
+
|
| 225 |
+
Key differences from HunyuanVideo training:
|
| 226 |
+
- Uses `fpack_train_network.py`.
|
| 227 |
+
- `--f1` option is available for FramePack-F1 model training. You need to specify the FramePack-F1 model as `--dit`. This option only changes the sample generation during training. The training process itself is the same as the original FramePack model.
|
| 228 |
+
- **Requires** specifying `--vae`, `--text_encoder1`, `--text_encoder2`, and `--image_encoder`.
|
| 229 |
+
- **Requires** specifying `--network_module networks.lora_framepack`.
|
| 230 |
+
- Optional `--latent_window_size` argument (default 9, should match caching).
|
| 231 |
+
- Memory saving options like `--fp8` (for DiT) and `--fp8_llm` (for Text Encoder 1) are available. `--fp8_scaled` is recommended when using `--fp8` for DiT.
|
| 232 |
+
- `--vae_chunk_size` and `--vae_spatial_tile_sample_min_size` options are available for the VAE to prevent out-of-memory during sampling (similar to caching).
|
| 233 |
+
- `--gradient_checkpointing` and `--gradient_checkpointing_cpu_offload` are available for memory savings. See [HunyuanVideo documentation](./hunyuan_video.md#memory-optimization) for details.
|
| 234 |
+
- If you encounter an error when the batch size is greater than 1 (especially when specifying `--sdpa` or `--xformers`, it will always result in an error), please specify `--split_attn`.
|
| 235 |
+
<!-- - Use `convert_lora.py` for converting the LoRA weights after training, similar to HunyuanVideo. -->
|
| 236 |
+
|
| 237 |
+
Training settings (learning rate, optimizers, etc.) are experimental. Feedback is welcome.
|
| 238 |
+
|
| 239 |
+
<details>
|
| 240 |
+
<summary>日本語</summary>
|
| 241 |
+
|
| 242 |
+
FramePackの学習は専用のスクリプト`fpack_train_network.py`を使用します。FramePackはI2V学習のみをサポートしています。
|
| 243 |
+
|
| 244 |
+
コマンド記述例は英語版を参考にしてください。WindowsでPowerShellではなくコマンドプロンプトを使用している場合、コマンドを1行で記述するか、各行の末尾に`\`の代わりに`^`を付けてコマンドを続ける必要があります。
|
| 245 |
+
|
| 246 |
+
`--blocks_to_swap`の最大値は36です。FramePackのデフォルト解像度(640x640)では、17GB程度のVRAMが必要です。VRAM容量が不足する場合は、データセットの解像度を下げてください。
|
| 247 |
+
|
| 248 |
+
HunyuanVideoの学習との主な違いは次のとおりです。
|
| 249 |
+
- `fpack_train_network.py`を使用します。
|
| 250 |
+
- FramePack-F1モデルの学習時には`--f1`を指定してください。この場合、`--dit`にFramePack-F1モデルを指定する必要があります。このオプションは学習時のサンプル生成時のみに影響し、学習プロセス自体は元のFramePackモデルと同じです。
|
| 251 |
+
- `--vae`、`--text_encoder1`、`--text_encoder2`、`--image_encoder`を指定する必要があります。
|
| 252 |
+
- `--network_module networks.lora_framepack`を指定する必要があります。
|
| 253 |
+
- 必要に応じて`--latent_window_size`引数(デフォルト9)を指定できます(キャッシング時と一致させる必要があります)。
|
| 254 |
+
- `--fp8`(DiT用)や`--fp8_llm`(テキストエンコーダー1用)などのメモリ節約オプションが利用可能です。`--fp8_scaled`を使用することをお勧めします。
|
| 255 |
+
- サンプル生成時にメモリ不足を防ぐため、VAE用の`--vae_chunk_size`、`--vae_spatial_tile_sample_min_size`、`--vae_tiling`オプションが利用可能です(キャッシング時と同様)。
|
| 256 |
+
- メモリ節約のために`--gradient_checkpointing`が利用可能です。
|
| 257 |
+
- バッチサイズが1より大きい場合にエラーが出た時には(特に`--sdpa`や`--xformers`を指定すると必ずエラーになります。)、`--split_attn`を指定してください。
|
| 258 |
+
|
| 259 |
+
</details>
|
| 260 |
+
|
| 261 |
+
## Inference
|
| 262 |
+
|
| 263 |
+
Inference uses a dedicated script `fpack_generate_video.py`.
|
| 264 |
+
|
| 265 |
+
```bash
python src/musubi_tuner/fpack_generate_video.py \
    --dit path/to/dit_model \
    --vae path/to/vae_model.safetensors \
    --text_encoder1 path/to/text_encoder1 \
    --text_encoder2 path/to/text_encoder2 \
    --image_encoder path/to/image_encoder_model.safetensors \
    --image_path path/to/start_image.jpg \
    --prompt "A cat walks on the grass, realistic style." \
    --video_size 512 768 --video_seconds 5 --fps 30 --infer_steps 25 \
    --attn_mode sdpa --fp8_scaled \
    --vae_chunk_size 32 \
    --save_path path/to/save/dir --output_type both \
    --seed 1234 --lora_multiplier 1.0 --lora_weight path/to/lora.safetensors
```
|
| 280 |
+
<!-- --embedded_cfg_scale 10.0 --guidance_scale 1.0 \ -->
|
| 281 |
+
|
| 282 |
+
Key differences from HunyuanVideo inference:
|
| 283 |
+
- Uses `fpack_generate_video.py`.
|
| 284 |
+
- `--f1` option is available for FramePack-F1 model inference (forward generation). You need to specify the FramePack-F1 model as `--dit`.
|
| 285 |
+
- **Requires** specifying `--vae`, `--text_encoder1`, `--text_encoder2`, and `--image_encoder`.
|
| 286 |
+
- **Requires** specifying `--image_path` for the starting frame.
|
| 287 |
+
- **Requires** specifying `--video_seconds` or `--video_sections`. `--video_seconds` specifies the length of the video in seconds, while `--video_sections` specifies the number of sections. If `--video_sections` is specified, `--video_seconds` is ignored.
|
| 288 |
+
- `--video_size` is the size of the generated video, height and width are specified in that order.
|
| 289 |
+
- `--prompt`: Prompt for generation.
|
| 290 |
+
- Optional `--latent_window_size` argument (default 9, should match caching and training).
|
| 291 |
+
- `--fp8_scaled` option is available for DiT to reduce memory usage. Quality may be slightly lower. `--fp8_llm` option is available to reduce memory usage of Text Encoder 1. `--fp8` alone is also an option for DiT but `--fp8_scaled` potentially offers better quality.
|
| 292 |
+
- LoRA loading options (`--lora_weight`, `--lora_multiplier`, `--include_patterns`, `--exclude_patterns`) are available. `--lycoris` is also supported.
|
| 293 |
+
- `--embedded_cfg_scale` (default 10.0) controls the distilled guidance scale.
|
| 294 |
+
- `--guidance_scale` (default 1.0) controls the standard classifier-free guidance scale. **Changing this from 1.0 is generally not recommended for the base FramePack model.**
|
| 295 |
+
- `--guidance_rescale` (default 0.0) is available but typically not needed.
|
| 296 |
+
- `--bulk_decode` option can decode all frames at once, potentially faster but uses more VRAM during decoding. `--vae_chunk_size` option is recommended to prevent out-of-memory errors.
|
| 297 |
+
- `--sample_solver` (default `unipc`) is available but only `unipc` is implemented.
|
| 298 |
+
- `--save_merged_model` option is available to save the DiT model after merging LoRA weights. Inference is skipped if this is specified.
|
| 299 |
+
- `--latent_paddings` option overrides the default padding for each section. Specify it as a comma-separated list of integers, e.g., `--latent_paddings 0,0,0,0`. This option is ignored if `--f1` is specified.
|
| 300 |
+
- `--custom_system_prompt` option overrides the default system prompt for the LLaMA Text Encoder 1. Specify it as a string. See [here](../src/musubi_tuner/hunyuan_model/text_encoder.py#L152) for the default system prompt.
|
| 301 |
+
- `--rope_scaling_timestep_threshold` option is the RoPE scaling timestep threshold, default is None (disabled). If set, RoPE scaling is applied only when the timestep exceeds the threshold. Start with around 800 and adjust as needed. This option is intended for one-frame inference and may not be suitable for other cases.
|
| 302 |
+
- `--rope_scaling_factor` option is the RoPE scaling factor, default is 0.5, assuming a resolution of 2x. For 1.5x resolution, around 0.7 is recommended.
|
| 303 |
+
|
| 304 |
+
Other options like `--video_size`, `--fps`, `--infer_steps`, `--save_path`, `--output_type`, `--seed`, `--attn_mode`, `--blocks_to_swap`, `--vae_chunk_size`, `--vae_spatial_tile_sample_min_size` function similarly to HunyuanVideo/Wan2.1 where applicable. `--vae_tiling` option is also available.
|
| 305 |
+
|
| 306 |
+
`--output_type` supports `latent_images` in addition to the options available in HunyuanVideo/Wan2.1. This option saves the latent and image files in the specified directory.
|
| 307 |
+
|
| 308 |
+
The LoRA weights that can be specified in `--lora_weight` are not limited to the FramePack weights trained in this repository. You can also specify the HunyuanVideo LoRA weights from this repository and the HunyuanVideo LoRA weights from diffusion-pipe (automatic detection).
|
| 309 |
+
|
| 310 |
+
The maximum value for `--blocks_to_swap` is 38.
|
| 311 |
+
|
| 312 |
+
<details>
|
| 313 |
+
<summary>日本語</summary>
|
| 314 |
+
|
| 315 |
+
FramePackの推論は専用のスクリプト`fpack_generate_video.py`を使用します。コマンド記述例は英語版を参考にしてください。
|
| 316 |
+
|
| 317 |
+
HunyuanVideoの推論との主な違いは次のとおりです。
|
| 318 |
+
- `fpack_generate_video.py`を使用します。
|
| 319 |
+
- `--f1`を指定すると、FramePack-F1モデルの推論を行います(順方向で生成)。`--dit`にFramePack-F1モデルを指定する必要があります。
|
| 320 |
+
- `--vae`、`--text_encoder1`、`--text_encoder2`、`--image_encoder`を指定する必要があります。
|
| 321 |
+
- `--image_path`を指定する必要があります(開始フレーム)。
|
| 322 |
+
- `--video_seconds` または `--video_sections` を指定する必要があります。`--video_seconds`は秒単位でのビデオの長さを指定し、`--video_sections`はセクション数を指定します。`--video_sections`を指定した場合、`--video_seconds`は無視されます。
|
| 323 |
+
- `--video_size`は生成するビデオのサイズで、高さと幅をその順番で指定します。
|
| 324 |
+
- `--prompt`: 生成用のプロンプトです。
|
| 325 |
+
- 必要に応じて`--latent_window_size`引数(デフォルト9)を指定できます(キャッシング時、学習時と一致させる必要があります)。
|
| 326 |
+
- DiTのメモリ使用量を削減するために、`--fp8_scaled`オプションを指定可能です。品質はやや低下する可能性があります。またText Encoder 1のメモリ使用量を削減するために、`--fp8_llm`オプションを指定可能です。DiT用に`--fp8`単独のオプションも用意されていますが、`--fp8_scaled`の方が品質が良い可能性があります。
|
| 327 |
+
- LoRAの読み込みオプション(`--lora_weight`、`--lora_multiplier`、`--include_patterns`、`--exclude_patterns`)が利用可能です。LyCORISもサポートされています。
|
| 328 |
+
- `--embedded_cfg_scale`(デフォルト10.0)は、蒸留されたガイダンススケールを制御します。通常は変更しないでください。
|
| 329 |
+
- `--guidance_scale`(デフォルト1.0)は、標準の分類器フリーガイダンススケールを制御します。**FramePackモデルのベースモデルでは、通常1.0から変更しないことをお勧めします。**
|
| 330 |
+
- `--guidance_rescale`(デフォルト0.0)も利用可能ですが、通常は必要ありません。
|
| 331 |
+
- `--bulk_decode`オプションは、すべてのフレームを一度にデコードできるオプションです。高速ですが、デコード中にVRAMを多く使用します。VRAM不足エラーを防ぐために、`--vae_chunk_size`オプションを指定することをお勧めします。
|
| 332 |
+
- `--sample_solver`(デフォルト`unipc`)は利用可能ですが、`unipc`のみが実装されています。
|
| 333 |
+
- `--save_merged_model`オプションは、LoRAの重みをマージした後にDiTモデルを保存するためのオプションです。これを指定すると推論はスキップされます。
|
| 334 |
+
- `--latent_paddings`オプションは、各セクションのデフォルトのパディングを上書きします。カンマ区切りの整数リストとして指定します。例:`--latent_paddings 0,0,0,0`。`--f1`を指定した場合は無視されます。
|
| 335 |
+
- `--custom_system_prompt`オプションは、LLaMA Text Encoder 1のデフォルトのシステムプロンプトを上書きします。文字列として指定します。デフォルトのシステムプロンプトは[こちら](../src/musubi_tuner/hunyuan_model/text_encoder.py#L152)を参照してください。
|
| 336 |
+
- `--rope_scaling_timestep_threshold`オプションはRoPEスケーリングのタイムステップ閾値で、デフォルトはNone(無効)です。設定すると、タイムステップが閾値以上の場合にのみRoPEスケーリングが適用されます。800程度から初めて調整してください。1フレーム推論時での使用を想定しており、それ以外の場合は想定していません。
|
| 337 |
+
- `--rope_scaling_factor`オプションはRoPEスケーリング係数で、デフォルトは0.5で、解像度が2倍の場合を想定しています。1.5倍なら0.7程度が良いでしょう。
|
| 338 |
+
|
| 339 |
+
`--video_size`、`--fps`、`--infer_steps`、`--save_path`、`--output_type`、`--seed`、`--attn_mode`、`--blocks_to_swap`、`--vae_chunk_size`、`--vae_spatial_tile_sample_min_size`などの他のオプションは、HunyuanVideo/Wan2.1と同様に機能します。また`--vae_tiling`オプションも利用可能です。
|
| 340 |
+
|
| 341 |
+
`--output_type`はHunyuanVideo/Wan2.1で利用可能なオプションに加えて、`latent_images`をサポートしています。このオプションは、指定されたディレクトリにlatentと画像ファイルを保存します。
|
| 342 |
+
|
| 343 |
+
`--lora_weight`に指定できるLoRAの重みは、当リポジトリで学習したFramePackの重み以外に、当リポジトリのHunyuanVideoのLoRA、diffusion-pipeのHunyuanVideoのLoRAが指定可能です(自動判定)。
|
| 344 |
+
|
| 345 |
+
`--blocks_to_swap`の最大値は38です。
|
| 346 |
+
</details>
|
| 347 |
+
|
| 348 |
+
## Batch and Interactive Modes / バッチモードとインタラクティブモード
|
| 349 |
+
|
| 350 |
+
In addition to single video generation, FramePack now supports batch generation from file and interactive prompt input:
|
| 351 |
+
|
| 352 |
+
### Batch Mode from File / ファイルからのバッチモード
|
| 353 |
+
|
| 354 |
+
Generate multiple videos from prompts stored in a text file:
|
| 355 |
+
|
| 356 |
+
```bash
|
| 357 |
+
python src/musubi_tuner/fpack_generate_video.py --from_file prompts.txt
|
| 358 |
+
--dit path/to/dit_model --vae path/to/vae_model.safetensors
|
| 359 |
+
--text_encoder1 path/to/text_encoder1 --text_encoder2 path/to/text_encoder2
|
| 360 |
+
--image_encoder path/to/image_encoder_model.safetensors --save_path output_directory
|
| 361 |
+
```
|
| 362 |
+
|
| 363 |
+
The prompts file format:
|
| 364 |
+
- One prompt per line
|
| 365 |
+
- Empty lines and lines starting with # are ignored (comments)
|
| 366 |
+
- Each line can include prompt-specific parameters using command-line style format:
|
| 367 |
+
|
| 368 |
+
```
|
| 369 |
+
A beautiful sunset over mountains --w 832 --h 480 --f 5 --d 42 --s 20 --i path/to/start_image.jpg
|
| 370 |
+
A busy city street at night --w 480 --h 832 --i path/to/another_start.jpg
|
| 371 |
+
```
|
| 372 |
+
|
| 373 |
+
Supported inline parameters (if omitted, default values from the command line are used):
|
| 374 |
+
- `--w`: Width
|
| 375 |
+
- `--h`: Height
|
| 376 |
+
- `--f`: Video seconds
|
| 377 |
+
- `--d`: Seed
|
| 378 |
+
- `--s`: Inference steps
|
| 379 |
+
- `--g` or `--l`: Guidance scale
|
| 380 |
+
- `--i`: Image path (for start image)
|
| 381 |
+
- `--im`: Image mask path
|
| 382 |
+
- `--n`: Negative prompt
|
| 383 |
+
- `--vs`: Video sections
|
| 384 |
+
- `--ei`: End image path
|
| 385 |
+
- `--ci`: Control image path (explained in one-frame inference documentation)
|
| 386 |
+
- `--cim`: Control image mask path (explained in one-frame inference documentation)
|
| 387 |
+
- `--of`: One frame inference mode options (same as `--one_frame_inference` in the command line), options for one-frame inference
|
| 388 |
+
|
| 389 |
+
In batch mode, models are loaded once and reused for all prompts, significantly improving overall generation time compared to multiple single runs.
|
| 390 |
+
|
| 391 |
+
### Interactive Mode / インタラクティブモード
|
| 392 |
+
|
| 393 |
+
Interactive command-line interface for entering prompts:
|
| 394 |
+
|
| 395 |
+
```bash
|
| 396 |
+
python src/musubi_tuner/fpack_generate_video.py --interactive
|
| 397 |
+
--dit path/to/dit_model --vae path/to/vae_model.safetensors
|
| 398 |
+
--text_encoder1 path/to/text_encoder1 --text_encoder2 path/to/text_encoder2
|
| 399 |
+
--image_encoder path/to/image_encoder_model.safetensors --save_path output_directory
|
| 400 |
+
```
|
| 401 |
+
|
| 402 |
+
In interactive mode:
|
| 403 |
+
- Enter prompts directly at the command line
|
| 404 |
+
- Use the same inline parameter format as batch mode
|
| 405 |
+
- Use Ctrl+D (or Ctrl+Z on Windows) to exit
|
| 406 |
+
- Models remain loaded between generations for efficiency
|
| 407 |
+
|
| 408 |
+
<details>
|
| 409 |
+
<summary>日本語</summary>
|
| 410 |
+
|
| 411 |
+
単一動画の生成に加えて、FramePackは現在、ファイルからのバッチ生成とインタラクティブなプロンプト入力をサポートしています。
|
| 412 |
+
|
| 413 |
+
#### ファイルからのバッチモード
|
| 414 |
+
|
| 415 |
+
テキストファイルに保存されたプロンプトから複数の動画を生成します:
|
| 416 |
+
|
| 417 |
+
```bash
|
| 418 |
+
python src/musubi_tuner/fpack_generate_video.py --from_file prompts.txt
|
| 419 |
+
--dit path/to/dit_model --vae path/to/vae_model.safetensors
|
| 420 |
+
--text_encoder1 path/to/text_encoder1 --text_encoder2 path/to/text_encoder2
|
| 421 |
+
--image_encoder path/to/image_encoder_model.safetensors --save_path output_directory
|
| 422 |
+
```
|
| 423 |
+
|
| 424 |
+
プロンプトファイルの形式(サンプルは英語ドキュメントを参照):
|
| 425 |
+
- 1行に1つのプロンプト
|
| 426 |
+
- 空行や#で始まる行は無視されます(コメント)
|
| 427 |
+
- 各行にはコマンドライン形式でプロンプト固有のパラメータを含めることができます:
|
| 428 |
+
|
| 429 |
+
サポートされているインラインパラメータ(省略した場合、コマンドラインのデフォルト値が使用されます)
|
| 430 |
+
- `--w`: 幅
|
| 431 |
+
- `--h`: 高さ
|
| 432 |
+
- `--f`: 動画の秒数
|
| 433 |
+
- `--d`: シード
|
| 434 |
+
- `--s`: 推論ステップ
|
| 435 |
+
- `--g` または `--l`: ガイダンススケール
|
| 436 |
+
- `--i`: 画像パス(開始画像用)
|
| 437 |
+
- `--im`: 画像マスクパス
|
| 438 |
+
- `--n`: ネガティブプロンプト
|
| 439 |
+
- `--vs`: 動画セクション数
|
| 440 |
+
- `--ei`: 終了画像パス
|
| 441 |
+
- `--ci`: 制御画像パス(1フレーム推論のドキュメントで解説)
|
| 442 |
+
- `--cim`: 制御画像マスクパス(1フレーム推論のドキュメントで解説)
|
| 443 |
+
- `--of`: 1フレーム推論モードオプション(コマンドラインの`--one_frame_inference`と同様、1フレーム推論のオプション)
|
| 444 |
+
|
| 445 |
+
バッチモードでは、モデルは一度だけロードされ、すべてのプロンプトで再利用されるため、複数回の単一実行と比較して全体的な生成時間が大幅に改善されます。
|
| 446 |
+
|
| 447 |
+
#### インタラクティブモード
|
| 448 |
+
|
| 449 |
+
プロンプトを入力するためのインタラクティブなコマンドラインインターフェース:
|
| 450 |
+
|
| 451 |
+
```bash
|
| 452 |
+
python src/musubi_tuner/fpack_generate_video.py --interactive
|
| 453 |
+
--dit path/to/dit_model --vae path/to/vae_model.safetensors
|
| 454 |
+
--text_encoder1 path/to/text_encoder1 --text_encoder2 path/to/text_encoder2
|
| 455 |
+
--image_encoder path/to/image_encoder_model.safetensors --save_path output_directory
|
| 456 |
+
```
|
| 457 |
+
|
| 458 |
+
インタラクティブモードでは:
|
| 459 |
+
- コマンドラインで直接プロンプトを入力
|
| 460 |
+
- バッチモードと同じインラインパラメータ形式を使用
|
| 461 |
+
- 終了するには Ctrl+D (Windowsでは Ctrl+Z) を使用
|
| 462 |
+
- 効率のため、モデルは生成間で読み込まれたままになります
|
| 463 |
+
</details>
|
| 464 |
+
|
| 465 |
+
## Advanced Video Control Features (Experimental) / 高度なビデオ制御機能(実験的)
|
| 466 |
+
|
| 467 |
+
This section describes experimental features added to the `fpack_generate_video.py` script to provide finer control over the generated video content, particularly useful for longer videos or sequences requiring specific transitions or states. These features leverage the Inverted Anti-drifting sampling method inherent to FramePack.
|
| 468 |
+
|
| 469 |
+
### **1. End Image Guidance (`--end_image_path`)**
|
| 470 |
+
|
| 471 |
+
* **Functionality:** Guides the generation process to make the final frame(s) of the video resemble a specified target image.
|
| 472 |
+
* **Usage:** `--end_image_path <path_to_image_file>`
|
| 473 |
+
* **Mechanism:** The provided image is encoded using the VAE. This latent representation is used as a target or starting point during the generation of the final video section (which is the first step in Inverted Anti-drifting).
|
| 474 |
+
* **Use Cases:** Defining a clear ending for the video, such as a character striking a specific pose or a product appearing in a close-up.
|
| 475 |
+
|
| 476 |
+
This option is ignored if `--f1` is specified. The end image is not used in the FramePack-F1 model.
|
| 477 |
+
|
| 478 |
+
### **2. Section Start Image Guidance (`--image_path` Extended Format)**
|
| 479 |
+
|
| 480 |
+
* **Functionality:** Guides specific sections within the video to start with a visual state close to a provided image.
|
| 481 |
+
* You can force the start image by setting `--latent_paddings` to `0,0,0,0` (specify the number of sections as a comma-separated list). If `latent_paddings` is set to 1 or more, the specified image will be used as a reference image (default behavior).
|
| 482 |
+
* **Usage:** `--image_path "SECTION_SPEC:path/to/image.jpg;;;SECTION_SPEC:path/to/another.jpg;;;..."`
|
| 483 |
+
* `SECTION_SPEC`: Defines the target section(s). Rules:
|
| 484 |
+
* `0`: The first section of the video (generated last in Inverted Anti-drifting).
|
| 485 |
+
* `-1`: The last section of the video (generated first).
|
| 486 |
+
* `N` (non-negative integer): The N-th section (0-indexed).
|
| 487 |
+
* `-N` (negative integer): The N-th section from the end.
|
| 488 |
+
* `S-E` (range, e.g., `0-2`): Applies the same image guidance to sections S through E (inclusive).
|
| 489 |
+
* Use `;;;` as a separator between definitions.
|
| 490 |
+
* If no image is specified for a section, generation proceeds based on the prompt and preceding (future time) section context.
|
| 491 |
+
* **Mechanism:** When generating a specific section, if a corresponding start image is provided, its VAE latent representation is strongly referenced as the "initial state" for that section. This guides the beginning of the section towards the specified image while attempting to maintain temporal consistency with the subsequent (already generated) section.
|
| 492 |
+
* **Use Cases:** Defining clear starting points for scene changes, specifying character poses or attire at the beginning of certain sections.
|
| 493 |
+
|
| 494 |
+
### **3. Section-Specific Prompts (`--prompt` Extended Format)**
|
| 495 |
+
|
| 496 |
+
* **Functionality:** Allows providing different text prompts for different sections of the video, enabling more granular control over the narrative or action flow.
|
| 497 |
+
* **Usage:** `--prompt "SECTION_SPEC:Prompt text for section(s);;;SECTION_SPEC:Another prompt;;;..."`
|
| 498 |
+
* `SECTION_SPEC`: Uses the same rules as `--image_path`.
|
| 499 |
+
* Use `;;;` as a separator.
|
| 500 |
+
* If a prompt for a specific section is not provided, the prompt associated with index `0` (or the closest specified applicable prompt) is typically used. Check behavior if defaults are critical.
|
| 501 |
+
* **Mechanism:** During the generation of each section, the corresponding section-specific prompt is used as the primary textual guidance for the model.
|
| 502 |
+
* **Prompt Content Recommendation** when using `--latent_paddings 0,0,0,0` without `--f1` (original FramePack model):
|
| 503 |
+
* Recall that FramePack uses Inverted Anti-drifting and references future context.
|
| 504 |
+
* It is recommended to describe "**the main content or state change that should occur in the current section, *and* the subsequent events or states leading towards the end of the video**" in the prompt for each section.
|
| 505 |
+
* Including the content of subsequent sections in the current section's prompt helps the model maintain context and overall coherence.
|
| 506 |
+
* Example: For section 1, the prompt might describe what happens in section 1 *and* briefly summarize section 2 (and beyond).
|
| 507 |
+
* However, based on observations (e.g., the `latent_paddings` comment), the model's ability to perfectly utilize very long-term context might be limited. Experimentation is key. Describing just the "goal for the current section" might also work. Start by trying the "section and onwards" approach.
|
| 508 |
+
* Use the default prompt when `latent_paddings` is >= 1 or `--latent_paddings` is not specified, or when using `--f1` (FramePack-F1 model).
|
| 509 |
+
* **Use Cases:** Describing evolving storylines, gradual changes in character actions or emotions, step-by-step processes over time.
|
| 510 |
+
|
| 511 |
+
### **Combined Usage Example** (with `--f1` not specified)
|
| 512 |
+
|
| 513 |
+
Generating a 3-section video of "A dog runs towards a thrown ball, catches it, and runs back":
|
| 514 |
+
|
| 515 |
+
```bash
|
| 516 |
+
python src/musubi_tuner/fpack_generate_video.py \
|
| 517 |
+
--prompt "0:A dog runs towards a thrown ball, catches it, and runs back;;;1:The dog catches the ball and then runs back towards the viewer;;;2:The dog runs back towards the viewer holding the ball" \
|
| 518 |
+
--image_path "0:./img_start_running.png;;;1:./img_catching.png;;;2:./img_running_back.png" \
|
| 519 |
+
--end_image_path ./img_returned.png \
|
| 520 |
+
--save_path ./output \
|
| 521 |
+
# ... other arguments
|
| 522 |
+
```
|
| 523 |
+
|
| 524 |
+
* **Generation Order:** Section 2 -> Section 1 -> Section 0
|
| 525 |
+
* **Generating Section 2:**
|
| 526 |
+
* Prompt: "The dog runs back towards the viewer holding the ball"
|
| 527 |
+
* Start Image: `./img_running_back.png`
|
| 528 |
+
* End Image: `./img_returned.png` (Initial target)
|
| 529 |
+
* **Generating Section 1:**
|
| 530 |
+
* Prompt: "The dog catches the ball and then runs back towards the viewer"
|
| 531 |
+
* Start Image: `./img_catching.png`
|
| 532 |
+
* Future Context: Generated Section 2 latent
|
| 533 |
+
* **Generating Section 0:**
|
| 534 |
+
* Prompt: "A dog runs towards a thrown ball, catches it, and runs back"
|
| 535 |
+
* Start Image: `./img_start_running.png`
|
| 536 |
+
* Future Context: Generated Section 1 & 2 latents
|
| 537 |
+
|
| 538 |
+
### **Important Considerations**
|
| 539 |
+
|
| 540 |
+
* **Inverted Generation:** Always remember that generation proceeds from the end of the video towards the beginning. Section `-1` (the last section, `2` in the example) is generated first.
|
| 541 |
+
* **Continuity vs. Guidance:** While start image guidance is powerful, drastically different images between sections might lead to unnatural transitions. Balance guidance strength with the need for smooth flow.
|
| 542 |
+
* **Prompt Optimization:** The prompt content recommendation is a starting point. Fine-tune prompts based on observed model behavior and desired output quality.
|
| 543 |
+
|
| 544 |
+
<details>
|
| 545 |
+
<summary>日本語</summary>
|
| 546 |
+
|
| 547 |
+
### **高度な動画制御機能(実験的)**
|
| 548 |
+
|
| 549 |
+
このセクションでは、`fpack_generate_video.py` スクリプトに追加された実験的な機能について説明します。これらの機能は、生成される動画の内容をより詳細に制御するためのもので、特に長い動画や特定の遷移・状態が必要なシーケンスに役立ちます。これらの機能は、FramePack固有のInverted Anti-driftingサンプリング方式を活用しています。
|
| 550 |
+
|
| 551 |
+
#### **1. 終端画像ガイダンス (`--end_image_path`)**
|
| 552 |
+
|
| 553 |
+
* **機能:** 動画の最後のフレーム(群)を指定したターゲット画像に近づけるように生成を誘導します。
|
| 554 |
+
* **書式:** `--end_image_path <画像ファイルパス>`
|
| 555 |
+
* **動作:** 指定された画像はVAEでエンコードされ、その潜在表現が動画の最終セクション(Inverted Anti-driftingでは最初に生成される)の生成時の目標または開始点として使用されます。
|
| 556 |
+
* **用途:** キャラクターが特定のポーズで終わる、特定の商品がクローズアップで終わるなど、動画の結末を明確に定義する場合。
|
| 557 |
+
|
| 558 |
+
このオプションは、`--f1`を指定した場合は無視されます。FramePack-F1モデルでは終端画像は使用されません。
|
| 559 |
+
|
| 560 |
+
#### **2. セクション開始画像ガイダンス (`--image_path` 拡張書式)**
|
| 561 |
+
|
| 562 |
+
* **機能:** 動画内の特定のセクションが、指定された画像に近い視覚状態から始まるように誘導します。
|
| 563 |
+
* `--latent_paddings`を`0,0,0,0`(カンマ区切りでセクション数だけ指定)に設定することで、セクションの開始画像を強制できます。`latent_paddings`が1以上の場合、指定された画像は参照画像として使用されます。
|
| 564 |
+
* **書式:** `--image_path "セクション指定子:画像パス;;;セクション指定子:別の画像パス;;;..."`
|
| 565 |
+
* `セクション指定子`: 対象セクションを定義します。ルール:
|
| 566 |
+
* `0`: 動画の最初のセクション(Inverted Anti-driftingでは最後に生成)。
|
| 567 |
+
    * `-1`: 動画の最後のセクション(最初に生成)。
|
| 568 |
+
* `N`(非負整数): N番目のセクション(0始まり)。
|
| 569 |
+
* `-N`(負整数): 最後からN番目のセクション。
|
| 570 |
+
* `S-E`(範囲, 例:`0-2`): セクションSからE(両端含む)に同じ画像を適用。
|
| 571 |
+
* 区切り文字は `;;;` です。
|
| 572 |
+
* セクションに画像が指定されていない場合、プロンプトと後続(未来時刻)セクションのコンテキストに基づいて生成されます。
|
| 573 |
+
* **動作:** 特定セクションの生成時、対応する開始画像が指定されていれば、そのVAE潜在表現がそのセクションの「初期状態」として強く参照されます。これにより、後続(生成済み)セクションとの時間的連続性を維持しようとしつつ、セクションの始まりを指定画像に近づけます。
|
| 574 |
+
* **用途:** シーン変更の起点を明確にする、特定のセクション開始時のキャラクターのポーズや服装を指定するなど。
|
| 575 |
+
|
| 576 |
+
#### **3. セクション別プロンプト (`--prompt` 拡張書式)**
|
| 577 |
+
|
| 578 |
+
* **機能:** 動画のセクションごとに異なるテキストプロンプトを与え、物語やアクションの流れをより細かく指示できます。
|
| 579 |
+
* **書式:** `--prompt "セクション指定子:プロンプトテキスト;;;セクション指定子:別のプロンプト;;;..."`
|
| 580 |
+
* `セクション指定子`: `--image_path` と同じルールです。
|
| 581 |
+
* 区切り文字は `;;;` です。
|
| 582 |
+
* 特定セクションのプロンプトがない場合、通常はインデックス`0`に関連付けられたプロンプト(または最も近い適用可能な指定プロンプト)が使用されます。デフォルトの挙動が重要な場合は確認してください。
|
| 583 |
+
* **動作:** 各セクションの生成時、対応するセクション別プロンプトがモデルへの主要なテキスト指示として使用されます。
|
| 584 |
+
* `latent_paddings`に`0`を指定した場合(非F1モデル)の **プロンプト内容の推奨:**
|
| 585 |
+
* FramePackはInverted Anti-driftingを採用し、未来のコンテキストを参照することを思い出してください。
|
| 586 |
+
* 各セクションのプロンプトには、「**現在のセクションで起こるべき主要な内容や状態変化、*および*それに続く動画の終端までの内容**」を記述することを推奨します。
|
| 587 |
+
* 現在のセクションのプロンプトに後続セクションの内容を含めることで、モデルが全体的な文脈を把握し、一貫性を保つのに役立ちます。
|
| 588 |
+
* 例:セクション1のプロンプトには、セクション1の内容 *と* セクション2の簡単な要約を記述します。
|
| 589 |
+
* ただし、モデルの長期コンテキスト完全利用能力には限界がある可能性も示唆されています(例:`latent_paddings`コメント)。実験が鍵となります。「現在のセクションの目標」のみを記述するだけでも機能する場合があります。まずは「セクションと以降」アプローチを試すことをお勧めします。
|
| 590 |
+
* 使用するプロンプトは、`latent_paddings`が`1`以上または指定されていない場合、または`--f1`(FramePack-F1モデル)を使用している場合は、通常のプロンプト内容を記述してください。
|
| 591 |
+
* **用途:** 時間経過に伴うストーリーの変化、キャラクターの行動や感情の段階的な変化、段階的なプロセスなどを記述する場合。
|
| 592 |
+
|
| 593 |
+
#### **組み合わせ使用例** (`--f1`未指定時)
|
| 594 |
+
|
| 595 |
+
「投げられたボールに向かって犬が走り、それを捕まえ、走って戻ってくる」3セクション動画の生成:
|
| 596 |
+
(コマンド記述例は英語版を参考にしてください)
|
| 597 |
+
|
| 598 |
+
* **生成順序:** セクション2 → セクション1 → セクション0
|
| 599 |
+
* **セクション2生成時:**
|
| 600 |
+
* プロンプト: "犬がボールを咥えてこちらに向かって走ってくる"
|
| 601 |
+
* 開始画像: `./img_running_back.png`
|
| 602 |
+
* 終端画像: `./img_returned.png` (初期目標)
|
| 603 |
+
* **セクション1生成時:**
|
| 604 |
+
* プロンプト: "犬がボールを捕まえ、その後こちらに向かって走ってくる"
|
| 605 |
+
* 開始画像: `./img_catching.png`
|
| 606 |
+
* 未来コンテキスト: 生成済みセクション2の潜在表現
|
| 607 |
+
* **セクション0生成時:**
|
| 608 |
+
* プロンプト: "犬が投げられたボールに向かって走り、それを捕まえ、走って戻ってくる"
|
| 609 |
+
* 開始画像: `./img_start_running.png`
|
| 610 |
+
* 未来コンテキスト: 生成済みセクション1 & 2の潜在表現
|
| 611 |
+
|
| 612 |
+
#### **重要な考慮事項**
|
| 613 |
+
|
| 614 |
+
* **逆順生成:** 生成は動画の終わりから始まりに向かって進むことを常に意識してください。セクション`-1`(最後のセクション、上の例では `2`)が最初に生成されます。
|
| 615 |
+
* **連続性とガイダンスのバランス:** 開始画像ガイダンスは強力ですが、セクション間で画像が大きく異なると、遷移が不自然になる可能性があります。ガイダンスの強さとスムーズな流れの必要性のバランスを取ってください。
|
| 616 |
+
* **プロンプトの最適化:** 推奨されるプロンプト内容はあくまでも参考です。モデルの観察された挙動と望ましい出力品質に基づいてプロンプトを微調整してください。
|
| 617 |
+
|
| 618 |
+
</details>
|
docs/framepack_1f.md
ADDED
|
@@ -0,0 +1,367 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# FramePack One Frame (Single Frame) Inference and Training / FramePack 1フレーム推論と学習
|
| 2 |
+
|
| 3 |
+
## Overview / 概要
|
| 4 |
+
|
| 5 |
+
This document explains advanced inference and training methods using the FramePack model, particularly focusing on **"1-frame inference"** and its extensions. These features aim to leverage FramePack's flexibility to enable diverse image generation and editing tasks beyond simple video generation.
|
| 6 |
+
|
| 7 |
+
### The Concept and Development of 1-Frame Inference
|
| 8 |
+
|
| 9 |
+
While FramePack is originally a model for generating sequential video frames (or frame sections), it was discovered that by focusing on its internal structure, particularly how it handles temporal information with RoPE (Rotary Position Embedding), interesting control over single-frame generation is possible.
|
| 10 |
+
|
| 11 |
+
1. **Basic 1-Frame Inference**:
|
| 12 |
+
* It takes an initial image and a prompt as input, limiting the number of generated frames to just one.
|
| 13 |
+
* In this process, by intentionally setting a large RoPE timestamp (`target_index`) for the single frame to be generated, a single static image can be obtained that reflects temporal and semantic changes from the initial image according to the prompt.
|
| 14 |
+
* This utilizes FramePack's characteristic of being highly sensitive to RoPE timestamps, as it supports bidirectional contexts like "Inverted anti-drifting." This allows for operations similar to natural language-based image editing, albeit in a limited capacity, without requiring additional training.
|
| 15 |
+
|
| 16 |
+
2. **Kisekaeichi Method (Feature Merging via Post-Reference)**:
|
| 17 |
+
* This method, an extension of basic 1-frame inference, was **proposed by furusu**. In addition to the initial image, it also uses a reference image corresponding to a "next section-start image" (treated as `clean_latent_post`) as input.
|
| 18 |
+
* The RoPE timestamp (`target_index`) for the image to be generated is set to an intermediate value between the timestamps of the initial image and the section-end image.
|
| 19 |
+
* More importantly, masking (e.g., zeroing out specific regions) is applied to the latent representation of each reference image. For example, by setting masks to extract a character's face and body shape from the initial image and clothing textures from the reference image, an image can be generated that fuses the desired features of both, similar to a character "dress-up" or outfit swapping. This method can also be fundamentally achieved without additional training.
|
| 20 |
+
|
| 21 |
+
3. **1f-mc (one frame multi-control) Method (Proximal Frame Blending)**:
|
| 22 |
+
* This method was **proposed by mattyamonaca**. It takes two reference images as input: an initial image (e.g., at `t=0`) and a subsequent image (e.g., at `t=1`, the first frame of a section), and generates a single image blending their features.
|
| 23 |
+
* Unlike Kisekaeichi, latent masking is typically not performed.
|
| 24 |
+
* To fully leverage this method, additional training using LoRA (Low-Rank Adaptation) is recommended. Through training, the model can better learn the relationship and blending method between the two input images to achieve specific editing effects.
|
| 25 |
+
|
| 26 |
+
### Integration into a Generalized Control Framework
|
| 27 |
+
|
| 28 |
+
The concepts utilized in the methods above—specifying reference images, manipulating timestamps, and applying latent masks—have been generalized to create a more flexible control framework.
|
| 29 |
+
Users can arbitrarily specify the following elements for both inference and LoRA training:
|
| 30 |
+
|
| 31 |
+
* **Control Images**: Any set of input images intended to influence the model.
|
| 32 |
+
* **Clean Latent Index (Indices)**: Timestamps corresponding to each control image. These are treated as `clean latent index` internally by FramePack and can be set to any position on the time axis. This is specified as `control_index`.
|
| 33 |
+
* **Latent Masks**: Masks applied to the latent representation of each control image, allowing selective control over which features from the control images are utilized. This is specified as `control_image_mask_path` or the alpha channel of the control image.
|
| 34 |
+
* **Target Index**: The timestamp for the single frame to be generated.
|
| 35 |
+
|
| 36 |
+
This generalized control framework, along with corresponding extensions to the inference and LoRA training tools, has enabled advanced applications such as:
|
| 37 |
+
|
| 38 |
+
* Development of LoRAs that stabilize 1-frame inference effects (e.g., a camera orbiting effect) that were previously unstable with prompts alone.
|
| 39 |
+
* Development of Kisekaeichi LoRAs that learn to perform desired feature merging under specific conditions (e.g., ignoring character information from a clothing reference image), thereby automating the masking process through learning.
|
| 40 |
+
|
| 41 |
+
These features maximize FramePack's potential and open up new creative possibilities in static image generation and editing. Subsequent sections will detail the specific options for utilizing these functionalities.
|
| 42 |
+
|
| 43 |
+
<details>
|
| 44 |
+
<summary>日本語</summary>
|
| 45 |
+
|
| 46 |
+
このドキュメントでは、FramePackモデルを用いた高度な推論および学習手法、特に「1フレーム推論」とその拡張機能について解説します。これらの機能は、FramePackの柔軟性を活かし、動画生成に留まらない多様な画像生成・編集タスクを実現することを目的としています。
|
| 47 |
+
|
| 48 |
+
### 1フレーム推論の発想と発展
|
| 49 |
+
|
| 50 |
+
FramePackは本来、連続する動画フレーム(またはフレームセクション)を生成するモデルですが、その内部構造、特に時間情報を扱うRoPE (Rotary Position Embedding) の扱いに着目することで、単一フレームの生成においても興味深い制御が可能になることが発見されました。
|
| 51 |
+
|
| 52 |
+
1. **基本的な1フレーム推論**:
|
| 53 |
+
* 開始画像とプロンプトを入力とし、生成するフレーム数を1フレームに限定します。
|
| 54 |
+
* この際、生成する1フレームに割り当てるRoPEのタイムスタンプ(`target_index`)を意図的に大きな値に設定することで、開始画像からプロンプトに従って時間的・意味的に変化した単一の静止画を得ることができます。
|
| 55 |
+
* これは、FramePackがInverted anti-driftingなどの双方向コンテキストに対応するため、RoPEのタイムスタンプに対して敏感に反応する特性を利用したものです。これにより、学習なしで限定的ながら自然言語による画像編集に近い操作が可能です。
|
| 56 |
+
|
| 57 |
+
2. **kisekaeichi方式 (ポスト参照による特徴マージ)**:
|
| 58 |
+
* 基本的な1フレーム推論を発展させたこの方式は、**furusu氏により提案されました**。開始画像に加え、「次のセクションの開始画像」に相当する参照画像(`clean_latent_post`として扱われる)も入力として利用します。
|
| 59 |
+
* 生成する画像のRoPEタイムスタンプ(`target_index`)を、開始画像のタイムスタンプとセクション終端画像のタイムスタンプの中間的な値に設定します。
|
| 60 |
+
* さらに重要な点として、各参照画像のlatent表現に対してマスク処理(特定領域を0で埋めるなど)を施します。例えば、開始画像からはキャラクターの顔や体型を、参照画像からは服装のテクスチャを抽出するようにマスクを設定することで、キャラクターの「着せ替え」のような、両者の望ましい特徴を融合させた画像を生成できます。この手法も基本的には学習不要で実現可能です。
|
| 61 |
+
|
| 62 |
+
3. **1f-mc (one frame multi-control) 方式 (近接フレームブレンド)**:
|
| 63 |
+
* この方式は、**mattyamonaca氏により提案されました**。開始画像(例: `t=0`)と、その直後の画像(例: `t=1`、セクションの最初のフレーム)の2つを参照画像として入力し、それらの特徴をブレンドした単一画像を生成します。
|
| 64 |
+
* kisekaeichiとは異なり、latentマスクは通常行いません。
|
| 65 |
+
* この方式の真価を発揮するには、LoRA (Low-Rank Adaptation) による追加学習が推奨されます。学習により、モデルは2つの入力画像間の関係性やブレンド方法をより適切に学習し、特定の編集効果を実現できます。
|
| 66 |
+
|
| 67 |
+
### 汎用的な制御フレームワークへの統合
|
| 68 |
+
|
| 69 |
+
上記の各手法で利用されていた「参照画像の指定」「タイムスタンプの操作」「latentマスクの適用」といった概念を一般化し、より柔軟な制御を可能にするための拡張が行われました。
|
| 70 |
+
ユーザーは以下の要素を任意に指定して、推論およびLoRA学習を行うことができます。
|
| 71 |
+
|
| 72 |
+
* **制御画像 (Control Images)**: モデルに影響を与えるための任意の入力画像群。
|
| 73 |
+
* **Clean Latent Index (Indices)**: 各制御画像に対応するタイムスタンプ。FramePack内部の`clean latent index`として扱われ、時間軸上の任意の位置を指定可能です。`control_index`として指定します。
|
| 74 |
+
* **Latentマスク (Latent Masks)**: 各制御画像のlatentに適用するマスク。これにより、制御画像から利用する特徴を選択的に制御します。`control_image_mask_path`または制御画像のアルファチャンネルとして指定します。
|
| 75 |
+
* **Target Index**: 生成したい単一フレームのタイムスタンプ。
|
| 76 |
+
|
| 77 |
+
この汎用的な制御フレームワークと、それに対応した推論ツールおよびLoRA学習ツールの拡張により、以下のような高度な応用が可能になりました。
|
| 78 |
+
|
| 79 |
+
* プロンプトだけでは不安定だった1フレーム推論の効果(例: カメラ旋回)を安定化させるLoRAの開発。
|
| 80 |
+
* マスク処理を手動で行う代わりに、特定の条件下(例: 服の参照画像からキャラクター情報を無視する)で望ましい特徴マージを行うように学習させたkisekaeichi LoRAの開発。
|
| 81 |
+
|
| 82 |
+
これらの機能は、FramePackのポテンシャルを最大限に引き出し、静止画生成・編集における新たな創造の可能性を拓くものです。以降のセクションでは、これらの機能を実際に利用するための具体的なオプションについて説明します。
|
| 83 |
+
|
| 84 |
+
</details>
|
| 85 |
+
|
| 86 |
+
## One Frame (Single Frame) Training / 1フレーム学習
|
| 87 |
+
|
| 88 |
+
**This feature is experimental.** It trains in the same way as one frame inference.
|
| 89 |
+
|
| 90 |
+
The dataset must be an image dataset. If you use caption files, you need to specify `control_directory` and place the **start images** in that directory. The `image_directory` should contain the images after the change. The filenames of both directories must match. Caption files should be placed in the `image_directory`.
|
| 91 |
+
|
| 92 |
+
If you use JSONL files, specify them as `{"image_path": "/path/to/target_image1.jpg", "control_path": "/path/to/source_image1.jpg", "caption": "The object changes to red."}`. The `image_path` should point to the images after the change, and `control_path` should point to the starting images.
|
| 93 |
+
|
| 94 |
+
For the dataset configuration, see [here](./dataset_config.md#sample-for-image-dataset-with-control-images) and [here](./dataset_config.md#framepack-one-frame-training). There are also examples for kisekaeichi and 1f-mc settings.
|
| 95 |
+
|
| 96 |
+
For single frame training, specify `--one_frame` in `fpack_cache_latents.py` to create the cache. You can also use `--one_frame_no_2x` and `--one_frame_no_4x` options, which have the same meaning as `no_2x` and `no_4x` during inference. It is recommended to set these options to match the inference settings.
|
| 97 |
+
|
| 98 |
+
If you change whether to use one frame training or these options, please overwrite the existing cache without specifying `--skip_existing`.
|
| 99 |
+
|
| 100 |
+
Specify `--one_frame` in `fpack_train_network.py` to change the inference method during sample generation.
|
| 101 |
+
|
| 102 |
+
The optimal training settings are currently unknown. Feedback is welcome.
|
| 103 |
+
|
| 104 |
+
### Example of prompt file description for sample generation
|
| 105 |
+
|
| 106 |
+
The command line option `--one_frame_inference` corresponds to `--of`, and `--control_image_path` corresponds to `--ci`.
|
| 107 |
+
|
| 108 |
+
Note that `--ci` can be specified multiple times, but `--control_image_path` is specified as `--control_image_path img1.png img2.png`, while `--ci` is specified as `--ci img1.png --ci img2.png`.
|
| 109 |
+
|
| 110 |
+
Normal single frame training:
|
| 111 |
+
```
|
| 112 |
+
The girl wears a school uniform. --i path/to/start.png --ci path/to/start.png --of no_2x,no_4x,target_index=1,control_index=0 --f 1 --s 10 --fs 7 --d 1234 --w 384 --h 576
|
| 113 |
+
```
|
| 114 |
+
|
| 115 |
+
Kisekaeichi training:
|
| 116 |
+
```
|
| 117 |
+
The girl wears a school uniform. --i path/to/start_with_alpha.png --ci path/to/ref_with_alpha.png --ci path/to/start_with_alpha.png --of no_post,no_2x,no_4x,target_index=5,control_index=0;10 --f 1 --s 10 --fs 7 --d 1234 --w 384 --h 576
|
| 118 |
+
```
|
| 119 |
+
|
| 120 |
+
<details>
|
| 121 |
+
<summary>日本語</summary>
|
| 122 |
+
|
| 123 |
+
**この機能は実験的なものです。** 1フレーム推論と同様の方法で学習を行います。
|
| 124 |
+
|
| 125 |
+
データセットは画像データセットである必要があります。キャプションファイルを用いる場合は、`control_directory`を追加で指定し、そのディレクトリに**開始画像**を格納してください。`image_directory`には変化後の画像を格納します。両者のファイル名は一致させる必要があります。キャプションファイルは`image_directory`に格納してください。
|
| 126 |
+
|
| 127 |
+
JSONLファイルを用いる場合は、`{"image_path": "/path/to/target_image1.jpg", "control_path": "/path/to/source_image1.jpg", "caption": "The object changes to red"}`のように指定してください。`image_path`は変化後の画像、`control_path`は開始画像を指定します。
|
| 128 |
+
|
| 129 |
+
データセットの設定については、[こちら](./dataset_config.md#sample-for-image-dataset-with-control-images)と[こちら](./dataset_config.md#framepack-one-frame-training)も参照してください。kisekaeichiと1f-mcの設定例もそちらにあります。
|
| 130 |
+
|
| 131 |
+
1フレーム学習時は、`fpack_cache_latents.py`に`--one_frame`を指定してキャッシュを作成してください。また`--one_frame_no_2x`と`--one_frame_no_4x`オプションも利用可能です。推論時の`no_2x`、`no_4x`と同じ意味を持ちますので、推論時と同じ設定にすることをお勧めします。
|
| 132 |
+
|
| 133 |
+
1フレーム学習か否かを変更する場合、またこれらのオプションを変更する場合は、`--skip_existing`を指定せずに既存のキャッシュを上書きしてください。
|
| 134 |
+
|
| 135 |
+
また、`fpack_train_network.py`に`--one_frame`を指定してサンプル画像生成時の推論方法を変更してください。
|
| 136 |
+
|
| 137 |
+
最適な学習設定は今のところ不明です。フィードバックを歓迎します。
|
| 138 |
+
|
| 139 |
+
**サンプル生成のプロンプトファイル記述例**
|
| 140 |
+
|
| 141 |
+
コマンドラインオプション`--one_frame_inference`に相当する `--of`と、`--control_image_path`に相当する`--ci`が用意されています。
|
| 142 |
+
|
| 143 |
+
※ `--ci`は複数指定可能ですが、`--control_image_path`は`--control_image_path img1.png img2.png`のようにスペースで区切るのに対して、`--ci`は`--ci img1.png --ci img2.png`のように指定するので注意してください。
|
| 144 |
+
|
| 145 |
+
通常の1フレーム学習:
|
| 146 |
+
```
|
| 147 |
+
The girl wears a school uniform. --i path/to/start.png --ci path/to/start.png --of no_2x,no_4x,target_index=1,control_index=0 --f 1 --s 10 --fs 7 --d 1234 --w 384 --h 576
|
| 148 |
+
```
|
| 149 |
+
|
| 150 |
+
kisekaeichi方式:
|
| 151 |
+
```
|
| 152 |
+
The girl wears a school uniform. --i path/to/start_with_alpha.png --ci path/to/ref_with_alpha.png --ci path/to/start_with_alpha.png --of no_post,no_2x,no_4x,target_index=5,control_index=0;10 --f 1 --s 10 --fs 7 --d 1234 --w 384 --h 576
|
| 153 |
+
```
|
| 154 |
+
|
| 155 |
+
</details>
|
| 156 |
+
|
| 157 |
+
## One (single) Frame Inference / 1フレーム推論
|
| 158 |
+
|
| 159 |
+
**This feature is highly experimental** and not officially supported. It is intended for users who want to explore the potential of FramePack for one frame inference, which is not a standard feature of the model.
|
| 160 |
+
|
| 161 |
+
This script also allows for one frame inference, which is not an official feature of FramePack but rather a custom implementation.
|
| 162 |
+
|
| 163 |
+
Theoretically, it generates an image after a specified time from the starting image, following the prompt. This means that, although limited, it allows for natural language-based image editing.
|
| 164 |
+
|
| 165 |
+
To perform one frame inference, specify some option in the `--one_frame_inference` option. Here is an example:
|
| 166 |
+
|
| 167 |
+
```bash
|
| 168 |
+
--video_sections 1 --output_type latent_images --one_frame_inference default --image_path start_image.png --control_image_path start_image.png
|
| 169 |
+
```
|
| 170 |
+
|
| 171 |
+
The `--image_path` is used to obtain the SIGCLIP features for one frame inference. Normally, you should specify the starting image. The `--control_image_path` is newly used to specify the control image, but for normal one frame inference, you should also specify the starting image.
|
| 172 |
+
|
| 173 |
+
The `--one_frame_inference` option is recommended to be set to `default` or `no_2x,no_4x`. If you specify `--output_type` as `latent_images`, both the latent and image will be saved.
|
| 174 |
+
|
| 175 |
+
You can specify the following strings in the `--one_frame_inference` option, separated by commas:
|
| 176 |
+
|
| 177 |
+
- `no_2x`: Generates without passing clean latents 2x with zero vectors to the model. Slightly improves generation speed. The impact on generation results is unknown.
|
| 178 |
+
- `no_4x`: Generates without passing clean latents 4x with zero vectors to the model. Slightly improves generation speed. The impact on generation results is unknown.
|
| 179 |
+
- `no_post`: Generates without passing clean latents post with zero vectors to the model. Improves generation speed by about 20%, but may result in unstable generation.
|
| 180 |
+
- `target_index=<integer>`: Specifies the index of the image to be generated. The default is the last frame (i.e., `latent_window_size`).
|
| 181 |
+
|
| 182 |
+
For example, you can use `--one_frame_inference default` to pass clean latents 2x, clean latents 4x, and post to the model. `--one_frame_inference no_2x,no_4x` if you want to skip passing clean latents 2x and 4x to the model. `--one_frame_inference target_index=9` can be used to specify the target index for the generated image.
|
| 183 |
+
|
| 184 |
+
The `--one_frame_inference` option also supports advanced inference, which is described in the next section. This option allows for more detailed control using additional parameters like `target_index` and `control_index` within this option.
|
| 185 |
+
|
| 186 |
+
Normally, specify `--video_sections 1` to indicate only one section (one image).
|
| 187 |
+
|
| 188 |
+
Increasing `target_index` from the default of 9 may result in larger changes. It has been confirmed that generation can be performed without breaking up to around 40.
|
| 189 |
+
|
| 190 |
+
The `--one_frame_auto_resize` option has been added to automatically adjust the image size based on the control image size when `--one_frame_inference` is specified. If this option is enabled, the image size will be adjusted to the nearest bucket size with the specified width\*height, based on the control image size while maintaining the aspect ratio. This can be useful when you need to generate multiple images with different sizes.
|
| 191 |
+
|
| 192 |
+
The `--end_image_path` is ignored for one frame inference.
|
| 193 |
+
|
| 194 |
+
<details>
|
| 195 |
+
<summary>日本語</summary>
|
| 196 |
+
|
| 197 |
+
**この機能は非常に実験的であり**、公式にはサポートされていません。FramePackを使用して1フレーム推論の可能性を試したいユーザーに向けたものです。
|
| 198 |
+
|
| 199 |
+
このスクリプトでは、単一画像の推論を行うこともできます。FramePack公式の機能ではなく、独自の実装です。
|
| 200 |
+
|
| 201 |
+
理論的には、開始画像から、プロンプトに従い、指定時間経過後の画像を生成します。つまり制限付きですが自然言語による画像編集を行うことができます。
|
| 202 |
+
|
| 203 |
+
単一画像推論を行うには`--one_frame_inference`オプションに、何らかのオプションを指定してください。記述例は以下の通りです。
|
| 204 |
+
|
| 205 |
+
```bash
|
| 206 |
+
--video_sections 1 --output_type latent_images --one_frame_inference default --image_path start_image.png --control_image_path start_image.png
|
| 207 |
+
```
|
| 208 |
+
|
| 209 |
+
`--image_path`は、1フレーム推論ではSIGCLIPの特徴量を取得するために用いられます。通常は開始画像を指定してください。`--control_image_path`は新しく追加された引数で、制御用画像を指定するために用いられますが、通常は開始画像を指定してください。
|
| 210 |
+
|
| 211 |
+
`--one_frame_inference`のオプションは、`default`または `no_2x,no_4x`を推奨します。`--output_type`に`latent_images`を指定するとlatentと画像の両方が保存されます。
|
| 212 |
+
|
| 213 |
+
`--one_frame_inference`のオプションには、カンマ区切りで以下のオプションを任意個数指定できます。
|
| 214 |
+
|
| 215 |
+
- `no_2x`: ゼロベクトルの clean latents 2xをモデルに渡さずに生成します。わずかに生成速度が向上します。生成結果への影響は不明です。
|
| 216 |
+
- `no_4x`: ゼロベクトルの clean latents 4xをモデルに渡さずに生成します。わずかに生成速度が向上します。生成結果への影響は不明です。
|
| 217 |
+
- `no_post`: ゼロベクトルの clean latents の post を渡さずに生成します。生成速度が20%程度向上しますが、生成結果が不安定になる場合があります。
|
| 218 |
+
- `target_index=<整数>`: 生成する画像のindexを指定します。デフォルトは最後のフレームです(=latent_window_size)。
|
| 219 |
+
|
| 220 |
+
たとえば、`--one_frame_inference default`を使用すると、clean latents 2x、clean latents 4x、postをモデルに渡します。`--one_frame_inference no_2x,no_4x`を使用すると、clean latents 2xと4xをモデルに渡すのをスキップします。`--one_frame_inference target_index=9`を使用して、生成する画像のターゲットインデックスを指定できます。
|
| 221 |
+
|
| 222 |
+
後述の高度な推論では、このオプション内で `target_index`、`control_index` といった追加のパラメータを指定して、より詳細な制御が可能です。
|
| 223 |
+
|
| 224 |
+
clean latents 2x、clean latents 4x、postをモデルに渡す場合でも値はゼロベクトルですが、値を渡すか否かで結果は変わります。特に`no_post`を指定すると、`latent_window_size`を大きくしたときに生成結果が不安定になる場合があります。
|
| 225 |
+
|
| 226 |
+
通常は`--video_sections 1` として1セクションのみ(画像1枚)を指定してください。
|
| 227 |
+
|
| 228 |
+
`target_index` をデフォルトの9から大きくすると、変化量が大きくなる可能性があります。40程度までは破綻なく生成されることを確認しています。
|
| 229 |
+
|
| 230 |
+
`--one_frame_auto_resize`オプションが追加されました。`--one_frame_inference`を指定した場合に、制御用画像のサイズに基づいて自動的に画像サイズを調整します。このオプションを有効にすると、画像サイズは、アスペクト比を維持しつつ制御用画像のサイズを基準に、指定された幅\*高さの最も近いバケットサイズに調整されます。異なるサイズの画像を複数生成する必要がある場合に便利です。
|
| 231 |
+
|
| 232 |
+
`--end_image_path`は無視されます。
|
| 233 |
+
|
| 234 |
+
</details>
|
| 235 |
+
|
| 236 |
+
## kisekaeichi method (Post Reference Options) and 1f-mc (Multi-Control) / kisekaeichi方式(ポスト参照オプション)と1f-mc(マルチコントロール)
|
| 237 |
+
|
| 238 |
+
The `kisekaeichi` method was proposed by furusu. The `1f-mc` method was proposed by mattyamonaca in pull request [#304](https://github.com/kohya-ss/musubi-tuner/pull/304).
|
| 239 |
+
|
| 240 |
+
In this repository, these methods have been integrated and can be specified with the `--one_frame_inference` option. This allows for specifying any number of control images as clean latents, along with indices. This means you can specify multiple starting images and multiple clean latent posts. Additionally, masks can be applied to each image.
|
| 241 |
+
|
| 242 |
+
It is expected to work only with FramePack (non-F1 model) and not with F1 models.
|
| 243 |
+
|
| 244 |
+
The following options have been added to `--one_frame_inference`. These can be used in conjunction with existing flags like `target_index`, `no_post`, `no_2x`, and `no_4x`.
|
| 245 |
+
|
| 246 |
+
- `control_index=<integer_or_semicolon_separated_integers>`: Specifies the index(es) of the clean latent for the control image(s). You must specify the same number of indices as the number of control images specified with `--control_image_path`.
|
| 247 |
+
|
| 248 |
+
Additionally, the following command-line options have been added. These arguments are only valid when `--one_frame_inference` is specified.
|
| 249 |
+
|
| 250 |
+
- `--control_image_path <path1> [<path2> ...]` : Specifies the path(s) to control (reference) image(s) for one frame inference. Provide one or more paths separated by spaces. Images with an alpha channel can be specified. If an alpha channel is present, it is used as a mask for the clean latent.
|
| 251 |
+
- `--control_image_mask_path <path1> [<path2> ...]` : Specifies the path(s) to grayscale mask(s) to be applied to the control image(s). Provide one or more paths separated by spaces. Each mask is applied to the corresponding control image. The 255 areas are referenced, while the 0 areas are ignored.
|
| 252 |
+
|
| 253 |
+
**Example of specifying kisekaeichi:**
|
| 254 |
+
|
| 255 |
+
The kisekaeichi method works without training, but using a dedicated LoRA may yield better results.
|
| 256 |
+
|
| 257 |
+
```bash
|
| 258 |
+
--video_sections 1 --output_type latent_images --image_path start_image.png --control_image_path start_image.png clean_latent_post_image.png \
|
| 259 |
+
--one_frame_inference target_index=1,control_index=0;10,no_post,no_2x,no_4x --control_image_mask_path ctrl_mask1.png ctrl_mask2.png
|
| 260 |
+
```
|
| 261 |
+
|
| 262 |
+
In this example, `start_image.png` (for `clean_latent_pre`) and `clean_latent_post_image.png` (for `clean_latent_post`) are the reference images. The `target_index` specifies the index of the generated image. The `control_index` specifies the clean latent index for each control image, so it will be `0;10`. The masks for the control images are specified with `--control_image_mask_path`.
|
| 263 |
+
|
| 264 |
+
The optimal values for `target_index` and `control_index` are unknown. The `target_index` should be specified as 1 or higher. The `control_index` should be set to an appropriate value relative to `latent_window_size`. Specifying 1 for `target_index` results in less change from the starting image, but may introduce noise. Specifying 9 or 13 may reduce noise but result in larger changes from the original image.
|
| 265 |
+
|
| 266 |
+
The `control_index` should be larger than `target_index`. Typically, it is set to `10`, but larger values (e.g., around `13-16`) may also work.
|
| 267 |
+
|
| 268 |
+
Sample images and command lines for reproduction are as follows:
|
| 269 |
+
|
| 270 |
+
```bash
|
| 271 |
+
python fpack_generate_video.py --video_size 832 480 --video_sections 1 --infer_steps 25 \
|
| 272 |
+
--prompt "The girl in a school blazer in a classroom." --save_path path/to/output --output_type latent_images \
|
| 273 |
+
--dit path/to/dit --vae path/to/vae --text_encoder1 path/to/text_encoder1 --text_encoder2 path/to/text_encoder2 \
|
| 274 |
+
--image_encoder path/to/image_encoder --attn_mode sdpa --vae_spatial_tile_sample_min_size 128 --vae_chunk_size 32 \
|
| 275 |
+
--image_path path/to/kisekaeichi_start.png --control_image_path path/to/kisekaeichi_start.png path/to/kisekaeichi_ref.png
|
| 276 |
+
--one_frame_inference target_index=1,control_index=0;10,no_2x,no_4x,no_post
|
| 277 |
+
--control_image_mask_path path/to/kisekaeichi_start_mask.png path/to/kisekaeichi_ref_mask.png --seed 1234
|
| 278 |
+
```
|
| 279 |
+
|
| 280 |
+
Specify `--fp8_scaled` and `--blocks_to_swap` options according to your VRAM capacity.
|
| 281 |
+
|
| 282 |
+
- [kisekaeichi_start.png](./kisekaeichi_start.png)
|
| 283 |
+
- [kisekaeichi_ref.png](./kisekaeichi_ref.png)
|
| 284 |
+
- [kisekaeichi_start_mask.png](./kisekaeichi_start_mask.png)
|
| 285 |
+
- [kisekaeichi_ref_mask.png](./kisekaeichi_ref_mask.png)
|
| 286 |
+
|
| 287 |
+
Generation result: [kisekaeichi_result.png](./kisekaeichi_result.png)
|
| 288 |
+
|
| 289 |
+
|
| 290 |
+
**Example of 1f-mc (Multi-Control):**
|
| 291 |
+
|
| 292 |
+
```bash
|
| 293 |
+
--video_sections 1 --output_type latent_images --image_path start_image.png --control_image_path start_image.png 2nd_image.png \
|
| 294 |
+
--one_frame_inference target_index=9,control_index=0;1,no_2x,no_4x
|
| 295 |
+
```
|
| 296 |
+
|
| 297 |
+
In this example, `start_image.png` is the starting image, and `2nd_image.png` is the reference image. The `target_index=9` specifies the index of the generated image, while `control_index=0;1` specifies the clean latent indices for each control image.
|
| 298 |
+
|
| 299 |
+
1f-mc is intended to be used in combination with a trained LoRA, so adjust `target_index` and `control_index` according to the LoRA's description.
|
| 300 |
+
|
| 301 |
+
<details>
|
| 302 |
+
<summary>日本語</summary>
|
| 303 |
+
|
| 304 |
+
`kisekaeichi`方式はfurusu氏により提案されました。また`1f-mc`方式はmattyamonaca氏によりPR [#304](https://github.com/kohya-ss/musubi-tuner/pull/304) で提案されました。
|
| 305 |
+
|
| 306 |
+
当リポジトリではこれらの方式を統合し、`--one_frame_inference`オプションで指定できるようにしました。これにより、任意の枚数の制御用画像を clean latentとして指定し、さらにインデックスを指定できます。つまり開始画像の複数枚指定やclean latent postの複数枚指定などが可能です。また、それぞれの画像にマスクを適用することもできます。
|
| 307 |
+
|
| 308 |
+
なお、FramePack無印のみ動作し、F1モデルでは動作しないと思われます。
|
| 309 |
+
|
| 310 |
+
`--one_frame_inference`に以下のオプションが追加されています。`target_index`、`no_post`、`no_2x`や`no_4x`など既存のフラグと併用できます。
|
| 311 |
+
|
| 312 |
+
- `control_index=<整数またはセミコロン区切りの整数>`: 制御用画像のclean latentのインデックスを指定します。`--control_image_path`で指定した制御用画像の数と同じ数のインデックスを指定してください。
|
| 313 |
+
|
| 314 |
+
またコマンドラインオプションに以下が追加されています。これらの引数は`--one_frame_inference`を指定した場合のみ有効です。
|
| 315 |
+
|
| 316 |
+
- `--control_image_path <パス1> [<パス2> ...]` : 1フレーム推論用の制御用(参照)画像のパスを1つ以上、スペース区切りで指定します。アルファチャンネルを持つ画像が指定可能です。アルファチャンネルがある場合は、clean latentへのマスクとして利用されます。
|
| 317 |
+
- `--control_image_mask_path <パス1> [<パス2> ...]` : 制御用画像に適用するグレースケールマスクのパスを1つ以上、スペース区切りで指定します。各マスクは対応する制御用画像に適用されます。255の部分が参照される部分、0の部分が無視される部分です。
|
| 318 |
+
|
| 319 |
+
**kisekaeichiの指定例**:
|
| 320 |
+
|
| 321 |
+
kisekaeichi方式は学習なしでも動作しますが、専用のLoRAを使用することで、より良い結果が得られる可能性があります。
|
| 322 |
+
|
| 323 |
+
```bash
|
| 324 |
+
--video_sections 1 --output_type latent_images --image_path start_image.png --control_image_path start_image.png clean_latent_post_image.png \
|
| 325 |
+
--one_frame_inference target_index=1,control_index=0;10,no_post,no_2x,no_4x --control_image_mask_path ctrl_mask1.png ctrl_mask2.png
|
| 326 |
+
```
|
| 327 |
+
|
| 328 |
+
`start_image.png`(clean_latent_preに相当)と`clean_latent_post_image.png`は参照画像(clean_latent_postに相当)です。`target_index`は生成する画像のインデックスを指定します。`control_index`はそれぞれの制御用画像のclean latent indexを指定しますので、`0;10` になります。また`--control_image_mask_path`に制御用画像に適用するマスクを指定します。
|
| 329 |
+
|
| 330 |
+
`target_index`、`control_index`の最適値は不明です。`target_index`は1以上を指定してください。`control_index`は`latent_window_size`に対して適切な値を指定してください。`target_index`に1を指定すると開始画像からの変化が少なくなりますが、ノイズが乗ったりすることが多いようです。9や13などを指定するとノイズは改善されるかもしれませんが、元の画像からの変化が大きくなります。
|
| 331 |
+
|
| 332 |
+
`control_index`は`target_index`より大きい値を指定してください。通常は`10`ですが、これ以上大きな値、たとえば`13~16`程度でも動作するようです。
|
| 333 |
+
|
| 334 |
+
サンプル画像と再現のためのコマンドラインは以下のようになります。
|
| 335 |
+
|
| 336 |
+
```bash
|
| 337 |
+
python fpack_generate_video.py --video_size 832 480 --video_sections 1 --infer_steps 25 \
|
| 338 |
+
--prompt "The girl in a school blazer in a classroom." --save_path path/to/output --output_type latent_images \
|
| 339 |
+
--dit path/to/dit --vae path/to/vae --text_encoder1 path/to/text_encoder1 --text_encoder2 path/to/text_encoder2 \
|
| 340 |
+
--image_encoder path/to/image_encoder --attn_mode sdpa --vae_spatial_tile_sample_min_size 128 --vae_chunk_size 32 \
|
| 341 |
+
--image_path path/to/kisekaeichi_start.png --control_image_path path/to/kisekaeichi_start.png path/to/kisekaeichi_ref.png
|
| 342 |
+
--one_frame_inference target_index=1,control_index=0;10,no_2x,no_4x,no_post
|
| 343 |
+
--control_image_mask_path path/to/kisekaeichi_start_mask.png path/to/kisekaeichi_ref_mask.png --seed 1234
|
| 344 |
+
```
|
| 345 |
+
|
| 346 |
+
VRAM容量に応じて、`--fp8_scaled`や`--blocks_to_swap`等のオプションを調整してください。
|
| 347 |
+
|
| 348 |
+
- [kisekaeichi_start.png](./kisekaeichi_start.png)
|
| 349 |
+
- [kisekaeichi_ref.png](./kisekaeichi_ref.png)
|
| 350 |
+
- [kisekaeichi_start_mask.png](./kisekaeichi_start_mask.png)
|
| 351 |
+
- [kisekaeichi_ref_mask.png](./kisekaeichi_ref_mask.png)
|
| 352 |
+
|
| 353 |
+
生成結果:
|
| 354 |
+
- [kisekaeichi_result.png](./kisekaeichi_result.png)
|
| 355 |
+
|
| 356 |
+
**1f-mcの指定例**:
|
| 357 |
+
|
| 358 |
+
```bash
|
| 359 |
+
--video_sections 1 --output_type latent_images --image_path start_image.png --control_image_path start_image.png 2nd_image.png \
|
| 360 |
+
--one_frame_inference target_index=9,control_index=0;1,no_2x,no_4x
|
| 361 |
+
```
|
| 362 |
+
|
| 363 |
+
この例では、`start_image.png`が開始画像で、`2nd_image.png`が参照画像です。`target_index=9`は生成する画像のインデックスを指定し、`control_index=0;1`はそれぞれの制御用画像のclean latent indexを指定しています。
|
| 364 |
+
|
| 365 |
+
1f-mcは学習したLoRAと組み合わせることを想定していますので、そのLoRAの説明に従って、`target_index`や`control_index`を調整してください。
|
| 366 |
+
|
| 367 |
+
</details>
|
docs/hunyuan_video.md
ADDED
|
@@ -0,0 +1,553 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
> 📝 Click on the language section to expand / 言語をクリックして展開
|
| 2 |
+
|
| 3 |
+
# HunyuanVideo
|
| 4 |
+
|
| 5 |
+
## Overview / 概要
|
| 6 |
+
|
| 7 |
+
This document describes the usage of the [HunyuanVideo](https://github.com/Tencent/HunyuanVideo) architecture within the Musubi Tuner framework. HunyuanVideo is a video generation model that supports text-to-video generation.
|
| 8 |
+
|
| 9 |
+
This feature is experimental.
|
| 10 |
+
|
| 11 |
+
<details>
|
| 12 |
+
<summary>日本語</summary>
|
| 13 |
+
|
| 14 |
+
このドキュメントは、Musubi Tunerフレームワーク内での[HunyuanVideo](https://github.com/Tencent/HunyuanVideo)アーキテクチャの使用法について説明しています。HunyuanVideoはテキストから動画を生成するモデルです。
|
| 15 |
+
|
| 16 |
+
この機能は実験的なものです。
|
| 17 |
+
</details>
|
| 18 |
+
|
| 19 |
+
## Download the model / モデルのダウンロード
|
| 20 |
+
|
| 21 |
+
There are two ways to download the model.
|
| 22 |
+
|
| 23 |
+
### Use the Official HunyuanVideo Model / 公式HunyuanVideoモデルを使う
|
| 24 |
+
|
| 25 |
+
Download the model following the [official README](https://github.com/Tencent/HunyuanVideo/blob/main/ckpts/README.md) and place it in your chosen directory with the following structure:
|
| 26 |
+
|
| 27 |
+
```
|
| 28 |
+
ckpts
|
| 29 |
+
├──hunyuan-video-t2v-720p
|
| 30 |
+
│ ├──transformers
|
| 31 |
+
│ ├──vae
|
| 32 |
+
├──text_encoder
|
| 33 |
+
├──text_encoder_2
|
| 34 |
+
├──...
|
| 35 |
+
```
|
| 36 |
+
|
| 37 |
+
### Using ComfyUI Models for Text Encoder / Text EncoderにComfyUI提供のモデルを使う
|
| 38 |
+
|
| 39 |
+
This method is easier.
|
| 40 |
+
|
| 41 |
+
For DiT and VAE, use the HunyuanVideo models.
|
| 42 |
+
|
| 43 |
+
From https://huggingface.co/tencent/HunyuanVideo/tree/main/hunyuan-video-t2v-720p/transformers, download [mp_rank_00_model_states.pt](https://huggingface.co/tencent/HunyuanVideo/resolve/main/hunyuan-video-t2v-720p/transformers/mp_rank_00_model_states.pt) and place it in your chosen directory.
|
| 44 |
+
|
| 45 |
+
(Note: The fp8 model on the same page is unverified.)
|
| 46 |
+
|
| 47 |
+
If you are training with `--fp8_base`, you can use `mp_rank_00_model_states_fp8.safetensors` from [here](https://huggingface.co/kohya-ss/HunyuanVideo-fp8_e4m3fn-unofficial) instead of `mp_rank_00_model_states.pt`. (This file is unofficial and simply converts the weights to float8_e4m3fn.)
|
| 48 |
+
|
| 49 |
+
From https://huggingface.co/tencent/HunyuanVideo/tree/main/hunyuan-video-t2v-720p/vae, download [pytorch_model.pt](https://huggingface.co/tencent/HunyuanVideo/resolve/main/hunyuan-video-t2v-720p/vae/pytorch_model.pt) and place it in your chosen directory.
|
| 50 |
+
|
| 51 |
+
For the Text Encoder, use the models provided by ComfyUI. Refer to [ComfyUI's page](https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_video/), from https://huggingface.co/Comfy-Org/HunyuanVideo_repackaged/tree/main/split_files/text_encoders, download `llava_llama3_fp16.safetensors` (Text Encoder 1, LLM) and `clip_l.safetensors` (Text Encoder 2, CLIP) and place them in your chosen directory.
|
| 52 |
+
|
| 53 |
+
(Note: The fp8 LLM model on the same page is unverified.)
|
| 54 |
+
|
| 55 |
+
<details>
|
| 56 |
+
<summary>日本語</summary>
|
| 57 |
+
|
| 58 |
+
以下のいずれかの方法で、モデルをダウンロードしてください。
|
| 59 |
+
|
| 60 |
+
### HunyuanVideoの公式モデルを使う
|
| 61 |
+
|
| 62 |
+
[公式のREADME](https://github.com/Tencent/HunyuanVideo/blob/main/ckpts/README.md)を参考にダウンロードし、任意のディレクトリに以下のように配置します。
|
| 63 |
+
|
| 64 |
+
```
|
| 65 |
+
ckpts
|
| 66 |
+
├──hunyuan-video-t2v-720p
|
| 67 |
+
│ ├──transformers
|
| 68 |
+
│ ├──vae
|
| 69 |
+
├──text_encoder
|
| 70 |
+
├──text_encoder_2
|
| 71 |
+
├──...
|
| 72 |
+
```
|
| 73 |
+
|
| 74 |
+
### Text EncoderにComfyUI提供のモデルを使う
|
| 75 |
+
|
| 76 |
+
こちらの方法の方がより簡単です。DiTとVAEのモデルはHunyuanVideoのものを使用します。
|
| 77 |
+
|
| 78 |
+
https://huggingface.co/tencent/HunyuanVideo/tree/main/hunyuan-video-t2v-720p/transformers から、[mp_rank_00_model_states.pt](https://huggingface.co/tencent/HunyuanVideo/resolve/main/hunyuan-video-t2v-720p/transformers/mp_rank_00_model_states.pt) をダウンロードし、任意のディレクトリに配置します。
|
| 79 |
+
|
| 80 |
+
(同じページにfp8のモデルもありますが、未検証です。)
|
| 81 |
+
|
| 82 |
+
`--fp8_base`を指定して学習する場合は、`mp_rank_00_model_states.pt`の代わりに、[こちら](https://huggingface.co/kohya-ss/HunyuanVideo-fp8_e4m3fn-unofficial)の`mp_rank_00_model_states_fp8.safetensors`を使用可能です。(このファイルは非公式のもので、重みを単純にfloat8_e4m3fnに変換したものです。)
|
| 83 |
+
|
| 84 |
+
また、https://huggingface.co/tencent/HunyuanVideo/tree/main/hunyuan-video-t2v-720p/vae から、[pytorch_model.pt](https://huggingface.co/tencent/HunyuanVideo/resolve/main/hunyuan-video-t2v-720p/vae/pytorch_model.pt) をダウンロードし、任意のディレクトリに配置します。
|
| 85 |
+
|
| 86 |
+
Text EncoderにはComfyUI提供のモデルを使用させていただきます。[ComfyUIのページ](https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_video/)を参考に、https://huggingface.co/Comfy-Org/HunyuanVideo_repackaged/tree/main/split_files/text_encoders から、llava_llama3_fp16.safetensors (Text Encoder 1、LLM)と、clip_l.safetensors (Text Encoder 2、CLIP)をダウンロードし、任意のディレクトリに配置します。
|
| 87 |
+
|
| 88 |
+
(同じページにfp8のLLMモデルもありますが、動作未検証です。)
|
| 89 |
+
|
| 90 |
+
</details>
|
| 91 |
+
|
| 92 |
+
## Pre-caching / 事前キャッシング
|
| 93 |
+
|
| 94 |
+
### Latent Pre-caching / latentの事前キャッシング
|
| 95 |
+
|
| 96 |
+
Latent pre-caching is required. Create the cache using the following command:
|
| 97 |
+
|
| 98 |
+
If you have installed using pip:
|
| 99 |
+
|
| 100 |
+
```bash
|
| 101 |
+
python src/musubi_tuner/cache_latents.py --dataset_config path/to/toml --vae path/to/ckpts/hunyuan-video-t2v-720p/vae/pytorch_model.pt --vae_chunk_size 32 --vae_tiling
|
| 102 |
+
```
|
| 103 |
+
|
| 104 |
+
If you have installed with `uv`, you can use `uv run --extra cu124` to run the script. If CUDA 12.8 or 13.0 is supported, `uv run --extra cu128` or `uv run --extra cu130` is also available. Other scripts can be run in the same way. (Note that the installation with `uv` is experimental. Feedback is welcome. If you encounter any issues, please use the pip-based installation.)
|
| 105 |
+
|
| 106 |
+
```bash
|
| 107 |
+
uv run --extra cu124 src/musubi_tuner/cache_latents.py --dataset_config path/to/toml --vae path/to/ckpts/hunyuan-video-t2v-720p/vae/pytorch_model.pt --vae_chunk_size 32 --vae_tiling
|
| 108 |
+
```
|
| 109 |
+
|
| 110 |
+
For additional options, use `python src/musubi_tuner/cache_latents.py --help`.
|
| 111 |
+
|
| 112 |
+
If you're running low on VRAM, reduce `--vae_spatial_tile_sample_min_size` to around 128 and lower the `--batch_size` (`--vae_spatial_tile_sample_min_size` may not exist in architectures other than HunyuanVideo, see the documentation for each architecture).
|
| 113 |
+
|
| 114 |
+
If you are using an AMD GPU and/or are experiencing slow latent caching, consider trying `--disable_cudnn_backend`. For some details, see [this pull request](https://github.com/kohya-ss/musubi-tuner/pull/592).
|
| 115 |
+
|
| 116 |
+
Use `--debug_mode image` to display dataset images and captions in a new window, or `--debug_mode console` to display them in the console (requires `ascii-magic`).
|
| 117 |
+
|
| 118 |
+
With `--debug_mode video`, images or videos will be saved in the cache directory (please delete them after checking). The bitrate of the saved video is set to 1Mbps for preview purposes. The images decoded from the original video (not degraded) are used for the cache (for training).
|
| 119 |
+
|
| 120 |
+
When `--debug_mode` is specified, the actual caching process is not performed.
|
| 121 |
+
|
| 122 |
+
By default, cache files not included in the dataset are automatically deleted. You can still keep cache files as before by specifying `--keep_cache`.
|
| 123 |
+
|
| 124 |
+
<details>
|
| 125 |
+
<summary>日本語</summary>
|
| 126 |
+
|
| 127 |
+
latentの事前キャッシュは必須です。以下のコマンドを使用して、事前キャッシュを作成してください。(pipによるインストールの場合)
|
| 128 |
+
|
| 129 |
+
```bash
|
| 130 |
+
python src/musubi_tuner/cache_latents.py --dataset_config path/to/toml --vae path/to/ckpts/hunyuan-video-t2v-720p/vae/pytorch_model.pt --vae_chunk_size 32 --vae_tiling
|
| 131 |
+
```
|
| 132 |
+
|
| 133 |
+
uvでインストールした場合は、`uv run --extra cu124 python src/musubi_tuner/cache_latents.py ...`のように、`uv run --extra cu124`を先頭につけてください。CUDA 12.8や13.0に対応している場合は、`uv run --extra cu128`や`uv run --extra cu130`も利用可能です。以下のコマンドも同様です。
|
| 134 |
+
|
| 135 |
+
その他のオプションは`python src/musubi_tuner/cache_latents.py --help`で確認できます。
|
| 136 |
+
|
| 137 |
+
VRAMが足りない場合は、`--vae_spatial_tile_sample_min_size`を128程度に減らし、`--batch_size`を小さくしてください。
|
| 138 |
+
|
| 139 |
+
`--debug_mode image` を指定するとデータセットの画像とキャプションが新規ウィンドウに表示されます。`--debug_mode console`でコンソールに表示されます(`ascii-magic`が必要)。
|
| 140 |
+
|
| 141 |
+
`--debug_mode video`で、キャッシュディレクトリに画像または動画が保存されます(確認後、削除してください)。動画のビットレートは確認用に低くしてあります。実際には元動画の画像が学習に使用されます。
|
| 142 |
+
|
| 143 |
+
`--debug_mode`指定時は、実際のキャッシュ処理は行われません。
|
| 144 |
+
|
| 145 |
+
デフォルトではデータセットに含まれないキャッシュファイルは自動的に削除されます。`--keep_cache`を指定すると、キャッシュファイルを残すことができます。
|
| 146 |
+
|
| 147 |
+
</details>
|
| 148 |
+
|
| 149 |
+
### Text Encoder Output Pre-caching / テキストエンコーダー出力の事前キャッシング
|
| 150 |
+
|
| 151 |
+
Text Encoder output pre-caching is required. Create the cache using the following command:
|
| 152 |
+
|
| 153 |
+
```bash
|
| 154 |
+
python src/musubi_tuner/cache_text_encoder_outputs.py --dataset_config path/to/toml --text_encoder1 path/to/ckpts/text_encoder --text_encoder2 path/to/ckpts/text_encoder_2 --batch_size 16
|
| 155 |
+
```
|
| 156 |
+
|
| 157 |
+
or for uv:
|
| 158 |
+
|
| 159 |
+
```bash
|
| 160 |
+
uv run --extra cu124 src/musubi_tuner/cache_text_encoder_outputs.py --dataset_config path/to/toml --text_encoder1 path/to/ckpts/text_encoder --text_encoder2 path/to/ckpts/text_encoder_2 --batch_size 16
|
| 161 |
+
```
|
| 162 |
+
|
| 163 |
+
For additional options, use `python src/musubi_tuner/cache_text_encoder_outputs.py --help`.
|
| 164 |
+
|
| 165 |
+
Adjust `--batch_size` according to your available VRAM.
|
| 166 |
+
|
| 167 |
+
For systems with limited VRAM (less than ~16GB), use `--fp8_llm` to run the LLM in fp8 mode.
|
| 168 |
+
|
| 169 |
+
By default, cache files not included in the dataset are automatically deleted. You can still keep cache files as before by specifying `--keep_cache`.
|
| 170 |
+
|
| 171 |
+
<details>
|
| 172 |
+
<summary>日本語</summary>
|
| 173 |
+
|
| 174 |
+
Text Encoder出力の事前キャッシュは必須です。以下のコマンドを使用して、事前キャッシュを作成してください。
|
| 175 |
+
|
| 176 |
+
```bash
|
| 177 |
+
python src/musubi_tuner/cache_text_encoder_outputs.py --dataset_config path/to/toml --text_encoder1 path/to/ckpts/text_encoder --text_encoder2 path/to/ckpts/text_encoder_2 --batch_size 16
|
| 178 |
+
```
|
| 179 |
+
|
| 180 |
+
その他のオプションは`python src/musubi_tuner/cache_text_encoder_outputs.py --help`で確認できます。
|
| 181 |
+
|
| 182 |
+
`--batch_size`はVRAMに合わせて調整してください。
|
| 183 |
+
|
| 184 |
+
VRAMが足りない場合(16GB程度未満の場合)は、`--fp8_llm`を指定して、fp8でLLMを実行してください。
|
| 185 |
+
|
| 186 |
+
デフォルトではデータセットに含まれないキャッシュファイルは自動的に削除されます。`--keep_cache`を指定すると、キャッシュファイルを残すことができます。
|
| 187 |
+
|
| 188 |
+
</details>
|
| 189 |
+
|
| 190 |
+
## Training / 学習
|
| 191 |
+
|
| 192 |
+
Start training using the following command (input as a single line):
|
| 193 |
+
|
| 194 |
+
```bash
|
| 195 |
+
accelerate launch --num_cpu_threads_per_process 1 --mixed_precision bf16 src/musubi_tuner/hv_train_network.py
|
| 196 |
+
--dit path/to/ckpts/hunyuan-video-t2v-720p/transformers/mp_rank_00_model_states.pt
|
| 197 |
+
--dataset_config path/to/toml --sdpa --mixed_precision bf16 --fp8_base
|
| 198 |
+
--optimizer_type adamw8bit --learning_rate 2e-4 --gradient_checkpointing
|
| 199 |
+
--max_data_loader_n_workers 2 --persistent_data_loader_workers
|
| 200 |
+
--network_module networks.lora --network_dim 32
|
| 201 |
+
--timestep_sampling shift --discrete_flow_shift 7.0
|
| 202 |
+
--max_train_epochs 16 --save_every_n_epochs 1 --seed 42
|
| 203 |
+
--output_dir path/to/output_dir --output_name name-of-lora
|
| 204 |
+
```
|
| 205 |
+
|
| 206 |
+
or for uv:
|
| 207 |
+
|
| 208 |
+
```bash
|
| 209 |
+
uv run --extra cu124 accelerate launch --num_cpu_threads_per_process 1 --mixed_precision bf16 src/musubi_tuner/hv_train_network.py
|
| 210 |
+
--dit path/to/ckpts/hunyuan-video-t2v-720p/transformers/mp_rank_00_model_states.pt
|
| 211 |
+
--dataset_config path/to/toml --sdpa --mixed_precision bf16 --fp8_base
|
| 212 |
+
--optimizer_type adamw8bit --learning_rate 2e-4 --gradient_checkpointing
|
| 213 |
+
--max_data_loader_n_workers 2 --persistent_data_loader_workers
|
| 214 |
+
--network_module networks.lora --network_dim 32
|
| 215 |
+
--timestep_sampling shift --discrete_flow_shift 7.0
|
| 216 |
+
--max_train_epochs 16 --save_every_n_epochs 1 --seed 42
|
| 217 |
+
--output_dir path/to/output_dir --output_name name-of-lora
|
| 218 |
+
```
|
| 219 |
+
|
| 220 |
+
If the details of the image are not learned well, try lowering the discrete flow shift to around 3.0.
|
| 221 |
+
|
| 222 |
+
The training settings are still experimental. Appropriate learning rates, training steps, timestep distribution, loss weighting, etc. are not yet known. Feedback is welcome.
|
| 223 |
+
|
| 224 |
+
For additional options, use `python src/musubi_tuner/hv_train_network.py --help` (note that many options are unverified).
|
| 225 |
+
|
| 226 |
+
### Memory Optimization
|
| 227 |
+
|
| 228 |
+
`--gradient_checkpointing` enables gradient checkpointing to reduce VRAM usage. Gradient checkpointing is a memory-saving technique that trades off computation time for memory usage by recomputing certain intermediate results during the backward pass instead of storing them all in memory. This is particularly useful for training large models such as HunyuanVideo, where VRAM can be a limiting factor. However, it may slow down training. If you have sufficient VRAM, you can disable it.
|
| 229 |
+
|
| 230 |
+
Specifying `--fp8_base` runs DiT in fp8 mode. Without this flag, mixed precision data type will be used. fp8 can significantly reduce memory consumption but may impact output quality. If `--fp8_base` is not specified, 24GB or more VRAM is recommended. Use `--blocks_to_swap` as needed.
|
| 231 |
+
|
| 232 |
+
If you're running low on VRAM, use `--blocks_to_swap` to offload some blocks to CPU. Maximum value is 36.
|
| 233 |
+
|
| 234 |
+
(The idea of block swap is based on the implementation by 2kpr. Thanks again to 2kpr.)
|
| 235 |
+
|
| 236 |
+
`--use_pinned_memory_for_block_swap` can be used to enable pinned memory for block swapping. This may improve performance when swapping blocks between CPU and GPU. However, it may increase shared VRAM usage on Windows systems. Use this option based on your system configuration (e.g., available system RAM and VRAM). In some environments, not specifying this option may result in faster performance.
|
| 237 |
+
|
| 238 |
+
`--gradient_checkpointing_cpu_offload` can be used to offload activations to CPU when using gradient checkpointing. This can further reduce VRAM usage, but may slow down training. This option is especially useful when the latent resolution (or video length) is high and VRAM is limited. This option must be used together with `--gradient_checkpointing`. See [PR #537](https://github.com/kohya-ss/musubi-tuner/pull/537) for more details.
|
| 239 |
+
|
| 240 |
+
### Attention
|
| 241 |
+
|
| 242 |
+
Use `--sdpa` for PyTorch's scaled dot product attention. Use `--flash_attn` for [FlashAttention](https://github.com/Dao-AILab/flash-attention). Use `--xformers` for xformers, but specify `--split_attn` when using xformers. `--sage_attn` for SageAttention, but SageAttention is not yet supported for training, so it raises a ValueError.
|
| 243 |
+
|
| 244 |
+
`--split_attn` processes attention in chunks. Speed may be slightly reduced, but VRAM usage is slightly reduced.
|
| 245 |
+
|
| 246 |
+
### Timestep Sampling
|
| 247 |
+
You can also specify the range of timesteps
|
| 248 |
+
with `--min_timestep` and `--max_timestep`. See [advanced configuration](../advanced_config.md#specify-time-step-range-for-training--学習時のタイムステップ範囲の指定) for details.
|
| 249 |
+
|
| 250 |
+
`--show_timesteps` can be set to `image` (requires `matplotlib`) or `console` to display timestep distribution and loss weighting during training. (When using `flux_shift` and `qwen_shift`, the distribution will be for images with a resolution of 1024x1024.)
|
| 251 |
+
|
| 252 |
+
### Other Options
|
| 253 |
+
|
| 254 |
+
The format of LoRA trained is the same as `sd-scripts`.
|
| 255 |
+
|
| 256 |
+
You can record logs during training. Refer to [Save and view logs in TensorBoard format](../advanced_config.md#save-and-view-logs-in-tensorboard-format--tensorboard形式のログの保存と参照).
|
| 257 |
+
|
| 258 |
+
For PyTorch Dynamo optimization, refer to [this document](../advanced_config.md#pytorch-dynamo-optimization-for-model-training--モデルの学習におけるpytorch-dynamoの最適化).
|
| 259 |
+
|
| 260 |
+
For sample image generation during training, refer to [this document](../sampling_during_training.md). For advanced configuration, refer to [this document](../advanced_config.md).
|
| 261 |
+
|
| 262 |
+
<details>
|
| 263 |
+
<summary>日本語</summary>
|
| 264 |
+
|
| 265 |
+
以下のコマンドを使用して、学習を開始します(実際には一行で入力してください)。
|
| 266 |
+
|
| 267 |
+
```bash
|
| 268 |
+
accelerate launch --num_cpu_threads_per_process 1 --mixed_precision bf16 src/musubi_tuner/hv_train_network.py
|
| 269 |
+
--dit path/to/ckpts/hunyuan-video-t2v-720p/transformers/mp_rank_00_model_states.pt
|
| 270 |
+
--dataset_config path/to/toml --sdpa --mixed_precision bf16 --fp8_base
|
| 271 |
+
--optimizer_type adamw8bit --learning_rate 2e-4 --gradient_checkpointing
|
| 272 |
+
--max_data_loader_n_workers 2 --persistent_data_loader_workers
|
| 273 |
+
--network_module networks.lora --network_dim 32
|
| 274 |
+
--timestep_sampling shift --discrete_flow_shift 7.0
|
| 275 |
+
--max_train_epochs 16 --save_every_n_epochs 1 --seed 42
|
| 276 |
+
--output_dir path/to/output_dir --output_name name-of-lora
|
| 277 |
+
```
|
| 278 |
+
|
| 279 |
+
ディテールが甘くなる場合は、discrete flow shiftを3.0程度に下げてみてください。
|
| 280 |
+
|
| 281 |
+
ただ、適切な学習率、学習ステップ数、timestepsの分布、loss weightingなどのパラメータは、依然として不明な点が数多くあります。情報提供をお待ちしています。
|
| 282 |
+
|
| 283 |
+
その他のオプションは`python src/musubi_tuner/hv_train_network.py --help`で確認できます(ただし多くのオプションは動作未確認です)。
|
| 284 |
+
|
| 285 |
+
**メモリ最適化**
|
| 286 |
+
|
| 287 |
+
`--gradient_checkpointing`でgradient checkpointingを有効にします。VRAM使用量を削減できます。gradient checkpointingは、バックワードパス中に一部の中間結果をすべてメモリに保存するのではなく、再計算することで、計算時間とメモリ使用量をトレードオフするメモリ節約技術です。HunyuanVideoのような大規模モデルの学習ではVRAMが制約となることが多いため、特に有用です。ただし学習が遅くなる可能性があります。十分なVRAMがある場合は無効にしても構いません。
|
| 288 |
+
|
| 289 |
+
`--fp8_base`を指定すると、DiTがfp8で学習されます。未指定時はmixed precisionのデータ型が使用されます。fp8は大きく消費メモリを削減できますが、品質は低下する可能性があります。`--fp8_base`を指定しない場合はVRAM 24GB以上を推奨します。また必要に応じて`--blocks_to_swap`を使用してください。
|
| 290 |
+
|
| 291 |
+
VRAMが足りない場合は、`--blocks_to_swap`を指定して、一部のブロックをCPUにオフロードしてください。最大36が指定できます。
|
| 292 |
+
|
| 293 |
+
(block swapのアイデアは2kpr氏の実装に基づくものです。2kpr氏にあらためて感謝します。)
|
| 294 |
+
|
| 295 |
+
`--use_pinned_memory_for_block_swap`を指定すると、block swapにピン留めメモリを使用します。CPUとGPU間でブロックをスワップする際のパフォーマンスが向上する可能性があります。ただし、Windows環境では共有VRAM使用量が増加する可能性があります。システム構成(利用可能なシステムRAMやVRAMなど)に応じて、このオプションを使用してください。環境によっては指定しないほうが高速になる場合もあります。
|
| 296 |
+
|
| 297 |
+
`--gradient_checkpointing_cpu_offload`を指定すると、gradient checkpointing使用時にアクティベーションをCPUにオフロードします。これによりVRAM使用量をさらに削減できますが、学習が遅くなる可能性があります。latent解像度(または動画長)が高く、VRAMが限られている場合に特に有用です。このオプションは`--gradient_checkpointing`と併用する必要があります。詳細は[PR #537](https://github.com/kohya-ss/musubi-tuner/pull/537)を参照してください。
|
| 298 |
+
|
| 299 |
+
**Attention**
|
| 300 |
+
|
| 301 |
+
`--sdpa`でPyTorchのscaled dot product attentionを使用します。`--flash_attn`で[FlashAttention](https://github.com/Dao-AILab/flash-attention)を使用します。`--xformers`でxformersの利用も可能ですが、xformersを使う場合は`--split_attn`を指定してください。`--sage_attn`でSageAttentionを使用しますが、SageAttentionは現時点では学習に未対応のため、エラーが発生します。
|
| 302 |
+
|
| 303 |
+
`--split_attn`を指定すると、attentionを分割して処理します。速度が多少低下しますが、VRAM使用量はわずかに減ります。
|
| 304 |
+
|
| 305 |
+
**タイムステップサンプリング**
|
| 306 |
+
|
| 307 |
+
`--min_timestep`と`--max_timestep`を指定すると、学習時のタイムステップの範囲を指定できます。詳細は[高度な設定](../advanced_config.md#specify-time-step-range-for-training--学習時のタイムステップ範囲の指定)を参照してください。
|
| 308 |
+
|
| 309 |
+
`--show_timesteps`に`image`(`matplotlib`が必要)または`console`を指定すると、学習時のtimestepsの分布とtimestepsごとのloss weightingが確認できます。(`flux_shift`と`qwen_shift`を使用する場合は画像の解像度が1024x1024の場合の分布になります。)
|
| 310 |
+
|
| 311 |
+
**その他のオプション**
|
| 312 |
+
|
| 313 |
+
学習されるLoRAの形式は、`sd-scripts`と同じです。
|
| 314 |
+
|
| 315 |
+
学習時のログの記録が可能です。[TensorBoard形式のログの保存と参照](../advanced_config.md#save-and-view-logs-in-tensorboard-format--tensorboard形式のログの保存と参照)を参照してください。
|
| 316 |
+
|
| 317 |
+
PyTorch Dynamoによる最適化を行う場合は、[こちら](../advanced_config.md#pytorch-dynamo-optimization-for-model-training--モデルの学習におけるpytorch-dynamoの最適化)を参照してください。
|
| 318 |
+
|
| 319 |
+
学習中のサンプル画像生成については、[こちらのドキュメント](../sampling_during_training.md)を参照してください。その他の高度な設定については[こちらのドキュメント](../advanced_config.md)を参照してください。
|
| 320 |
+
|
| 321 |
+
</details>
|
| 322 |
+
|
| 323 |
+
### Merging LoRA Weights / LoRAの重みのマージ
|
| 324 |
+
|
| 325 |
+
```bash
|
| 326 |
+
python src/musubi_tuner/merge_lora.py \
|
| 327 |
+
--dit path/to/ckpts/hunyuan-video-t2v-720p/transformers/mp_rank_00_model_states.pt \
|
| 328 |
+
--lora_weight path/to/lora.safetensors \
|
| 329 |
+
--save_merged_model path/to/merged_model.safetensors \
|
| 330 |
+
--device cpu \
|
| 331 |
+
--lora_multiplier 1.0
|
| 332 |
+
```
|
| 333 |
+
|
| 334 |
+
or for uv:
|
| 335 |
+
|
| 336 |
+
```bash
|
| 337 |
+
uv run --extra cu124 src/musubi_tuner/merge_lora.py \
|
| 338 |
+
--dit path/to/ckpts/hunyuan-video-t2v-720p/transformers/mp_rank_00_model_states.pt \
|
| 339 |
+
--lora_weight path/to/lora.safetensors \
|
| 340 |
+
--save_merged_model path/to/merged_model.safetensors \
|
| 341 |
+
--device cpu \
|
| 342 |
+
--lora_multiplier 1.0
|
| 343 |
+
```
|
| 344 |
+
|
| 345 |
+
Specify the device to perform the calculation (`cpu` or `cuda`, etc.) with `--device`. Calculation will be faster if `cuda` is specified.
|
| 346 |
+
|
| 347 |
+
Specify the LoRA weights to merge with `--lora_weight` and the multiplier for the LoRA weights with `--lora_multiplier`. Multiple values can be specified, and the number of values must match.
|
| 348 |
+
|
| 349 |
+
<details>
|
| 350 |
+
<summary>日本語</summary>
|
| 351 |
+
|
| 352 |
+
```bash
|
| 353 |
+
python src/musubi_tuner/merge_lora.py \
|
| 354 |
+
--dit path/to/ckpts/hunyuan-video-t2v-720p/transformers/mp_rank_00_model_states.pt \
|
| 355 |
+
--lora_weight path/to/lora.safetensors \
|
| 356 |
+
--save_merged_model path/to/merged_model.safetensors \
|
| 357 |
+
--device cpu \
|
| 358 |
+
--lora_multiplier 1.0
|
| 359 |
+
```
|
| 360 |
+
|
| 361 |
+
`--device`には計算を行うデバイス(`cpu`または`cuda`等)を指定してください。`cuda`を指定すると計算が高速化されます。
|
| 362 |
+
|
| 363 |
+
`--lora_weight`にはマージするLoRAの重みを、`--lora_multiplier`にはLoRAの重みの係数を、それぞれ指定してください。複数個が指定可能で、両者の数は一致させてください。
|
| 364 |
+
|
| 365 |
+
</details>
|
| 366 |
+
|
| 367 |
+
## Inference / 推論
|
| 368 |
+
|
| 369 |
+
Generate videos using the following command:
|
| 370 |
+
|
| 371 |
+
```bash
|
| 372 |
+
python src/musubi_tuner/hv_generate_video.py --fp8 --video_size 544 960 --video_length 5 --infer_steps 30
|
| 373 |
+
--prompt "A cat walks on the grass, realistic style." --save_path path/to/save/dir --output_type both
|
| 374 |
+
--dit path/to/ckpts/hunyuan-video-t2v-720p/transformers/mp_rank_00_model_states.pt --attn_mode sdpa --split_attn
|
| 375 |
+
--vae path/to/ckpts/hunyuan-video-t2v-720p/vae/pytorch_model.pt
|
| 376 |
+
--vae_chunk_size 32 --vae_spatial_tile_sample_min_size 128
|
| 377 |
+
--text_encoder1 path/to/ckpts/text_encoder
|
| 378 |
+
--text_encoder2 path/to/ckpts/text_encoder_2
|
| 379 |
+
--seed 1234 --lora_multiplier 1.0 --lora_weight path/to/lora.safetensors
|
| 380 |
+
```
|
| 381 |
+
|
| 382 |
+
or for uv:
|
| 383 |
+
|
| 384 |
+
```bash
|
| 385 |
+
uv run --extra cu124 src/musubi_tuner/hv_generate_video.py --fp8 --video_size 544 960 --video_length 5 --infer_steps 30
|
| 386 |
+
--prompt "A cat walks on the grass, realistic style." --save_path path/to/save/dir --output_type both
|
| 387 |
+
--dit path/to/ckpts/hunyuan-video-t2v-720p/transformers/mp_rank_00_model_states.pt --attn_mode sdpa --split_attn
|
| 388 |
+
--vae path/to/ckpts/hunyuan-video-t2v-720p/vae/pytorch_model.pt
|
| 389 |
+
--vae_chunk_size 32 --vae_spatial_tile_sample_min_size 128
|
| 390 |
+
--text_encoder1 path/to/ckpts/text_encoder
|
| 391 |
+
--text_encoder2 path/to/ckpts/text_encoder_2
|
| 392 |
+
--seed 1234 --lora_multiplier 1.0 --lora_weight path/to/lora.safetensors
|
| 393 |
+
```
|
| 394 |
+
|
| 395 |
+
For additional options, use `python src/musubi_tuner/hv_generate_video.py --help`.
|
| 396 |
+
|
| 397 |
+
Specifying `--fp8` runs DiT in fp8 mode. fp8 can significantly reduce memory consumption but may impact output quality.
|
| 398 |
+
|
| 399 |
+
`--fp8_fast` option is also available for faster inference on RTX 40x0 GPUs. This option requires `--fp8` option.
|
| 400 |
+
|
| 401 |
+
If you're running low on VRAM, use `--blocks_to_swap` to offload some blocks to CPU. Maximum value is 38.
|
| 402 |
+
|
| 403 |
+
For `--attn_mode`, specify either `flash`, `torch`, `sageattn`, `xformers`, or `sdpa` (same as `torch`). These correspond to FlashAttention, scaled dot product attention, SageAttention, and xformers, respectively. Default is `torch`. SageAttention is effective for VRAM reduction.
|
| 404 |
+
|
| 405 |
+
Specifying `--split_attn` will process attention in chunks. Inference with SageAttention is expected to be about 10% faster.
|
| 406 |
+
|
| 407 |
+
For `--output_type`, specify either `both`, `latent`, `video` or `images`. `both` outputs both latents and video. Recommended to use `both` in case of Out of Memory errors during VAE processing. You can specify saved latents with `--latent_path` and use `--output_type video` (or `images`) to only perform VAE decoding.
|
| 408 |
+
|
| 409 |
+
`--seed` is optional. A random seed will be used if not specified.
|
| 410 |
+
|
| 411 |
+
`--video_length` should be specified as "a multiple of 4 plus 1".
|
| 412 |
+
|
| 413 |
+
`--flow_shift` can be specified to shift the timestep (discrete flow shift). The default value when omitted is 7.0, which is the recommended value for 50 inference steps. In the HunyuanVideo paper, 7.0 is recommended for 50 steps, and 17.0 is recommended for less than 20 steps (e.g. 10).
|
| 414 |
+
|
| 415 |
+
By specifying `--video_path`, video2video inference is possible. Specify a video file or a directory containing multiple image files (the image files are sorted by file name and used as frames). An error will occur if the video is shorter than `--video_length`. You can specify the strength with `--strength`. It can be specified from 0 to 1.0, and the larger the value, the greater the change from the original video.
|
| 416 |
+
|
| 417 |
+
Note that video2video inference is experimental.
|
| 418 |
+
|
| 419 |
+
`--compile` option enables PyTorch's compile feature (experimental). Requires triton. On Windows, also requires Visual C++ build tools installed and PyTorch>=2.6.0 (Visual C++ build tools is also required). See [the torch.compile documentation](torch_compile.md) for details.
|
| 420 |
+
|
| 421 |
+
The `--compile` option takes a long time to run the first time, but speeds up on subsequent runs.
|
| 422 |
+
|
| 423 |
+
You can save the DiT model after LoRA merge with the `--save_merged_model` option. Specify `--save_merged_model path/to/merged_model.safetensors`. Note that inference will not be performed when this option is specified.
|
| 424 |
+
|
| 425 |
+
<details>
|
| 426 |
+
<summary>日本語</summary>
|
| 427 |
+
|
| 428 |
+
以下のコマンドを使用して動画を生成します。
|
| 429 |
+
|
| 430 |
+
```bash
|
| 431 |
+
python src/musubi_tuner/hv_generate_video.py --fp8 --video_size 544 960 --video_length 5 --infer_steps 30
|
| 432 |
+
--prompt "A cat walks on the grass, realistic style." --save_path path/to/save/dir --output_type both
|
| 433 |
+
--dit path/to/ckpts/hunyuan-video-t2v-720p/transformers/mp_rank_00_model_states.pt --attn_mode sdpa --split_attn
|
| 434 |
+
--vae path/to/ckpts/hunyuan-video-t2v-720p/vae/pytorch_model.pt
|
| 435 |
+
--vae_chunk_size 32 --vae_spatial_tile_sample_min_size 128
|
| 436 |
+
--text_encoder1 path/to/ckpts/text_encoder
|
| 437 |
+
--text_encoder2 path/to/ckpts/text_encoder_2
|
| 438 |
+
--seed 1234 --lora_multiplier 1.0 --lora_weight path/to/lora.safetensors
|
| 439 |
+
```
|
| 440 |
+
|
| 441 |
+
その他のオプションは`python src/musubi_tuner/hv_generate_video.py --help`で確認できます。
|
| 442 |
+
|
| 443 |
+
`--fp8`を指定すると、DiTがfp8で推論されます。fp8は大きく消費メモリを削減できますが、品質は低下する可能性があります。
|
| 444 |
+
|
| 445 |
+
RTX 40x0シリーズのGPUを使用している場合は、`--fp8_fast`オプションを指定することで、高速推論が可能です。このオプションを指定する場合は、`--fp8`も指定してください。
|
| 446 |
+
|
| 447 |
+
VRAMが足りない場合は、`--blocks_to_swap`を指定して、一部のブロックをCPUにオフロードしてください。最大38が指定できます。
|
| 448 |
+
|
| 449 |
+
`--attn_mode`には`flash`、`torch`、`sageattn`、`xformers`または`sdpa`(`torch`指定時と同じ)のいずれかを指定してください。それぞれFlashAttention、scaled dot product attention、SageAttention、xformersに対応します。デフォルトは`torch`です。SageAttentionはVRAMの削減に有効です。
|
| 450 |
+
|
| 451 |
+
`--split_attn`を指定すると、attentionを分割して処理します。SageAttention利用時で10%程度の高速化が見込まれます。
|
| 452 |
+
|
| 453 |
+
`--output_type`には`both`、`latent`、`video`、`images`のいずれかを指定してください。`both`はlatentと動画の両方を出力します。VAEでOut of Memoryエラーが発生する場合に備えて、`both`を指定することをお勧めします。`--latent_path`に保存されたlatentを指定し、`--output_type video` (または`images`)としてスクリプトを実行すると、VAEのdecodeのみを行えます。
|
| 454 |
+
|
| 455 |
+
`--seed`は省略可能です。指定しない場合はランダムなシードが使用されます。
|
| 456 |
+
|
| 457 |
+
`--video_length`は「4の倍数+1」を指定してください。
|
| 458 |
+
|
| 459 |
+
`--flow_shift`にタイムステップのシフト値(discrete flow shift)を指定可能です。省略時のデフォルト値は7.0で、これは推論ステップ数が50の時の推奨値です。HunyuanVideoの論文では、ステップ数50の場合は7.0、ステップ数20未満(10など)で17.0が推奨されています。
|
| 460 |
+
|
| 461 |
+
`--video_path`に読み込む動画を指定すると、video2videoの推論が可能です。動画ファイルを指定するか、複数の画像ファイルが入ったディレクトリを指定してください(画像ファイルはファイル名でソートされ、各フレームとして用いられます)。`--video_length`よりも短い動画を指定するとエラーになります。`--strength`で強度を指定できます。0~1.0で指定でき、大きいほど元の動画からの変化が大きくなります。
|
| 462 |
+
|
| 463 |
+
なおvideo2video推論の処理は実験的なものです。
|
| 464 |
+
|
| 465 |
+
`--compile`オプションでPyTorchのコンパイル機能を有効にします(実験的機能)。tritonのインストールが必要です。また、WindowsではVisual C++ build toolsが必要で、かつPyTorch>=2.6.0でのみ動作します。詳細は[torch.compileのドキュメント](torch_compile.md)を参照してください。
|
| 466 |
+
|
| 467 |
+
`--compile`は初回実行時にかなりの時間がかかりますが、2回目以降は高速化されます。
|
| 468 |
+
|
| 469 |
+
`--save_merged_model`オプションで、LoRAマージ後のDiTモデルを保存できます。`--save_merged_model path/to/merged_model.safetensors`のように指定してください。なおこのオプションを指定すると推論は行われません。
|
| 470 |
+
|
| 471 |
+
</details>
|
| 472 |
+
|
| 473 |
+
### Inference with SkyReels V1 / SkyReels V1での推論
|
| 474 |
+
|
| 475 |
+
SkyReels V1 T2V and I2V models are supported (inference only).
|
| 476 |
+
|
| 477 |
+
The model can be downloaded from [here](https://huggingface.co/Kijai/SkyReels-V1-Hunyuan_comfy). Many thanks to Kijai for providing the model. `skyreels_hunyuan_i2v_bf16.safetensors` is the I2V model, and `skyreels_hunyuan_t2v_bf16.safetensors` is the T2V model. The models other than bf16 are not tested (`fp8_e4m3fn` may work).
|
| 478 |
+
|
| 479 |
+
For T2V inference, add the following options to the inference command:
|
| 480 |
+
|
| 481 |
+
```bash
|
| 482 |
+
--guidance_scale 6.0 --embedded_cfg_scale 1.0 --negative_prompt "Aerial view, aerial view, overexposed, low quality, deformation, a poor composition, bad hands, bad teeth, bad eyes, bad limbs, distortion" --split_uncond
|
| 483 |
+
```
|
| 484 |
+
|
| 485 |
+
SkyReels V1 seems to require a classifier free guidance (negative prompt). `--guidance_scale` is a guidance scale for the negative prompt. The recommended value is 6.0 from the official repository. The default is 1.0, which means no classifier free guidance.
|
| 486 |
+
|
| 487 |
+
`--embedded_cfg_scale` is a scale of the embedded guidance. The recommended value is 1.0 from the official repository (it may mean no embedded guidance).
|
| 488 |
+
|
| 489 |
+
`--negative_prompt` is a negative prompt for the classifier free guidance. The above sample is from the official repository. If you don't specify this, and specify `--guidance_scale` other than 1.0, an empty string will be used as the negative prompt.
|
| 490 |
+
|
| 491 |
+
`--split_uncond` is a flag to split the model call into unconditional and conditional parts. This reduces VRAM usage but may slow down inference. If `--split_attn` is specified, `--split_uncond` is automatically set.
|
| 492 |
+
|
| 493 |
+
You can also perform image2video inference with SkyReels V1 I2V model. Specify the image file path with `--image_path`. The image will be resized to the given `--video_size`.
|
| 494 |
+
|
| 495 |
+
```bash
|
| 496 |
+
--image_path path/to/image.jpg
|
| 497 |
+
```
|
| 498 |
+
|
| 499 |
+
<details>
|
| 500 |
+
<summary>日本語</summary>
|
| 501 |
+
|
| 502 |
+
SkyReels V1のT2VとI2Vモデルがサポートされています(推論のみ)。
|
| 503 |
+
|
| 504 |
+
モデルは[こちら](https://huggingface.co/Kijai/SkyReels-V1-Hunyuan_comfy)からダウンロードできます。モデルを提供してくださったKijai氏に感謝します。`skyreels_hunyuan_i2v_bf16.safetensors`がI2Vモデル、`skyreels_hunyuan_t2v_bf16.safetensors`がT2Vモデルです。`bf16`以外の形式は未検証です(`fp8_e4m3fn`は動作するかもしれません)。
|
| 505 |
+
|
| 506 |
+
T2V推論を行う場合、以下のオプションを推論コマンドに追加してください:
|
| 507 |
+
|
| 508 |
+
```bash
|
| 509 |
+
--guidance_scale 6.0 --embedded_cfg_scale 1.0 --negative_prompt "Aerial view, aerial view, overexposed, low quality, deformation, a poor composition, bad hands, bad teeth, bad eyes, bad limbs, distortion" --split_uncond
|
| 510 |
+
```
|
| 511 |
+
|
| 512 |
+
SkyReels V1はclassifier free guidance(ネガティブプロンプト)を必要とするようです。`--guidance_scale`はネガティブプロンプトのガイダンススケールです。公式リポジトリの推奨値は6.0です。デフォルトは1.0で、この場合はclassifier free guidanceは使用されません(ネガティブプロンプトは無視されます)。
|
| 513 |
+
|
| 514 |
+
`--embedded_cfg_scale`は埋め込みガイダンスのスケールです。公式リポジトリの推奨値は1.0です(埋め込みガイダンスなしを意味すると思われます)。
|
| 515 |
+
|
| 516 |
+
`--negative_prompt`はいわゆるネガティブプロンプトです。上記のサンプルは公式リポジトリのものです。`--guidance_scale`を指定し、`--negative_prompt`を指定しなかった場合は、空文字列が使用されます。
|
| 517 |
+
|
| 518 |
+
`--split_uncond`を指定すると、モデル呼び出しをuncondとcond(ネガティブプロンプトとプロンプト)に分割します。VRAM使用量が減りますが、推論速度は低下する可能性があります。`--split_attn`が指定されている場合、`--split_uncond`は自動的に有効になります。
|
| 519 |
+
|
| 520 |
+
</details>
|
| 521 |
+
|
| 522 |
+
### Convert LoRA to another format / LoRAの形式の変換
|
| 523 |
+
|
| 524 |
+
You can convert LoRA to a format (presumed to be Diffusion-pipe) compatible with another inference environment (Diffusers, ComfyUI etc.) using the following command:
|
| 525 |
+
|
| 526 |
+
```bash
|
| 527 |
+
python src/musubi_tuner/convert_lora.py --input path/to/musubi_lora.safetensors --output path/to/another_format.safetensors --target other
|
| 528 |
+
```
|
| 529 |
+
|
| 530 |
+
or for uv:
|
| 531 |
+
|
| 532 |
+
```bash
|
| 533 |
+
uv run --extra cu124 src/musubi_tuner/convert_lora.py --input path/to/musubi_lora.safetensors --output path/to/another_format.safetensors --target other
|
| 534 |
+
```
|
| 535 |
+
|
| 536 |
+
Specify the input and output file paths with `--input` and `--output`, respectively.
|
| 537 |
+
|
| 538 |
+
Specify `other` for `--target`. Use `default` to convert from another format to the format of this repository.
|
| 539 |
+
|
| 540 |
+
<details>
|
| 541 |
+
<summary>日本語</summary>
|
| 542 |
+
|
| 543 |
+
他の推論環境(DiffusersやComfyUI)で使用可能な形式(Diffusion-pipe または Diffusers と思われる)への変換は以下のコマンドで行えます。
|
| 544 |
+
|
| 545 |
+
```bash
|
| 546 |
+
python src/musubi_tuner/convert_lora.py --input path/to/musubi_lora.safetensors --output path/to/another_format.safetensors --target other
|
| 547 |
+
```
|
| 548 |
+
|
| 549 |
+
`--input`と`--output`はそれぞれ入力と出力のファイルパスを指定してください。
|
| 550 |
+
|
| 551 |
+
`--target`には`other`を指定してください。`default`を指定すると、他の形式から当リポジトリの形式に変換できます。
|
| 552 |
+
|
| 553 |
+
</details>
|
docs/hunyuan_video_1_5.md
ADDED
|
@@ -0,0 +1,372 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# HunyuanVideo 1.5
|
| 2 |
+
|
| 3 |
+
## Overview / 概要
|
| 4 |
+
|
| 5 |
+
This document describes the usage of HunyuanVideo 1.5 architecture within the Musubi Tuner framework. HunyuanVideo 1.5 is a video generation model that supports both text-to-video (T2V) and image-to-video (I2V) generation.
|
| 6 |
+
|
| 7 |
+
Pre-caching, training, and inference options can be found via `--help`. Many options are shared with HunyuanVideo, so refer to the [HunyuanVideo documentation](./hunyuan_video.md) as needed.
|
| 8 |
+
|
| 9 |
+
This feature is experimental.
|
| 10 |
+
|
| 11 |
+
<details>
|
| 12 |
+
<summary>日本語</summary>
|
| 13 |
+
|
| 14 |
+
このドキュメントは、Musubi Tunerフレームワーク内でのHunyuanVideo 1.5アーキテクチャの使用法について説明しています。HunyuanVideo 1.5はテキストから動画を生成(T2V)、および画像から動画を生成(I2V)することができるモデルです。
|
| 15 |
+
|
| 16 |
+
事前キャッシング、学習、推論のオプションは`--help`で確認してください。HunyuanVideoと共通のオプションが多くありますので、必要に応じて[HunyuanVideoのドキュメント](./hunyuan_video.md)も参照してください。
|
| 17 |
+
|
| 18 |
+
この機能は実験的なものです。
|
| 19 |
+
|
| 20 |
+
</details>
|
| 21 |
+
|
| 22 |
+
## Download the model / モデルのダウンロード
|
| 23 |
+
|
| 24 |
+
You need to download the DiT, VAE, Text Encoder (Qwen2.5-VL), and BYT5 models.
|
| 25 |
+
|
| 26 |
+
- **DiT**: Download from [HuggingFace's HunyuanVideo 1.5 site](https://huggingface.co/tencent/HunyuanVideo-1.5/tree/main). Use `transformer/720p_i2v/diffusion_pytorch_model.safetensors` for I2V DiT and `transformer/720p_t2v/diffusion_pytorch_model.safetensors` for T2V DiT.
|
| 27 |
+
Alternatively, you can use `split_files/diffusion_models/hunyuanvideo1.5_720p_i2v_fp16.safetensors` and `split_files/diffusion_models/hunyuanvideo1.5_720p_t2v_fp16.safetensors` from [ComfyUI's HunyuanVideo 1.5 weights](https://huggingface.co/Comfy-Org/HunyuanVideo_1.5_repackaged/tree/main), but do not use these for bf16 training as the weights are converted to fp16.
|
| 28 |
+
|
| 29 |
+
- **Text Encoder (Qwen2.5-VL)**: Download from [ComfyUI's HunyuanVideo 1.5 weights](https://huggingface.co/Comfy-Org/HunyuanVideo_1.5_repackaged/tree/main). Use `split_files/text_encoders/qwen_2.5_vl_7b.safetensors`.
|
| 30 |
+
|
| 31 |
+
- **BYT5**: Download from [ComfyUI's HunyuanVideo 1.5 weights](https://huggingface.co/Comfy-Org/HunyuanVideo_1.5_repackaged/tree/main). Use `split_files/text_encoders/byt5_small_glyphxl_fp16.safetensors`.
|
| 32 |
+
|
| 33 |
+
For I2V training or inference, you also need:
|
| 34 |
+
|
| 35 |
+
- **Image Encoder (SigLIP)**: Download from [ComfyUI's HunyuanVideo 1.5 weights](https://huggingface.co/Comfy-Org/HunyuanVideo_1.5_repackaged/tree/main). Use `split_files/clip_vision/sigclip_vision_patch14_384.safetensors`.
|
| 36 |
+
|
| 37 |
+
<details>
|
| 38 |
+
<summary>日本語</summary>
|
| 39 |
+
|
| 40 |
+
DiT, VAE, Text Encoder (Qwen2.5-VL), BYT5 のモデルをダウンロードする必要があります。
|
| 41 |
+
|
| 42 |
+
- **DiT**: [HuggingFaceのHunyuanVideo 1.5のサイト](https://huggingface.co/tencent/HunyuanVideo-1.5/tree/main) からダウンロードしてください。
|
| 43 |
+
I2VのDiTには`transformer/720p_i2v/diffusion_pytorch_model.safetensors`を、T2VのDiTには`transformer/720p_t2v/diffusion_pytorch_model.safetensors`を使用してください。
|
| 44 |
+
[ComfyUIのHunyuanVideo 1.5用の重み](https://huggingface.co/Comfy-Org/HunyuanVideo_1.5_repackaged/tree/main)の`split_files/diffusion_models/hunyuanvideo1.5_720p_i2v_fp16.safetensors`および`split_files/diffusion_models/hunyuanvideo1.5_720p_t2v_fp16.safetensors`も使用可能ですが、重みがfp16に変換されているため、bf16学習の時には使用しないでください。
|
| 45 |
+
|
| 46 |
+
- **VAE**: [HuggingFaceのHunyuanVideo 1.5のサイト](https://huggingface.co/tencent/HunyuanVideo-1.5/tree/main) から `vae/diffusion_pytorch_model.safetensors` をダウンロードしてください。
|
| 47 |
+
または、[ComfyUIのHunyuanVideo 1.5用の重み](https://huggingface.co/Comfy-Org/HunyuanVideo_1.5_repackaged/tree/main)の`split_files/vae/hunyuanvideo15_vae_fp16.safetensors`も使用可能です。
|
| 48 |
+
|
| 49 |
+
- **Text Encoder (Qwen2.5-VL)**: [ComfyUIのHunyuanVideo 1.5用の重み](https://huggingface.co/Comfy-Org/HunyuanVideo_1.5_repackaged/tree/main)から`split_files/text_encoders/qwen_2.5_vl_7b.safetensors`をダウンロードしてください。
|
| 50 |
+
|
| 51 |
+
- **BYT5**: [ComfyUIのHunyuanVideo 1.5用の重み](https://huggingface.co/Comfy-Org/HunyuanVideo_1.5_repackaged/tree/main)から`split_files/text_encoders/byt5_small_glyphxl_fp16.safetensors`をダウンロードしてください。
|
| 52 |
+
|
| 53 |
+
I2V学習または推論を行う場合は、さらに以下が必要です:
|
| 54 |
+
|
| 55 |
+
- **Image Encoder (SigLIP)**: [ComfyUIのHunyuanVideo 1.5用の重み](https://huggingface.co/Comfy-Org/HunyuanVideo_1.5_repackaged/tree/main)から `split_files/clip_vision/sigclip_vision_patch14_384.safetensors` をダウンロードしてください。
|
| 56 |
+
|
| 57 |
+
</details>
|
| 58 |
+
|
| 59 |
+
## Pre-caching / 事前キャッシング
|
| 60 |
+
|
| 61 |
+
### Latent Pre-caching / latentの事前キャッシング
|
| 62 |
+
|
| 63 |
+
Latent pre-caching uses a dedicated script for HunyuanVideo 1.5.
|
| 64 |
+
|
| 65 |
+
```bash
|
| 66 |
+
python src/musubi_tuner/hv_1_5_cache_latents.py \
|
| 67 |
+
--dataset_config path/to/toml \
|
| 68 |
+
--vae path/to/vae_model
|
| 69 |
+
```
|
| 70 |
+
|
| 71 |
+
- Uses `hv_1_5_cache_latents.py`.
|
| 72 |
+
- The dataset can be either an image dataset or a video dataset.
|
| 73 |
+
- `--vae_sample_size` option sets the VAE sample size for tiling. Default is 128. Set to 256 if VRAM is sufficient for better quality. Set to 0 to disable tiling (highest quality but consumes a lot of VRAM).
|
| 74 |
+
- `--vae_enable_patch_conv` option enables patch-based convolution in VAE for memory optimization (less effective than `--vae_sample_size`). No quality degradation.
|
| 75 |
+
- For I2V training, specify `--i2v` and `--image_encoder path/to/image_encoder` to cache image features and conditional latents.
|
| 76 |
+
|
| 77 |
+
<details>
|
| 78 |
+
<summary>日本語</summary>
|
| 79 |
+
|
| 80 |
+
latentの事前キャッシングはHunyuanVideo 1.5専用のスクリプトを使用します。
|
| 81 |
+
|
| 82 |
+
- `hv_1_5_cache_latents.py`を使用します。
|
| 83 |
+
- データセットは画像データセットまたは動画データセットのいずれかです。
|
| 84 |
+
- `--vae_sample_size`オプションでVAEのタイリング用サンプルサイズを設定します。デフォルトは128です。VRAMが十分な場合は256に設定すると品質が向上します。0に設定するとタイリングを無効にします(最良の品質ですが非常に多くのVRAMを消費します)。
|
| 85 |
+
- `--vae_enable_patch_conv`オプションでVAEのパッチベース畳み込みを有効にし、メモリを最適化します(メモリ削減効果は`--vae_sample_size`よりも落ちます)。品質の劣化はありません。
|
| 86 |
+
- I2V学習の場合は、`--i2v`と`--image_encoder path/to/image_encoder`を指定して、画像の特徴と条件付きlatentをキャッシュします。
|
| 87 |
+
|
| 88 |
+
</details>
|
| 89 |
+
|
| 90 |
+
### Text Encoder Output Pre-caching / テキストエンコーダー出力の事前キャッシング
|
| 91 |
+
|
| 92 |
+
Text encoder output pre-caching also uses a dedicated script.
|
| 93 |
+
|
| 94 |
+
```bash
|
| 95 |
+
python src/musubi_tuner/hv_1_5_cache_text_encoder_outputs.py \
|
| 96 |
+
--dataset_config path/to/toml \
|
| 97 |
+
--text_encoder path/to/text_encoder \
|
| 98 |
+
--byt5 path/to/byt5 \
|
| 99 |
+
--batch_size 16
|
| 100 |
+
```
|
| 101 |
+
|
| 102 |
+
- Uses `hv_1_5_cache_text_encoder_outputs.py`.
|
| 103 |
+
- Requires both `--text_encoder` (Qwen2.5-VL) and `--byt5` arguments.
|
| 104 |
+
- Use `--fp8_vl` option to run the Qwen2.5-VL Text Encoder in fp8 mode for VRAM savings.
|
| 105 |
+
- The larger the batch size, the more VRAM is required. Adjust `--batch_size` according to your VRAM capacity.
|
| 106 |
+
|
| 107 |
+
<details>
|
| 108 |
+
<summary>日本語</summary>
|
| 109 |
+
|
| 110 |
+
テキストエンコーダー出力の事前キャッシングも専用のスクリプトを使用します。
|
| 111 |
+
|
| 112 |
+
- `hv_1_5_cache_text_encoder_outputs.py`を使用します。
|
| 113 |
+
- `--text_encoder`(Qwen2.5-VL)と`--byt5`の両方の引数が必要です。
|
| 114 |
+
- Qwen2.5-VLテキストエンコーダーをfp8モードで実行するための`--fp8_vl`オプションを使用します。
|
| 115 |
+
- バッチサイズが大きいほど、より多くのVRAMが必要です。VRAM容量に応じて`--batch_size`を調整してください。
|
| 116 |
+
|
| 117 |
+
</details>
|
| 118 |
+
|
| 119 |
+
## Training / 学習
|
| 120 |
+
|
| 121 |
+
Training uses a dedicated script `hv_1_5_train_network.py`.
|
| 122 |
+
|
| 123 |
+
### Text-to-Video (T2V) Training
|
| 124 |
+
|
| 125 |
+
```bash
|
| 126 |
+
accelerate launch --num_cpu_threads_per_process 1 --mixed_precision bf16 src/musubi_tuner/hv_1_5_train_network.py \
|
| 127 |
+
--dit path/to/dit_model \
|
| 128 |
+
--vae path/to/vae_model \
|
| 129 |
+
--text_encoder path/to/text_encoder \
|
| 130 |
+
--byt5 path/to/byt5 \
|
| 131 |
+
--dataset_config path/to/toml \
|
| 132 |
+
--task t2v \
|
| 133 |
+
--sdpa --mixed_precision bf16 \
|
| 134 |
+
    --timestep_sampling shift --weighting_scheme none --discrete_flow_shift 2.0 \
|
| 135 |
+
--optimizer_type adamw8bit --learning_rate 1e-4 --gradient_checkpointing \
|
| 136 |
+
--max_data_loader_n_workers 2 --persistent_data_loader_workers \
|
| 137 |
+
--network_module networks.lora_hv_1_5 --network_dim 32 \
|
| 138 |
+
--max_train_epochs 16 --save_every_n_epochs 1 --seed 42 \
|
| 139 |
+
--output_dir path/to/output_dir --output_name name-of-lora
|
| 140 |
+
```
|
| 141 |
+
|
| 142 |
+
### Image-to-Video (I2V) Training
|
| 143 |
+
|
| 144 |
+
For I2V training, specify `--task i2v` and provide the `--image_encoder` path:
|
| 145 |
+
|
| 146 |
+
```bash
|
| 147 |
+
accelerate launch --num_cpu_threads_per_process 1 --mixed_precision bf16 src/musubi_tuner/hv_1_5_train_network.py \
|
| 148 |
+
--dit path/to/dit_model \
|
| 149 |
+
--vae path/to/vae_model \
|
| 150 |
+
--text_encoder path/to/text_encoder \
|
| 151 |
+
--byt5 path/to/byt5 \
|
| 152 |
+
--image_encoder path/to/image_encoder \
|
| 153 |
+
--dataset_config path/to/toml \
|
| 154 |
+
--task i2v \
|
| 155 |
+
--sdpa --mixed_precision bf16 \
|
| 156 |
+
--timestep_sampling shift --weighting_scheme none --discrete_flow_shift 2.0 \
|
| 157 |
+
--optimizer_type adamw8bit --learning_rate 1e-4 --gradient_checkpointing \
|
| 158 |
+
--max_data_loader_n_workers 2 --persistent_data_loader_workers \
|
| 159 |
+
--network_module networks.lora_hv_1_5 --network_dim 32 \
|
| 160 |
+
--max_train_epochs 16 --save_every_n_epochs 1 --seed 42 \
|
| 161 |
+
--output_dir path/to/output_dir --output_name name-of-lora
|
| 162 |
+
```
|
| 163 |
+
|
| 164 |
+
- Uses `hv_1_5_train_network.py`.
|
| 165 |
+
- **Requires** specifying `--vae`, `--text_encoder`, and `--byt5`.
|
| 166 |
+
- **Requires** specifying `--network_module networks.lora_hv_1_5`.
|
| 167 |
+
- **Requires** specifying `--task` as either `t2v` or `i2v`.
|
| 168 |
+
- For I2V training, `--image_encoder` is required.
|
| 169 |
+
- It is not yet clear whether `--mixed_precision bf16` or `fp16` is better for HunyuanVideo 1.5 training.
|
| 170 |
+
- The timestep sampling settings for HunyuanVideo 1.5 training are unclear, but it may be good to base them on `--timestep_sampling shift --weighting_scheme none --discrete_flow_shift 2.0` and adjust as needed.
|
| 171 |
+
- The recommended optimizer is `--optimizer_type Muon`, but it is only available in PyTorch 2.9 and later. If your PyTorch version is older, use `--optimizer_type adamw8bit` or similar.
|
| 172 |
+
- Memory saving options like `--fp8_base` and `--fp8_scaled` (for DiT) and `--fp8_vl` (for Text Encoder) are available.
|
| 173 |
+
- `--gradient_checkpointing` is available for memory savings. See [HunyuanVideo documentation](./hunyuan_video.md#memory-optimization) for details.
|
| 174 |
+
|
| 175 |
+
<details>
|
| 176 |
+
<summary>日本語</summary>
|
| 177 |
+
|
| 178 |
+
HunyuanVideo 1.5の学習は専用のスクリプト`hv_1_5_train_network.py`を使用します。
|
| 179 |
+
|
| 180 |
+
**Text-to-Video (T2V) 学習**
|
| 181 |
+
|
| 182 |
+
コマンド例は英語版を参照してください。
|
| 183 |
+
|
| 184 |
+
**Image-to-Video (I2V) 学習**
|
| 185 |
+
|
| 186 |
+
I2V学習を行う場合、`--task i2v`を指定し、`--image_encoder`パスを提供します:
|
| 187 |
+
|
| 188 |
+
コマンド例は英語版を参照してください。
|
| 189 |
+
|
| 190 |
+
- `hv_1_5_train_network.py`を使用します。
|
| 191 |
+
- `--vae`、`--text_encoder`、`--byt5`を指定する必要があります。
|
| 192 |
+
- `--network_module networks.lora_hv_1_5`を指定する必要があります。
|
| 193 |
+
- `--task`に`t2v`または`i2v`を指定する必要があります。
|
| 194 |
+
- I2V学習の場合は、`--image_encoder`が必要です。
|
| 195 |
+
- HunyuanVideo 1.5の学習に`--mixed_precision bf16`と`fp16`のどちらが良いかはまだ不明です。
|
| 196 |
+
- HunyuanVideo 1.5のタイムステップサンプリング設定は不明ですが、`--timestep_sampling shift --weighting_scheme none --discrete_flow_shift 2.0`をベースに調整すると良いかもしれません。
|
| 197 |
+
- オプティマイザには`--optimizer_type Muon`を推奨しますが、PyTorch 2.9以降でのみ利用可能です。PyTorchのバージョンが古い場合は`--optimizer_type adamw8bit`などを使用してください。
|
| 198 |
+
- `--fp8_base`、`--fp8_scaled`(DiT用)や`--fp8_vl`(テキストエンコーダー用)などのメモリ節約オプションが利用可能です。
|
| 199 |
+
- メモリ節約のために`--gradient_checkpointing`が利用可能です。詳細は[HunyuanVideoドキュメント](./hunyuan_video.md#memory-optimization)を参照してください。
|
| 200 |
+
|
| 201 |
+
</details>
|
| 202 |
+
|
| 203 |
+
### Memory Optimization
|
| 204 |
+
|
| 205 |
+
- `--fp8_base` and `--fp8_scaled` options are available to reduce memory usage of DiT (specify both together). Quality may degrade slightly.
|
| 206 |
+
- `--fp8_vl` option is available to reduce memory usage of Text Encoder (Qwen2.5-VL).
|
| 207 |
+
- `--vae_sample_size` (default 128) controls VAE tiling size. Set to 256 if VRAM is sufficient for better quality. Set to 0 to disable tiling.
|
| 208 |
+
- `--vae_enable_patch_conv` enables patch-based convolution in VAE for memory optimization.
|
| 209 |
+
- `--gradient_checkpointing` and `--gradient_checkpointing_cpu_offload` are available for memory savings. See [HunyuanVideo documentation](./hunyuan_video.md#memory-optimization) for details.
|
| 210 |
+
- `--blocks_to_swap` option is available to offload some blocks to CPU. The maximum number of blocks that can be offloaded is 51.
|
| 211 |
+
|
| 212 |
+
<details>
|
| 213 |
+
<summary>日本語</summary>
|
| 214 |
+
|
| 215 |
+
- DiTのメモリ使用量を削減するために、`--fp8_base`と`--fp8_scaled`オプションを指定可能です(同時に指定してください)。品質はやや低下する可能性があります。
|
| 216 |
+
- Text Encoder (Qwen2.5-VL)のメモリ使用量を削減するために、`--fp8_vl`オプションを指定可能です。
|
| 217 |
+
- `--vae_sample_size`(デフォルト128)でVAEのタイリングサイズを制御します。VRAMが十分な場合は256に設定すると品質が向上します。0に設定するとタイリングを無効にします。
|
| 218 |
+
- `--vae_enable_patch_conv`でVAEのパッチベース畳み込みを有効にし、メモリを最適化します。
|
| 219 |
+
- メモリ節約のために`--gradient_checkpointing`と`--gradient_checkpointing_cpu_offload`が利用可能です。詳細は[HunyuanVideoドキュメント](./hunyuan_video.md#memory-optimization)を参照してください。
|
| 220 |
+
- `--blocks_to_swap`オプションで、一部のブロックをCPUにオフロードできます。オフロード可能な最大ブロック数は51です。
|
| 221 |
+
|
| 222 |
+
</details>
|
| 223 |
+
|
| 224 |
+
### Attention
|
| 225 |
+
|
| 226 |
+
- `--sdpa` for PyTorch's scaled dot product attention (does not require additional dependencies).
|
| 227 |
+
- `--flash_attn` for [FlashAttention](https://github.com/Dao-AILab/flash-attention).
|
| 228 |
+
- `--xformers` for xformers (requires `--split_attn`).
|
| 229 |
+
- `--sage_attn` for SageAttention (not yet supported for training).
|
| 230 |
+
- `--split_attn` processes attention in chunks, reducing VRAM usage slightly.
|
| 231 |
+
|
| 232 |
+
<details>
|
| 233 |
+
<summary>日本語</summary>
|
| 234 |
+
|
| 235 |
+
- `--sdpa`でPyTorchのscaled dot product attentionを使用(追加の依存ライブラリを必要としません)。
|
| 236 |
+
- `--flash_attn`で[FlashAttention](https://github.com/Dao-AILab/flash-attention)を使用。
|
| 237 |
+
- `--xformers`でxformersの利用も可能(`--split_attn`が必要)。
|
| 238 |
+
- `--sage_attn`でSageAttentionを使用(現時点では学習に未対応)。
|
| 239 |
+
- `--split_attn`を指定すると、attentionを分割して処理し、VRAM使用量をわずかに減らします。
|
| 240 |
+
|
| 241 |
+
</details>
|
| 242 |
+
|
| 243 |
+
### Other Options
|
| 244 |
+
|
| 245 |
+
For sample video generation during training, PyTorch Dynamo optimization, and other advanced configurations, refer to the [HunyuanVideo documentation](./hunyuan_video.md).
|
| 246 |
+
|
| 247 |
+
<details>
|
| 248 |
+
<summary>日本語</summary>
|
| 249 |
+
|
| 250 |
+
学習中のサンプル動画生成、PyTorch Dynamoによる最適化、その他の高度な設定については、[HunyuanVideoドキュメント](./hunyuan_video.md)を参照してください。
|
| 251 |
+
|
| 252 |
+
</details>
|
| 253 |
+
|
| 254 |
+
### Converting LoRA weights to ComfyUI format / LoRA重みをComfyUI形式に変換する
|
| 255 |
+
|
| 256 |
+
A script is provided to convert HunyuanVideo 1.5 LoRA weights to ComfyUI format.
|
| 257 |
+
|
| 258 |
+
```bash
|
| 259 |
+
python src/musubi_tuner/networks/convert_hunyuan_video_1_5_lora_to_comfy.py \
|
| 260 |
+
path/to/hv_1_5_lora.safetensors \
|
| 261 |
+
path/to/output_comfy_lora.safetensors
|
| 262 |
+
```
|
| 263 |
+
|
| 264 |
+
- The script is `convert_hunyuan_video_1_5_lora_to_comfy.py`.
|
| 265 |
+
- The first argument is the input HunyuanVideo 1.5 LoRA weights file.
|
| 266 |
+
- The second argument is the output ComfyUI-format LoRA weights file.
|
| 267 |
+
- `--reverse` option is available to convert from ComfyUI format to HunyuanVideo 1.5 format. Only works for LoRA weights converted by this script.
|
| 268 |
+
|
| 269 |
+
<details>
|
| 270 |
+
<summary>日本語</summary>
|
| 271 |
+
|
| 272 |
+
HunyuanVideo 1.5のLoRA重みをComfyUI形式に変換するスクリプトが提供されています。
|
| 273 |
+
|
| 274 |
+
- スクリプトは`convert_hunyuan_video_1_5_lora_to_comfy.py`です。
|
| 275 |
+
- 最初の引数は入力のHunyuanVideo 1.5 LoRA重みファイルです。
|
| 276 |
+
- 2番目の引数は出力のComfyUI形式のLoRA重みファイルです。
|
| 277 |
+
- `--reverse`オプションで、ComfyUI形式からHunyuanVideo 1.5形式への変換も可能です。このオプションは、このスクリプトで変換されたLoRA重みに対してのみ機能します。
|
| 278 |
+
|
| 279 |
+
</details>
|
| 280 |
+
|
| 281 |
+
## Inference / 推論
|
| 282 |
+
|
| 283 |
+
Inference uses a dedicated script `hv_1_5_generate_video.py`.
|
| 284 |
+
|
| 285 |
+
The recommended number of frames is 121 and the recommended number of inference steps is 50 in the official script, but the samples below use smaller values.
|
| 286 |
+
|
| 287 |
+
### Text-to-Video (T2V) Inference
|
| 288 |
+
|
| 289 |
+
```bash
|
| 290 |
+
python src/musubi_tuner/hv_1_5_generate_video.py \
|
| 291 |
+
--dit path/to/dit_model \
|
| 292 |
+
--vae path/to/vae_model \
|
| 293 |
+
--text_encoder path/to/text_encoder \
|
| 294 |
+
--byt5 path/to/byt5 \
|
| 295 |
+
--prompt "A cat" \
|
| 296 |
+
--video_size 720 1280 --video_length 21 --infer_steps 25 \
|
| 297 |
+
--attn_mode sdpa --fp8_scaled \
|
| 298 |
+
--save_path path/to/save/dir --output_type video \
|
| 299 |
+
--seed 1234 --lora_multiplier 1.0 --lora_weight path/to/lora.safetensors
|
| 300 |
+
```
|
| 301 |
+
|
| 302 |
+
### Image-to-Video (I2V) Inference
|
| 303 |
+
|
| 304 |
+
For I2V inference, specify the `--image_path` and `--image_encoder`:
|
| 305 |
+
|
| 306 |
+
```bash
|
| 307 |
+
python src/musubi_tuner/hv_1_5_generate_video.py \
|
| 308 |
+
--dit path/to/dit_model \
|
| 309 |
+
--vae path/to/vae_model \
|
| 310 |
+
--text_encoder path/to/text_encoder \
|
| 311 |
+
--byt5 path/to/byt5 \
|
| 312 |
+
--image_encoder path/to/image_encoder \
|
| 313 |
+
--image_path path/to/image.jpg \
|
| 314 |
+
--prompt "A cat walking" \
|
| 315 |
+
--video_size 720 1280 --video_length 21 --infer_steps 25 \
|
| 316 |
+
--attn_mode torch --fp8_scaled \
|
| 317 |
+
--save_path path/to/save/dir --output_type video \
|
| 318 |
+
--seed 1234 --lora_multiplier 1.0 --lora_weight path/to/lora.safetensors
|
| 319 |
+
```
|
| 320 |
+
|
| 321 |
+
- Uses `hv_1_5_generate_video.py`.
|
| 322 |
+
- **Requires** specifying `--vae`, `--text_encoder`, and `--byt5`.
|
| 323 |
+
- For I2V inference, `--image_path` and `--image_encoder` are required.
|
| 324 |
+
- `--video_size` is the size of the generated video, height and width are specified in that order.
|
| 325 |
+
- `--video_length` should be specified as "a multiple of 4 plus 1".
|
| 326 |
+
- `--prompt`: Prompt for generation.
|
| 327 |
+
- `--fp8_scaled` option is available for DiT to reduce memory usage. Quality may be slightly lower.
|
| 328 |
+
- `--vae_sample_size` (default 128) controls VAE tiling size. Set to 256 if VRAM is sufficient for better quality. Set to 0 to disable tiling.
|
| 329 |
+
- `--vae_enable_patch_conv` enables patch-based convolution in VAE for memory optimization.
|
| 330 |
+
- `--blocks_to_swap` option is available to offload some blocks to CPU. The maximum number of blocks that can be offloaded is 51.
|
| 331 |
+
- LoRA loading options (`--lora_weight`, `--lora_multiplier`, `--include_patterns`, `--exclude_patterns`) are available. `--lycoris` is also supported.
|
| 332 |
+
- `--guidance_scale` (default 6.0) controls the classifier-free guidance scale.
|
| 333 |
+
- `--flow_shift` (default 7.0) controls the discrete flow shift.
|
| 334 |
+
- `--save_merged_model` option is available to save the DiT model after merging LoRA weights. Inference is skipped if this is specified.
|
| 335 |
+
|
| 336 |
+
For 121 frames at 720p (1280x720) size, VRAM usage is around 20GB even with `--blocks_to_swap 51`.
|
| 337 |
+
|
| 338 |
+
<details>
|
| 339 |
+
<summary>日本語</summary>
|
| 340 |
+
|
| 341 |
+
HunyuanVideo 1.5の推論は専用のスクリプト`hv_1_5_generate_video.py`を使用します。
|
| 342 |
+
|
| 343 |
+
公式スクリプトの推奨フレーム数は121、推論ステップ数は50ですが、サンプルでは少なめにしています。
|
| 344 |
+
|
| 345 |
+
**Text-to-Video (T2V) 推論**
|
| 346 |
+
|
| 347 |
+
コマンド例は英語版を参照してください。
|
| 348 |
+
|
| 349 |
+
**Image-to-Video (I2V) 推論**
|
| 350 |
+
|
| 351 |
+
I2V推論を行う場合、`--image_path`と`--image_encoder`を指定します:
|
| 352 |
+
|
| 353 |
+
コマンド例は英語版を参照してください。
|
| 354 |
+
|
| 355 |
+
- `hv_1_5_generate_video.py`を使用します。
|
| 356 |
+
- `--vae`、`--text_encoder`、`--byt5`を指定する必要があります。
|
| 357 |
+
- I2V推論の場合は、`--image_path`と`--image_encoder`が必要です。
|
| 358 |
+
- `--video_size`は生成する動画のサイズで、高さと幅をその順番で指定します。
|
| 359 |
+
- `--video_length`は「4の倍数+1」を指定してください。
|
| 360 |
+
- `--prompt`: 生成用のプロンプトです。
|
| 361 |
+
- DiTのメモリ使用量を削減するために、`--fp8_scaled`オプションを指定可能です。品質はやや低下する可能性があります。
|
| 362 |
+
- `--blocks_to_swap`オプションで、一部のブロックをCPUにオフロードできます。オフロード可能な最大ブロック数は51です。
|
| 363 |
+
- `--vae_sample_size`(デフォルト128)でVAEのタイリングサイズを制御します。VRAMが十分な場合は256に設定すると品質が向上します。0に設定するとタイリングを無効にします。
|
| 364 |
+
- `--vae_enable_patch_conv`でVAEのパッチベース畳み込みを有効にし、メモリを最適化します。
|
| 365 |
+
- LoRAの読み込みオプション(`--lora_weight`、`--lora_multiplier`、`--include_patterns`、`--exclude_patterns`)が利用可能です。LyCORISもサポートされています。
|
| 366 |
+
- `--guidance_scale`(デフォルト6.0)は、classifier-free guidanceスケールを制御します。
|
| 367 |
+
- `--flow_shift`(デフォルト7.0)は、discrete flow shiftを制御します。
|
| 368 |
+
- `--save_merged_model`オプションは、LoRAの重みをマージした後にDiTモデルを保存するためのオプションです。これを指定すると推論はスキップされます。
|
| 369 |
+
|
| 370 |
+
720p (1280x720) サイズで121フレームの場合、`--blocks_to_swap 51`を指定してもVRAM使用量は約20GB程度になります。
|
| 371 |
+
|
| 372 |
+
</details>
|
docs/kandinsky5.md
ADDED
|
@@ -0,0 +1,476 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
> 📝 Click on the language section to expand / 言語をクリックして展開
|
| 2 |
+
|
| 3 |
+
# Kandinsky 5
|
| 4 |
+
|
| 5 |
+
## Overview / 概要
|
| 6 |
+
|
| 7 |
+
This is an unofficial training and inference script for [Kandinsky 5](https://github.com/ai-forever/Kandinsky-5). The features are as follows:
|
| 8 |
+
|
| 9 |
+
- fp8 support and memory reduction by block swap
|
| 10 |
+
- Inference without installing Flash attention (using PyTorch's scaled dot product attention)
|
| 11 |
+
- LoRA training for text-to-video (T2V) and image-to-video (I2V, Pro) models
|
| 12 |
+
|
| 13 |
+
This feature is experimental.
|
| 14 |
+
|
| 15 |
+
<details>
|
| 16 |
+
<summary>日本語</summary>
|
| 17 |
+
|
| 18 |
+
[Kandinsky 5](https://github.com/ai-forever/Kandinsky-5) の非公式の学習および推論スクリプトです。
|
| 19 |
+
|
| 20 |
+
以下の特徴があります:
|
| 21 |
+
|
| 22 |
+
- fp8対応およびblock swapによる省メモリ化
|
| 23 |
+
- Flash attentionのインストールなしでの実行(PyTorchのscaled dot product attentionを使用)
|
| 24 |
+
- テキストから動画(T2V)および画像から動画(I2V、Pro)モデルのLoRA学習
|
| 25 |
+
|
| 26 |
+
この機能は実験的なものです。
|
| 27 |
+
|
| 28 |
+
</details>
|
| 29 |
+
|
| 30 |
+
## Download the model / モデルのダウンロード
|
| 31 |
+
|
| 32 |
+
Download the model weights from the [Kandinsky 5.0 Collection](https://huggingface.co/collections/ai-forever/kandinsky-50) on Hugging Face.
|
| 33 |
+
|
| 34 |
+
### DiT Model / DiTモデル
|
| 35 |
+
|
| 36 |
+
This document focuses on **Pro** models. The trainer also works with **Lite** models.
|
| 37 |
+
本ドキュメントでは **Pro** モデルを中心に説明しますが、トレーナーは **Lite** モデルでも動作します。
|
| 38 |
+
|
| 39 |
+
Download a Pro DiT `.safetensors` checkpoint from the Kandinsky 5.0 Collection (e.g. `kandinsky5pro_t2v_pretrain_5s.safetensors` or `kandinsky5pro_i2v_sft_5s.safetensors`).
|
| 40 |
+
|
| 41 |
+
### VAE
|
| 42 |
+
|
| 43 |
+
Kandinsky 5 uses the HunyuanVideo 3D VAE. Download `diffusion_pytorch_model.safetensors` (or `pytorch_model.pt`) from:
|
| 44 |
+
https://huggingface.co/hunyuanvideo-community/HunyuanVideo
|
| 45 |
+
|
| 46 |
+
### Text Encoders / テキストエンコーダ
|
| 47 |
+
|
| 48 |
+
Kandinsky 5 uses Qwen2.5-VL-7B and CLIP for text encoding.
|
| 49 |
+
|
| 50 |
+
**Qwen2.5-VL-7B**: Download from https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct (or use the path to your local Qwen/Qwen2.5-VL-7B-Instruct model)
|
| 51 |
+
|
| 52 |
+
**CLIP**: Use the Hugging Face Transformers model `openai/clip-vit-large-patch14`.
|
| 53 |
+
|
| 54 |
+
Pass either the model ID (e.g., `--text_encoder_clip openai/clip-vit-large-patch14`) or a path to the locally cached snapshot directory.
|
| 55 |
+
|
| 56 |
+
### Directory Structure / ディレクトリ構造
|
| 57 |
+
|
| 58 |
+
Place them in your chosen directory structure:
|
| 59 |
+
|
| 60 |
+
```
|
| 61 |
+
weights/
|
| 62 |
+
├── model/
|
| 63 |
+
│ └── kandinsky5pro_t2v_pretrain_5s.safetensors
|
| 64 |
+
├── vae/
|
| 65 |
+
│ └── diffusion_pytorch_model.safetensors
|
| 66 |
+
├── text_encoder/
|
| 67 |
+
│ └── (Qwen2.5-VL-7B files)
|
| 68 |
+
└── text_encoder2/
|
| 69 |
+
└── (openai/clip-vit-large-patch14 files)
|
| 70 |
+
```
|
| 71 |
+
|
| 72 |
+
<details>
|
| 73 |
+
<summary>日本語</summary>
|
| 74 |
+
|
| 75 |
+
Hugging Faceの[Kandinsky 5.0 Collection](https://huggingface.co/collections/ai-forever/kandinsky-50)からモデルの重みをダウンロードしてください。
|
| 76 |
+
|
| 77 |
+
このドキュメントは **Proモデル** を前提に説明しています。
|
| 78 |
+
|
| 79 |
+
**DiTモデル**: 上記のリポジトリから`.safetensors`ファイルをダウンロードしてください。
|
| 80 |
+
|
| 81 |
+
**VAE**: Kandinsky 5はHunyuanVideo 3D VAEを使用します。上記リンクから`diffusion_pytorch_model.safetensors`(または`pytorch_model.pt`)をダウンロードしてください。
|
| 82 |
+
|
| 83 |
+
**テキストエンコーダ**: Qwen2.5-VL-7BとCLIPを使用します。
|
| 84 |
+
|
| 85 |
+
**Qwen2.5-VL-7B**: https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct からダウンロードしてください(またはローカルの `Qwen/Qwen2.5-VL-7B-Instruct` を指定します)。
|
| 86 |
+
|
| 87 |
+
**CLIP**: Hugging Face Transformersの `openai/clip-vit-large-patch14` を使用してください(モデルIDまたはローカルにキャッシュされたsnapshotディレクトリへのパスを指定します)。
|
| 88 |
+
|
| 89 |
+
任意のディレクトリ構造に配置してください。
|
| 90 |
+
|
| 91 |
+
</details>
|
| 92 |
+
|
| 93 |
+
## List of Kandinsky 5 models / 利用可能なタスク
|
| 94 |
+
|
| 95 |
+
The `--task` option selects a model configuration (architecture, attention type, resolution, and default parameters).
|
| 96 |
+
The DiT checkpoint must be set explicitly via `--dit` (this overrides the task's default checkpoint path).
|
| 97 |
+
|
| 98 |
+
| # | Task | Checkpoint | Parameters | HF URL |
|
| 99 |
+
|---|---|---|---|---|
|
| 100 |
+
| 1 | k5-pro-t2v-5s-sd | kandinsky5pro_t2v_sft_5s.safetensors | T2V, 5s, 19B, Pro SFT | [kandinskylab/Kandinsky-5.0-T2V-Pro-sft-5s](https://huggingface.co/kandinskylab/Kandinsky-5.0-T2V-Pro-sft-5s) |
|
| 101 |
+
| 2 | k5-pro-t2v-10s-sd | kandinsky5pro_t2v_sft_10s.safetensors | T2V, 10s, 19B, Pro SFT | [kandinskylab/Kandinsky-5.0-T2V-Pro-sft-10s](https://huggingface.co/kandinskylab/Kandinsky-5.0-T2V-Pro-sft-10s) |
|
| 102 |
+
| 3 | k5-pro-i2v-5s-sd | kandinsky5pro_i2v_sft_5s.safetensors | I2V, 5s, 19B, Pro SFT | [kandinskylab/Kandinsky-5.0-I2V-Pro-sft-5s](https://huggingface.co/kandinskylab/Kandinsky-5.0-I2V-Pro-sft-5s) |
|
| 103 |
+
| 4 | k5-pro-t2v-5s-sd | kandinsky5pro_t2v_pretrain_5s.safetensors | T2V, 5s, 19B, Pro Pretrain | [kandinskylab/Kandinsky-5.0-T2V-Pro-pretrain-5s](https://huggingface.co/kandinskylab/Kandinsky-5.0-T2V-Pro-pretrain-5s) |
|
| 104 |
+
| 5 | k5-pro-t2v-10s-sd | kandinsky5pro_t2v_pretrain_10s.safetensors | T2V, 10s, 19B, Pro Pretrain | [kandinskylab/Kandinsky-5.0-T2V-Pro-pretrain-10s](https://huggingface.co/kandinskylab/Kandinsky-5.0-T2V-Pro-pretrain-10s) |
|
| 105 |
+
|
| 106 |
+
[Kandinsky 5.0 Video Lite models](https://huggingface.co/collections/kandinskylab/kandinsky-50-video-lite) are technically supported, but were not extensively tested. Community feedback is welcome.
|
| 107 |
+
|
| 108 |
+
[Kandinsky 5.0 Image Lite models](https://huggingface.co/collections/kandinskylab/kandinsky-50-image-lite) are not supported, but support can be implemented if they get active support from the community.
|
| 109 |
+
|
| 110 |
+
<details>
|
| 111 |
+
<summary>日本語</summary>
|
| 112 |
+
|
| 113 |
+
`--task` オプションでタスク設定(アーキテクチャ、attention、解像度、各種デフォルト値)を選択します。
|
| 114 |
+
DiTのチェックポイントは `--dit` で明示的に指定できます(タスクのデフォルトのパスを上書きします)。
|
| 115 |
+
|
| 116 |
+
Kandinsky 5.0 Video Liteモデル(https://huggingface.co/collections/kandinskylab/kandinsky-50-video-lite)は技術的にはサポートされていますが、十分な動作確認はできていません。問題があればフィードバックをお願いします。
|
| 117 |
+
|
| 118 |
+
Kandinsky 5.0 Image Liteモデル(https://huggingface.co/collections/kandinskylab/kandinsky-50-image-lite)は現在サポートしていませんが、コミュニティからの継続的な要望・協力があれば対応可能です。
|
| 119 |
+
|
| 120 |
+
</details>
|
| 121 |
+
|
| 122 |
+
## Pre-caching / 事前キャッシュ
|
| 123 |
+
|
| 124 |
+
Pre-caching is required before training. This involves caching both latents and text encoder outputs.
|
| 125 |
+
|
| 126 |
+
### Notes for Kandinsky5 / Kandinsky5の注意点
|
| 127 |
+
|
| 128 |
+
- You must cache **text encoder outputs** with `kandinsky5_cache_text_encoder_outputs.py` before training.
|
| 129 |
+
- `--text_encoder_qwen` / `--text_encoder_clip` are Hugging Face Transformers models: pass a model ID (recommended) or a local HF snapshot directory.
|
| 130 |
+
- For I2V tasks, the latent cache stores both first and last frame latents (`latents_image`, always two frames) when running `kandinsky5_cache_latents.py`—one cache works for both first-only and first+last conditioning.
|
| 131 |
+
|
| 132 |
+
<details>
|
| 133 |
+
<summary>日本語</summary>
|
| 134 |
+
|
| 135 |
+
- 学習前に、`kandinsky5_cache_text_encoder_outputs.py` による **テキストエンコーダ出力のキャッシュ** が必須です。
|
| 136 |
+
- `--text_encoder_qwen` / `--text_encoder_clip` はHugging Face Transformersのモデルです。モデルID(推奨)またはローカルのHF snapshotディレクトリを指定してください。
|
| 137 |
+
- I2Vタスクでは、`kandinsky5_cache_latents.py` 実行時に最初と最後のフレームlatent(`latents_image`、常に2フレーム)もキャッシュされます。1回のキャッシュで first / first+last 両方のモードに対応できます。
|
| 138 |
+
|
| 139 |
+
</details>
|
| 140 |
+
|
| 141 |
+
### Text Encoder Output Pre-caching / テキストエンコーダ出力の事前キャッシュ
|
| 142 |
+
|
| 143 |
+
Text encoder output pre-caching is required. Create the cache using the following command:
|
| 144 |
+
|
| 145 |
+
```bash
|
| 146 |
+
python kandinsky5_cache_text_encoder_outputs.py \
|
| 147 |
+
--dataset_config path/to/dataset.toml \
|
| 148 |
+
--text_encoder_qwen Qwen/Qwen2.5-VL-7B-Instruct \
|
| 149 |
+
--text_encoder_clip openai/clip-vit-large-patch14 \
|
| 150 |
+
--batch_size 4
|
| 151 |
+
```
|
| 152 |
+
|
| 153 |
+
Adjust `--batch_size` according to your available VRAM.
|
| 154 |
+
|
| 155 |
+
For additional options, use `python kandinsky5_cache_text_encoder_outputs.py --help`.
|
| 156 |
+
|
| 157 |
+
<details>
|
| 158 |
+
<summary>日本語</summary>
|
| 159 |
+
|
| 160 |
+
テキストエンコーダ出力の事前キャッシュは必須です。上のコマンド例を使用してキャッシュを作成してください。
|
| 161 |
+
|
| 162 |
+
使用可能なVRAMに合わせて `--batch_size` を調整してください。
|
| 163 |
+
|
| 164 |
+
その他のオプションは `--help` で確認できます。
|
| 165 |
+
|
| 166 |
+
</details>
|
| 167 |
+
|
| 168 |
+
### Latent Pre-caching / latentの事前キャッシュ
|
| 169 |
+
|
| 170 |
+
Latent pre-caching is required. Create the cache using the following command:
|
| 171 |
+
|
| 172 |
+
```bash
|
| 173 |
+
python kandinsky5_cache_latents.py \
|
| 174 |
+
--dataset_config path/to/dataset.toml \
|
| 175 |
+
--vae path/to/vae/diffusion_pytorch_model.safetensors
|
| 176 |
+
```
|
| 177 |
+
|
| 178 |
+
For NABLA training, you may want to build NABLA-compatible latent caches:
|
| 179 |
+
|
| 180 |
+
```bash
|
| 181 |
+
python kandinsky5_cache_latents.py \
|
| 182 |
+
--dataset_config path/to/dataset.toml \
|
| 183 |
+
--vae path/to/vae/diffusion_pytorch_model.safetensors \
|
| 184 |
+
--nabla_resize
|
| 185 |
+
```
|
| 186 |
+
|
| 187 |
+
If you're running low on VRAM, lower the `--batch_size`.
|
| 188 |
+
|
| 189 |
+
For additional options, use `python kandinsky5_cache_latents.py --help`.
|
| 190 |
+
|
| 191 |
+
<details>
|
| 192 |
+
<summary>日本語</summary>
|
| 193 |
+
|
| 194 |
+
latentの事前キャッシュは必須です。上のコマンド例を使用してキャッシュを作成してください。
|
| 195 |
+
|
| 196 |
+
VRAMが足りない場合は、`--batch_size`を小さくしてください。
|
| 197 |
+
|
| 198 |
+
NABLAで学習する場合は、NABLA互換のlatentキャッシュを作成することを推奨します:
|
| 199 |
+
|
| 200 |
+
```bash
|
| 201 |
+
python kandinsky5_cache_latents.py \
|
| 202 |
+
--dataset_config path/to/dataset.toml \
|
| 203 |
+
--vae path/to/vae/diffusion_pytorch_model.safetensors \
|
| 204 |
+
--nabla_resize
|
| 205 |
+
```
|
| 206 |
+
|
| 207 |
+
その他のオプションは `--help` で確認できます。
|
| 208 |
+
|
| 209 |
+
</details>
|
| 210 |
+
|
| 211 |
+
## Training / 学習
|
| 212 |
+
|
| 213 |
+
Start training using the following command (input as a single line):
|
| 214 |
+
|
| 215 |
+
```bash
|
| 216 |
+
accelerate launch --num_cpu_threads_per_process 1 --mixed_precision bf16 \
|
| 217 |
+
kandinsky5_train_network.py \
|
| 218 |
+
--mixed_precision bf16 \
|
| 219 |
+
--dataset_config path/to/dataset.toml \
|
| 220 |
+
--task k5-pro-t2v-5s-sd \
|
| 221 |
+
--dit path/to/kandinsky5pro_t2v_pretrain_5s.safetensors \
|
| 222 |
+
--text_encoder_qwen Qwen/Qwen2.5-VL-7B-Instruct \
|
| 223 |
+
--text_encoder_clip openai/clip-vit-large-patch14 \
|
| 224 |
+
--vae path/to/vae/diffusion_pytorch_model.safetensors \
|
| 225 |
+
--fp8_base \
|
| 226 |
+
--sdpa \
|
| 227 |
+
--gradient_checkpointing \
|
| 228 |
+
--max_data_loader_n_workers 1 \
|
| 229 |
+
--persistent_data_loader_workers \
|
| 230 |
+
--learning_rate 1e-4 \
|
| 231 |
+
--optimizer_type AdamW8Bit \
|
| 232 |
+
--optimizer_args "weight_decay=0.001" "betas=(0.9,0.95)" \
|
| 233 |
+
--max_grad_norm 1.0 \
|
| 234 |
+
--lr_scheduler constant_with_warmup \
|
| 235 |
+
--lr_warmup_steps 100 \
|
| 236 |
+
--network_module networks.lora_kandinsky \
|
| 237 |
+
--network_dim 32 \
|
| 238 |
+
--network_alpha 32 \
|
| 239 |
+
--timestep_sampling shift \
|
| 240 |
+
--discrete_flow_shift 5.0 \
|
| 241 |
+
--output_dir path/to/output \
|
| 242 |
+
--output_name k5_lora \
|
| 243 |
+
--save_every_n_epochs 1 \
|
| 244 |
+
--max_train_epochs 50 \
|
| 245 |
+
--scheduler_scale 10.0
|
| 246 |
+
```
|
| 247 |
+
|
| 248 |
+
For I2V training, switch the task and checkpoint to an I2V preset (e.g., `k5-pro-i2v-5s-sd` with `kandinsky5pro_i2v_sft_5s.safetensors`). The latent cache already stores first and last frame latents (`latents_image`, two frames) when you run `kandinsky5_cache_latents.py`, so the same cache covers both first-only and first+last modes—no extra flags are needed beyond picking an I2V task.
|
| 249 |
+
|
| 250 |
+
**Note on first+last frame conditioning**: First+last frame training support is experimental. The effectiveness and plausibility of this approach have not yet been thoroughly tested. Feedback and results from community testing are welcome.
|
| 251 |
+
|
| 252 |
+
The training settings are experimental. Appropriate learning rates, training steps, timestep distribution, etc. are not yet fully determined. Feedback is welcome.
|
| 253 |
+
|
| 254 |
+
For additional options, use `python kandinsky5_train_network.py --help`.
|
| 255 |
+
|
| 256 |
+
### Key Options / 主要オプション
|
| 257 |
+
|
| 258 |
+
- `--task`: Model configuration (architecture, attention type, resolution, sampling parameters). See Available Tasks above.
|
| 259 |
+
- `--dit`: Path to DiT checkpoint. **Overrides the task's default checkpoint path.** You can use any compatible checkpoint (SFT, pretrain, or your own) with any task config as long as the architecture matches.
|
| 260 |
+
- `--vae`: Path to VAE checkpoint (overrides task default)
|
| 261 |
+
- `--network_module`: Use `networks.lora_kandinsky` for Kandinsky5 LoRA
|
| 262 |
+
|
| 263 |
+
**Note**: The `--task` option only sets the model architecture and parameters, not the weights. Use `--dit` to specify which checkpoint to load.
|
| 264 |
+
|
| 265 |
+
**注意**: `--task`オプションはモデルのアーキテクチャとパラメータのみを設定し、重みは設定しません。`--dit`で読み込むチェックポイントを指定してください。
|
| 266 |
+
|
| 267 |
+
### Memory Optimization / メモリ最適化
|
| 268 |
+
|
| 269 |
+
`--gradient_checkpointing` enables gradient checkpointing to reduce VRAM usage.
|
| 270 |
+
|
| 271 |
+
`--fp8_base` runs DiT in fp8 mode. This can significantly reduce memory consumption but may impact output quality.
|
| 272 |
+
|
| 273 |
+
If you're running low on VRAM, use `--blocks_to_swap` to offload some blocks to CPU.
|
| 274 |
+
|
| 275 |
+
`--gradient_checkpointing_cpu_offload` can be used to offload activations to CPU when using gradient checkpointing. This must be used together with `--gradient_checkpointing`.
|
| 276 |
+
|
| 277 |
+
### Attention / アテンション
|
| 278 |
+
|
| 279 |
+
Use `--sdpa`, `--flash_attn`, `--flash3`, `--sage_attn`, or `--xformers` to control the attention backend for Kandinsky5.
|
| 280 |
+
|
| 281 |
+
### Kandinsky5-specific Options / Kandinsky5固有オプション
|
| 282 |
+
|
| 283 |
+
- `--scheduler_scale`: Overrides the task's scheduler scaling factor. This affects the timestep schedule used in sampling/inference and is also stored in the task config used during training.
|
| 284 |
+
- `--offload_dit_during_sampling`: Offloads the DiT model to CPU during sampling (sample generation during training, and in `kandinsky5_generate_video.py`) to reduce peak VRAM usage.
|
| 285 |
+
- `--i` / `--image`: Init image path for i2v-style seeding in `kandinsky5_generate_video.py`.
|
| 286 |
+
|
| 287 |
+
**NABLA attention (training):**
|
| 288 |
+
|
| 289 |
+
- `--force_nabla_attention`: Force NABLA attention regardless of the task default.
|
| 290 |
+
- `--nabla_method`: NABLA binarization method (default `topcdf`).
|
| 291 |
+
- `--nabla_P`: CDF threshold (default `0.9`).
|
| 292 |
+
- `--nabla_wT`, `--nabla_wH`, `--nabla_wW`: STA window sizes (defaults `11`, `3`, `3`).
|
| 293 |
+
- `--nabla_add_sta` / `--no_nabla_add_sta`: Enable/disable STA prior when forcing NABLA.
|
| 294 |
+
|
| 295 |
+
**NABLA-compatible latent caching:**
|
| 296 |
+
|
| 297 |
+
- `kandinsky5_cache_latents.py --nabla_resize`: Resizes inputs to the next multiple of 128 before VAE encoding, which helps produce latents compatible with NABLA geometry constraints.
|
| 298 |
+
|
| 299 |
+
### Sample Generation During Training / 学習中のサンプル生成
|
| 300 |
+
|
| 301 |
+
Sample generation during training is supported. See [sampling during training](./sampling_during_training.md) for details.
|
| 302 |
+
|
| 303 |
+
<details>
|
| 304 |
+
<summary>日本語</summary>
|
| 305 |
+
|
| 306 |
+
上のコマンド例を使用して学習を開始してください(実際には一行で入力)。
|
| 307 |
+
|
| 308 |
+
日本語セクションの例(英語セクションと同じ内容):
|
| 309 |
+
|
| 310 |
+
```bash
|
| 311 |
+
accelerate launch --num_cpu_threads_per_process 1 --mixed_precision bf16 \
|
| 312 |
+
kandinsky5_train_network.py \
|
| 313 |
+
--mixed_precision bf16 \
|
| 314 |
+
--dataset_config path/to/dataset.toml \
|
| 315 |
+
--task k5-pro-t2v-5s-sd \
|
| 316 |
+
--dit path/to/kandinsky5pro_t2v_pretrain_5s.safetensors \
|
| 317 |
+
--text_encoder_qwen Qwen/Qwen2.5-VL-7B-Instruct \
|
| 318 |
+
--text_encoder_clip openai/clip-vit-large-patch14 \
|
| 319 |
+
--vae path/to/vae/diffusion_pytorch_model.safetensors \
|
| 320 |
+
--fp8_base \
|
| 321 |
+
--sdpa \
|
| 322 |
+
--gradient_checkpointing \
|
| 323 |
+
--max_data_loader_n_workers 1 \
|
| 324 |
+
--persistent_data_loader_workers \
|
| 325 |
+
--learning_rate 1e-4 \
|
| 326 |
+
--optimizer_type AdamW8Bit \
|
| 327 |
+
--optimizer_args "weight_decay=0.001" "betas=(0.9,0.95)" \
|
| 328 |
+
--max_grad_norm 1.0 \
|
| 329 |
+
--lr_scheduler constant_with_warmup \
|
| 330 |
+
--lr_warmup_steps 100 \
|
| 331 |
+
--network_module networks.lora_kandinsky \
|
| 332 |
+
--network_dim 32 \
|
| 333 |
+
--network_alpha 32 \
|
| 334 |
+
--timestep_sampling shift \
|
| 335 |
+
--discrete_flow_shift 5.0 \
|
| 336 |
+
--output_dir path/to/output \
|
| 337 |
+
--output_name k5_lora \
|
| 338 |
+
--save_every_n_epochs 1 \
|
| 339 |
+
--max_train_epochs 50 \
|
| 340 |
+
--scheduler_scale 10.0
|
| 341 |
+
```
|
| 342 |
+
|
| 343 |
+
I2Vの学習を行う場合は、タスクとチェックポイントをI2V向けプリセットに変更してください(例: `k5-pro-i2v-5s-sd` と `kandinsky5pro_i2v_sft_5s.safetensors`)。`kandinsky5_cache_latents.py` でlatentをキャッシュする際に、最初と最後のフレームlatent(`latents_image`、常に2フレーム)も保存されるため、1回のキャッシュで first / first+last 両方のモードに対応でき、I2V専用の追加フラグは不要です(I2Vタスクを選ぶだけで動作します)。
|
| 344 |
+
|
| 345 |
+
**最初と最後のフレーム条件付けについて**: 最初と最後のフレーム学習サポートは実験的なものです。このアプローチの有効性と妥当性はまだ十分にテストされていません。コミュニティからのフィードバックと結果をお待ちしています。
|
| 346 |
+
|
| 347 |
+
学習設定は実験的なものです。適切な学習率、学習ステップ数、タイムステップの分布などは、まだ完全には決まっていません。フィードバックをお待ちしています。
|
| 348 |
+
|
| 349 |
+
その他のオプションは `--help` で確認できます。
|
| 350 |
+
|
| 351 |
+
**主要オプション**
|
| 352 |
+
|
| 353 |
+
- `--task`: モデル設定(上記の利用可能なタスクを参照)
|
| 354 |
+
- `--dit`: DiTチェックポイントへのパス(タスクのデフォルトを上書き)
|
| 355 |
+
- `--vae`: VAEチェックポイントへのパス(タスクのデフォルトを上書き)
|
| 356 |
+
- `--network_module`: Kandinsky5 LoRAには `networks.lora_kandinsky` を使用
|
| 357 |
+
|
| 358 |
+
**メモリ最適化**
|
| 359 |
+
|
| 360 |
+
`--gradient_checkpointing`でgradient checkpointingを有効にし、VRAM使用量を削減できます。
|
| 361 |
+
|
| 362 |
+
`--fp8_base`を指定すると、DiTがfp8で学習されます。消費メモリを大きく削減できますが、品質は低下する可能性があります。
|
| 363 |
+
|
| 364 |
+
VRAMが足りない場合は、`--blocks_to_swap`を指定して、一部のブロックをCPUにオフロードしてください。
|
| 365 |
+
|
| 366 |
+
`--gradient_checkpointing_cpu_offload`を指定すると、gradient checkpointing使用時にアクティベーションをCPUにオフロードします。`--gradient_checkpointing`と併用する必要があります。
|
| 367 |
+
|
| 368 |
+
**アテンション**
|
| 369 |
+
|
| 370 |
+
`--sdpa`/`--flash_attn`/`--flash3`/`--sage_attn`/`--xformers`はKandinsky5のattention backendに適用されます。
|
| 371 |
+
|
| 372 |
+
**Kandinsky5固有オプション**
|
| 373 |
+
|
| 374 |
+
- `--scheduler_scale`: タスクの`scheduler_scale`を上書きします。サンプリング/推論で使うタイムステップスケジュールに影響します。
|
| 375 |
+
- `--offload_dit_during_sampling`: サンプル生成時(学習中のサンプリング、および `kandinsky5_generate_video.py`)にDiTをCPUへ退避し、ピークVRAMを下げます。
|
| 376 |
+
- `--i` / `--image`: `kandinsky5_generate_video.py` でi2v風の初期画像(1フレーム目のシード)を指定します。
|
| 377 |
+
|
| 378 |
+
**NABLAアテンション(学習)**
|
| 379 |
+
|
| 380 |
+
- `--force_nabla_attention`: タスク設定に関係なくNABLAを強制します。
|
| 381 |
+
- `--nabla_method`: NABLAの二値化メソッド(デフォルト `topcdf`)。
|
| 382 |
+
- `--nabla_P`: CDFしきい値(デフォルト `0.9`)。
|
| 383 |
+
- `--nabla_wT`, `--nabla_wH`, `--nabla_wW`: STAウィンドウ(デフォルト `11`, `3`, `3`)。
|
| 384 |
+
- `--nabla_add_sta` / `--no_nabla_add_sta`: STA priorの有効/無効。
|
| 385 |
+
|
| 386 |
+
**NABLA互換latentキャッシュ**
|
| 387 |
+
|
| 388 |
+
- `kandinsky5_cache_latents.py --nabla_resize`: VAEエンコード前に入力を128の倍数へリサイズし、NABLAの幾何条件に合うlatentを生成しやすくします。
|
| 389 |
+
|
| 390 |
+
**学習中のサンプル生成**
|
| 391 |
+
|
| 392 |
+
学習中のサンプル生成がサポートされています。詳細は[学習中のサンプリング](./sampling_during_training.md)を参照してください。
|
| 393 |
+
|
| 394 |
+
</details>
|
| 395 |
+
|
| 396 |
+
## Inference / 推論
|
| 397 |
+
|
| 398 |
+
Generate videos using the following command:
|
| 399 |
+
|
| 400 |
+
```bash
|
| 401 |
+
python kandinsky5_generate_video.py \
|
| 402 |
+
--task k5-pro-t2v-5s-sd \
|
| 403 |
+
--dit path/to/kandinsky5pro_t2v_pretrain_5s.safetensors \
|
| 404 |
+
--vae path/to/vae/diffusion_pytorch_model.safetensors \
|
| 405 |
+
--text_encoder_qwen Qwen/Qwen2.5-VL-7B-Instruct \
|
| 406 |
+
--text_encoder_clip openai/clip-vit-large-patch14 \
|
| 407 |
+
--offload_dit_during_sampling \
|
| 408 |
+
--fp8_base \
|
| 409 |
+
--dtype bfloat16 \
|
| 410 |
+
--prompt "A cat walks on the grass, realistic style." \
|
| 411 |
+
--negative_prompt "low quality, artifacts" \
|
| 412 |
+
--frames 17 \
|
| 413 |
+
--steps 50 \
|
| 414 |
+
--guidance 5 \
|
| 415 |
+
--scheduler_scale 10 \
|
| 416 |
+
--seed 42 \
|
| 417 |
+
--width 512 \
|
| 418 |
+
--height 512 \
|
| 419 |
+
--output path/to/output.mp4 \
|
| 420 |
+
--lora_weight path/to/lora.safetensors \
|
| 421 |
+
--lora_multiplier 1.0
|
| 422 |
+
```
|
| 423 |
+
|
| 424 |
+
### Options / オプション
|
| 425 |
+
|
| 426 |
+
- `--task`: Model configuration
|
| 427 |
+
- `--prompt`: Text prompt for generation
|
| 428 |
+
- `--negative_prompt`: Negative prompt (optional)
|
| 429 |
+
- `--output`: Output file path (.mp4 for video, .png for image)
|
| 430 |
+
- `--width`, `--height`: Output resolution (defaults from task config)
|
| 431 |
+
- `--frames`: Number of frames (defaults from task config)
|
| 432 |
+
- `--steps`: Number of inference steps (defaults from task config)
|
| 433 |
+
- `--guidance`: Guidance scale (defaults from task config)
|
| 434 |
+
- `--seed`: Random seed
|
| 435 |
+
- `--fp8_base`: Run DiT in fp8 mode
|
| 436 |
+
- `--blocks_to_swap`: Number of blocks to offload to CPU
|
| 437 |
+
- `--lora_weight`: Path(s) to LoRA weight file(s)
|
| 438 |
+
- `--lora_multiplier`: LoRA multiplier(s)
|
| 439 |
+
|
| 440 |
+
For additional options, use `python kandinsky5_generate_video.py --help`.
|
| 441 |
+
|
| 442 |
+
<details>
|
| 443 |
+
<summary>日本語</summary>
|
| 444 |
+
|
| 445 |
+
上のコマンド例を使用して動画を生成します。
|
| 446 |
+
|
| 447 |
+
**オプション**
|
| 448 |
+
|
| 449 |
+
- `--task`: モデル設定
|
| 450 |
+
- `--prompt`: 生成用のテキストプロンプト
|
| 451 |
+
- `--negative_prompt`: ネガティブプロンプト(オプション)
|
| 452 |
+
- `--output`: 出力ファイルパス(動画は.mp4、画像は.png)
|
| 453 |
+
- `--width`, `--height`: 出力解像度(タスク設定からのデフォルト)
|
| 454 |
+
- `--frames`: フレーム数(タスク設定からのデフォルト)
|
| 455 |
+
- `--steps`: 推論ステップ数(タスク設定からのデフォルト)
|
| 456 |
+
- `--guidance`: ガイダンススケール(タスク設定からのデフォルト)
|
| 457 |
+
- `--seed`: ランダムシード
|
| 458 |
+
- `--fp8_base`: DiTをfp8モードで実行
|
| 459 |
+
- `--blocks_to_swap`: CPUにオフロードするブロック数
|
| 460 |
+
- `--lora_weight`: LoRA重みファイルへのパス
|
| 461 |
+
- `--lora_multiplier`: LoRA係数
|
| 462 |
+
|
| 463 |
+
その他のオプションは `--help` で確認できます。
|
| 464 |
+
|
| 465 |
+
</details>
|
| 466 |
+
|
| 467 |
+
## Dataset Configuration / データセット設定
|
| 468 |
+
|
| 469 |
+
Dataset configuration is the same as other architectures. See [dataset configuration](./dataset_config.md) for details.
|
| 470 |
+
|
| 471 |
+
<details>
|
| 472 |
+
<summary>日本語</summary>
|
| 473 |
+
|
| 474 |
+
データセット設定は他のアーキテクチャと同じです。詳細は[データセット設定](./dataset_config.md)を参照してください。
|
| 475 |
+
|
| 476 |
+
</details>
|
docs/loha_lokr.md
ADDED
|
@@ -0,0 +1,341 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
> 📝 Click on the language section to expand / 言語をクリックして展開
|
| 2 |
+
|
| 3 |
+
# LoHa / LoKr (LyCORIS)
|
| 4 |
+
|
| 5 |
+
## Overview / 概要
|
| 6 |
+
|
| 7 |
+
In addition to standard LoRA, Musubi Tuner supports **LoHa** (Low-rank Hadamard Product) and **LoKr** (Low-rank Kronecker Product) as alternative parameter-efficient fine-tuning methods. These are based on techniques from the [LyCORIS](https://github.com/KohakuBlueleaf/LyCORIS) project.
|
| 8 |
+
|
| 9 |
+
- **LoHa**: Represents weight updates as a Hadamard (element-wise) product of two low-rank matrices. Reference: [FedPara (arXiv:2108.06098)](https://arxiv.org/abs/2108.06098)
|
| 10 |
+
- **LoKr**: Represents weight updates as a Kronecker product with optional low-rank decomposition. Reference: [LoKr (arXiv:2309.14859)](https://arxiv.org/abs/2309.14859)
|
| 11 |
+
|
| 12 |
+
The algorithms and recommended settings are described in the [LyCORIS documentation](https://github.com/KohakuBlueleaf/LyCORIS/blob/main/docs/Algo-List.md) and [guidelines](https://github.com/KohakuBlueleaf/LyCORIS/blob/main/docs/Guidelines.md).
|
| 13 |
+
|
| 14 |
+
Both methods target Linear layers only (Conv2d layers are not supported in this implementation).
|
| 15 |
+
|
| 16 |
+
This feature is experimental.
|
| 17 |
+
|
| 18 |
+
<details>
|
| 19 |
+
<summary>日本語</summary>
|
| 20 |
+
|
| 21 |
+
Musubi Tunerでは、標準的なLoRAに加え、代替のパラメータ効率の良いファインチューニング手法として **LoHa**(Low-rank Hadamard Product)と **LoKr**(Low-rank Kronecker Product)をサポートしています。これらは [LyCORIS](https://github.com/KohakuBlueleaf/LyCORIS) プロジェクトの手法に基づいています。
|
| 22 |
+
|
| 23 |
+
- **LoHa**: 重みの更新を2つの低ランク行列のHadamard積(要素ごとの積)で表現します。参考文献: [FedPara (arXiv:2108.06098)](https://arxiv.org/abs/2108.06098)
|
| 24 |
+
- **LoKr**: 重みの更新をKronecker積と、オプションの低ランク分解で表現します。参考文献: [LoKr (arXiv:2309.14859)](https://arxiv.org/abs/2309.14859)
|
| 25 |
+
|
| 26 |
+
アルゴリズムと推奨設定は[LyCORISのアルゴリズム解説](https://github.com/KohakuBlueleaf/LyCORIS/blob/main/docs/Algo-List.md)と[ガイドライン](https://github.com/KohakuBlueleaf/LyCORIS/blob/main/docs/Guidelines.md)を参照してください。
|
| 27 |
+
|
| 28 |
+
いずれもLinear層のみを対象としています(Conv2d層はこの実装ではサポートしていません)。
|
| 29 |
+
|
| 30 |
+
この機能は実験的なものです。
|
| 31 |
+
|
| 32 |
+
</details>
|
| 33 |
+
|
| 34 |
+
## Acknowledgments / 謝辞
|
| 35 |
+
|
| 36 |
+
The LoHa and LoKr implementations in Musubi Tuner are based on the [LyCORIS](https://github.com/KohakuBlueleaf/LyCORIS) project by [KohakuBlueleaf](https://github.com/KohakuBlueleaf). We would like to express our sincere gratitude for the excellent research and open-source contributions that made this implementation possible.
|
| 37 |
+
|
| 38 |
+
<details>
|
| 39 |
+
<summary>日本語</summary>
|
| 40 |
+
|
| 41 |
+
Musubi TunerのLoHaおよびLoKrの実装は、[KohakuBlueleaf](https://github.com/KohakuBlueleaf)氏による[LyCORIS](https://github.com/KohakuBlueleaf/LyCORIS)プロジェクトに基づいています。この実装を可能にしてくださった素晴らしい研究とオープンソースへの貢献に心から感謝いたします。
|
| 42 |
+
|
| 43 |
+
</details>
|
| 44 |
+
|
| 45 |
+
## Supported architectures / 対応アーキテクチャ
|
| 46 |
+
|
| 47 |
+
LoHa and LoKr automatically detect the model architecture and apply appropriate default settings. The following architectures are supported:
|
| 48 |
+
|
| 49 |
+
- HunyuanVideo
|
| 50 |
+
- HunyuanVideo 1.5
|
| 51 |
+
- Wan 2.1/2.2
|
| 52 |
+
- FramePack
|
| 53 |
+
- FLUX.1 Kontext / FLUX.2
|
| 54 |
+
- Qwen-Image series
|
| 55 |
+
- Z-Image
|
| 56 |
+
|
| 57 |
+
Kandinsky5 is **not supported** with LoHa/LoKr (it requires special handling that is incompatible with automatic architecture detection).
|
| 58 |
+
|
| 59 |
+
Each architecture has its own default `exclude_patterns` to skip non-trainable modules (e.g., modulation layers, normalization layers). These are applied automatically when using LoHa/LoKr.
|
| 60 |
+
|
| 61 |
+
<details>
|
| 62 |
+
<summary>日本語</summary>
|
| 63 |
+
|
| 64 |
+
LoHaとLoKrは、モデルのアーキテクチャを自動で検出し、適切なデフォルト設定を適用します。以下のアーキテクチャに対応しています:
|
| 65 |
+
|
| 66 |
+
- HunyuanVideo
|
| 67 |
+
- HunyuanVideo 1.5
|
| 68 |
+
- Wan 2.1/2.2
|
| 69 |
+
- FramePack
|
| 70 |
+
- FLUX.1 Kontext / FLUX.2
|
| 71 |
+
- Qwen-Image系
|
| 72 |
+
- Z-Image
|
| 73 |
+
|
| 74 |
+
Kandinsky5はLoHa/LoKrに **対応していません**(自動アーキテクチャ検出と互換性のない特殊な処理が必要です)。
|
| 75 |
+
|
| 76 |
+
各アーキテクチャには、学習対象外のモジュール(modulation層、normalization層など)をスキップするデフォルトの `exclude_patterns` が設定されています。LoHa/LoKr使用時にはこれらが自動的に適用されます。
|
| 77 |
+
|
| 78 |
+
</details>
|
| 79 |
+
|
| 80 |
+
## Training / 学習
|
| 81 |
+
|
| 82 |
+
To use LoHa or LoKr, change the `--network_module` argument in your training command. All other training options (dataset config, optimizer, etc.) remain the same as LoRA.
|
| 83 |
+
|
| 84 |
+
<details>
|
| 85 |
+
<summary>日本語</summary>
|
| 86 |
+
|
| 87 |
+
LoHaまたはLoKrを使用するには、学習コマンドの `--network_module` 引数を変更します。その他の学習オプション(データセット設定、オプティマイザなど)はLoRAと同じです。
|
| 88 |
+
|
| 89 |
+
</details>
|
| 90 |
+
|
| 91 |
+
### LoHa
|
| 92 |
+
|
| 93 |
+
```bash
|
| 94 |
+
accelerate launch --num_cpu_threads_per_process 1 --mixed_precision bf16 src/musubi_tuner/hv_train_network.py \
|
| 95 |
+
--dit path/to/dit \
|
| 96 |
+
--dataset_config path/to/toml \
|
| 97 |
+
--sdpa --mixed_precision bf16 --fp8_base \
|
| 98 |
+
--optimizer_type adamw8bit --learning_rate 2e-4 --gradient_checkpointing \
|
| 99 |
+
--network_module networks.loha --network_dim 32 --network_alpha 16 \
|
| 100 |
+
--max_train_epochs 16 --save_every_n_epochs 1 \
|
| 101 |
+
--output_dir path/to/output --output_name my-loha
|
| 102 |
+
```
|
| 103 |
+
|
| 104 |
+
### LoKr
|
| 105 |
+
|
| 106 |
+
```bash
|
| 107 |
+
accelerate launch --num_cpu_threads_per_process 1 --mixed_precision bf16 src/musubi_tuner/hv_train_network.py \
|
| 108 |
+
--dit path/to/dit \
|
| 109 |
+
--dataset_config path/to/toml \
|
| 110 |
+
--sdpa --mixed_precision bf16 --fp8_base \
|
| 111 |
+
--optimizer_type adamw8bit --learning_rate 2e-4 --gradient_checkpointing \
|
| 112 |
+
--network_module networks.lokr --network_dim 32 --network_alpha 16 \
|
| 113 |
+
--max_train_epochs 16 --save_every_n_epochs 1 \
|
| 114 |
+
--output_dir path/to/output --output_name my-lokr
|
| 115 |
+
```
|
| 116 |
+
|
| 117 |
+
Replace `hv_train_network.py` with the appropriate training script for your architecture (e.g., `wan_train_network.py`, `fpack_train_network.py`, etc.).
|
| 118 |
+
|
| 119 |
+
<details>
|
| 120 |
+
<summary>日本語</summary>
|
| 121 |
+
|
| 122 |
+
`hv_train_network.py` の部分は、お使いのアーキテクチャに対応する学習スクリプト(`wan_train_network.py`, `fpack_train_network.py` など)に置き換えてください。
|
| 123 |
+
|
| 124 |
+
</details>
|
| 125 |
+
|
| 126 |
+
### Common training options / 共通の学習オプション
|
| 127 |
+
|
| 128 |
+
The following `--network_args` options are available for both LoHa and LoKr, same as LoRA:
|
| 129 |
+
|
| 130 |
+
| Option | Description |
|
| 131 |
+
|---|---|
|
| 132 |
+
| `verbose=True` | Display detailed information about the network modules |
|
| 133 |
+
| `rank_dropout=0.1` | Apply dropout to the rank dimension during training |
|
| 134 |
+
| `module_dropout=0.1` | Randomly skip entire modules during training |
|
| 135 |
+
| `exclude_patterns=[r'...']` | Exclude modules matching the regex patterns (in addition to architecture defaults) |
|
| 136 |
+
| `include_patterns=[r'...']` | Include only modules matching the regex patterns |
|
| 137 |
+
|
| 138 |
+
See [Advanced configuration](advanced_config.md) for details on how to specify `network_args`.
|
| 139 |
+
|
| 140 |
+
<details>
|
| 141 |
+
<summary>日本語</summary>
|
| 142 |
+
|
| 143 |
+
以下の `--network_args` オプションは、LoRAと同様にLoHaとLoKrの両方で使用できます:
|
| 144 |
+
|
| 145 |
+
| オプション | 説明 |
|
| 146 |
+
|---|---|
|
| 147 |
+
| `verbose=True` | ネットワークモジュールの詳細情報を表示 |
|
| 148 |
+
| `rank_dropout=0.1` | 学習時にランク次元にドロップアウトを適用 |
|
| 149 |
+
| `module_dropout=0.1` | 学習時にモジュール全体をランダムにスキップ |
|
| 150 |
+
| `exclude_patterns=[r'...']` | 正規表現パターンに一致するモジュールを除外(アーキテクチャのデフォルトに追加) |
|
| 151 |
+
| `include_patterns=[r'...']` | 正規表現パターンに一致するモジュールのみを対象とする |
|
| 152 |
+
|
| 153 |
+
`network_args` の指定方法の詳細は [高度な設定](advanced_config.md) を参照してください。
|
| 154 |
+
|
| 155 |
+
</details>
|
| 156 |
+
|
| 157 |
+
### LoKr-specific option: `factor` / LoKr固有のオプション: `factor`
|
| 158 |
+
|
| 159 |
+
LoKr decomposes weight dimensions using factorization. The `factor` option controls how dimensions are split:
|
| 160 |
+
|
| 161 |
+
- `factor=-1` (default): Automatically find balanced factors. For example, dimension 512 is split into (16, 32).
|
| 162 |
+
- `factor=N` (positive integer): Force factorization using the specified value. For example, `factor=4` splits dimension 512 into (4, 128).
|
| 163 |
+
|
| 164 |
+
```bash
|
| 165 |
+
--network_args "factor=4"
|
| 166 |
+
```
|
| 167 |
+
|
| 168 |
+
When `network_dim` (rank) is large enough relative to the factorized dimensions, LoKr uses a full matrix instead of a low-rank decomposition for the second factor. A warning will be logged in this case.
|
| 169 |
+
|
| 170 |
+
<details>
|
| 171 |
+
<summary>日本語</summary>
|
| 172 |
+
|
| 173 |
+
LoKrは重みの次元を因数分解して分割します。`factor` オプションでその分割方法を制御します:
|
| 174 |
+
|
| 175 |
+
- `factor=-1`(デフォルト): バランスの良い因数を自動的に見つけます。例えば、次元512は(16, 32)に分割されます。
|
| 176 |
+
- `factor=N`(正の整数): 指定した値で因数分解します。例えば、`factor=4` は次元512を(4, 128)に分割します。
|
| 177 |
+
|
| 178 |
+
```bash
|
| 179 |
+
--network_args "factor=4"
|
| 180 |
+
```
|
| 181 |
+
|
| 182 |
+
`network_dim`(ランク)が因数分解された次元に対して十分に大きい場合、LoKrは第2因子に低ランク分解ではなくフル行列を使用します。その場合、警告がログに出力されます。
|
| 183 |
+
|
| 184 |
+
</details>
|
| 185 |
+
|
| 186 |
+
## How LoHa and LoKr work / LoHaとLoKrの仕組み
|
| 187 |
+
|
| 188 |
+
### LoHa
|
| 189 |
+
|
| 190 |
+
LoHa represents the weight update as a Hadamard (element-wise) product of two low-rank matrices:
|
| 191 |
+
|
| 192 |
+
```
|
| 193 |
+
ΔW = (W1a × W1b) ⊙ (W2a × W2b)
|
| 194 |
+
```
|
| 195 |
+
|
| 196 |
+
where `W1a`, `W1b`, `W2a`, `W2b` are low-rank matrices with rank `network_dim`. This means LoHa has roughly **twice the number of trainable parameters** compared to LoRA at the same rank, but can capture more complex weight structures due to the element-wise product.
|
| 197 |
+
|
| 198 |
+
### LoKr
|
| 199 |
+
|
| 200 |
+
LoKr represents the weight update using a Kronecker product:
|
| 201 |
+
|
| 202 |
+
```
|
| 203 |
+
ΔW = W1 ⊗ W2 (where W2 = W2a × W2b in low-rank mode)
|
| 204 |
+
```
|
| 205 |
+
|
| 206 |
+
The original weight dimensions are factorized (e.g., a 512×512 weight might be split so that W1 is 16×16 and W2 is 32×32). W1 is always a full matrix (small), while W2 can be either low-rank decomposed or a full matrix depending on the rank setting. LoKr tends to produce **smaller models** compared to LoRA at the same rank.
|
| 207 |
+
|
| 208 |
+
<details>
|
| 209 |
+
<summary>日本語</summary>
|
| 210 |
+
|
| 211 |
+
### LoHa
|
| 212 |
+
|
| 213 |
+
LoHaは重みの更新を2つの低ランク行列のHadamard積(要素ごとの積)で表現します:
|
| 214 |
+
|
| 215 |
+
```
|
| 216 |
+
ΔW = (W1a × W1b) ⊙ (W2a × W2b)
|
| 217 |
+
```
|
| 218 |
+
|
| 219 |
+
ここで `W1a`, `W1b`, `W2a`, `W2b` はランク `network_dim` の低ランク行列です。LoHaは同じランクのLoRAと比較して学習可能なパラメータ数が **約2倍** になりますが、要素ごとの積により、より複雑な重み構造を捉えることができます。
|
| 220 |
+
|
| 221 |
+
### LoKr
|
| 222 |
+
|
| 223 |
+
LoKrはKronecker積を使って重みの更新を表現します:
|
| 224 |
+
|
| 225 |
+
```
|
| 226 |
+
ΔW = W1 ⊗ W2 (低ランクモードでは W2 = W2a × W2b)
|
| 227 |
+
```
|
| 228 |
+
|
| 229 |
+
元の重みの次元が因数分解されます(例: 512×512の重みが、W1が16×16、W2が32×32に分割されます)。W1は常にフル行列(小さい)で、W2はランク設定に応じて低ランク分解またはフル行列になります。LoKrは同じランクのLoRAと比較して **より小さいモデル** を生成する傾向があります。
|
| 230 |
+
|
| 231 |
+
</details>
|
| 232 |
+
|
| 233 |
+
## Inference / 推論
|
| 234 |
+
|
| 235 |
+
Trained LoHa/LoKr weights are saved in safetensors format, just like LoRA. The inference method depends on the architecture.
|
| 236 |
+
|
| 237 |
+
<details>
|
| 238 |
+
<summary>日本語</summary>
|
| 239 |
+
|
| 240 |
+
学習済みのLoHa/LoKrの重みは、LoRAと同様にsafetensors形式で保存されます。推論方法はアーキテクチャによって異なります。
|
| 241 |
+
|
| 242 |
+
</details>
|
| 243 |
+
|
| 244 |
+
### Architectures with built-in support / ネイティブサポートのあるアーキテクチャ
|
| 245 |
+
|
| 246 |
+
The following architectures automatically detect and load LoHa/LoKr weights without any additional options:
|
| 247 |
+
|
| 248 |
+
- Wan 2.1/2.2
|
| 249 |
+
- FramePack
|
| 250 |
+
- HunyuanVideo 1.5
|
| 251 |
+
- FLUX.2
|
| 252 |
+
- Qwen-Image series
|
| 253 |
+
- Z-Image
|
| 254 |
+
|
| 255 |
+
Use `--lora_weight` as usual:
|
| 256 |
+
|
| 257 |
+
```bash
|
| 258 |
+
python src/musubi_tuner/wan_generate_video.py ... --lora_weight path/to/loha_or_lokr.safetensors
|
| 259 |
+
```
|
| 260 |
+
|
| 261 |
+
<details>
|
| 262 |
+
<summary>日本語</summary>
|
| 263 |
+
|
| 264 |
+
以下のアーキテクチャでは、LoHa/LoKrの重みを追加オプションなしで自動検出して読み込みます:
|
| 265 |
+
|
| 266 |
+
- Wan 2.1/2.2
|
| 267 |
+
- FramePack
|
| 268 |
+
- HunyuanVideo 1.5
|
| 269 |
+
- FLUX.2
|
| 270 |
+
- Qwen-Image系
|
| 271 |
+
- Z-Image
|
| 272 |
+
|
| 273 |
+
通常通り `--lora_weight` を使用します:
|
| 274 |
+
|
| 275 |
+
```bash
|
| 276 |
+
python src/musubi_tuner/wan_generate_video.py ... --lora_weight path/to/loha_or_lokr.safetensors
|
| 277 |
+
```
|
| 278 |
+
|
| 279 |
+
</details>
|
| 280 |
+
|
| 281 |
+
### HunyuanVideo / FLUX.1 Kontext
|
| 282 |
+
|
| 283 |
+
For HunyuanVideo and FLUX.1 Kontext, the `--lycoris` option is required, and the [LyCORIS library](https://github.com/KohakuBlueleaf/LyCORIS) must be installed:
|
| 284 |
+
|
| 285 |
+
```bash
|
| 286 |
+
pip install lycoris-lora
|
| 287 |
+
|
| 288 |
+
python src/musubi_tuner/hv_generate_video.py ... --lora_weight path/to/loha_or_lokr.safetensors --lycoris
|
| 289 |
+
```
|
| 290 |
+
|
| 291 |
+
<details>
|
| 292 |
+
<summary>日本語</summary>
|
| 293 |
+
|
| 294 |
+
HunyuanVideoとFLUX.1 Kontextでは、`--lycoris` オプションが必要で、[LyCORIS ライブラリ](https://github.com/KohakuBlueleaf/LyCORIS)のインストールが必要です:
|
| 295 |
+
|
| 296 |
+
```bash
|
| 297 |
+
pip install lycoris-lora
|
| 298 |
+
|
| 299 |
+
python src/musubi_tuner/hv_generate_video.py ... --lora_weight path/to/loha_or_lokr.safetensors --lycoris
|
| 300 |
+
```
|
| 301 |
+
|
| 302 |
+
</details>
|
| 303 |
+
|
| 304 |
+
## Limitations / 制限事項
|
| 305 |
+
|
| 306 |
+
### LoRA+ is not supported / LoRA+は非対応
|
| 307 |
+
|
| 308 |
+
LoRA+ (`loraplus_lr_ratio` in `--network_args`) is **not supported** with LoHa/LoKr. LoRA+ works by applying different learning rates to the LoRA-A and LoRA-B matrices, which is specific to the standard LoRA architecture. LoHa and LoKr have different parameter structures and this optimization does not apply.
|
| 309 |
+
|
| 310 |
+
<details>
|
| 311 |
+
<summary>日本語</summary>
|
| 312 |
+
|
| 313 |
+
LoRA+(`--network_args` の `loraplus_lr_ratio`)はLoHa/LoKrでは **非対応** です。LoRA+はLoRA-AとLoRA-Bの行列に異なる学習率を適用する手法であり、標準的なLoRAのアーキテクチャに固有のものです。LoHaとLoKrはパラメータ構造が異なるため、この最適化は適用されません。
|
| 314 |
+
|
| 315 |
+
</details>
|
| 316 |
+
|
| 317 |
+
### Merging to base model / ベースモデルへのマージ
|
| 318 |
+
|
| 319 |
+
`merge_lora.py` currently supports standard LoRA only. LoHa/LoKr weights cannot be merged into the base model using this script.
|
| 320 |
+
|
| 321 |
+
For architectures with built-in LoHa/LoKr support (listed above), merging is performed automatically during model loading at inference time, so this limitation only affects offline merging workflows.
|
| 322 |
+
|
| 323 |
+
<details>
|
| 324 |
+
<summary>日本語</summary>
|
| 325 |
+
|
| 326 |
+
`merge_lora.py` は現在、標準LoRAのみをサポートしています。このスクリプトではLoHa/LoKrの重みをベースモデルにマージすることはできません。
|
| 327 |
+
|
| 328 |
+
LoHa/LoKrのネイティブサポートがあるアーキテクチャ(上記)では、推論時のモデル読み込み時にマージが自動的に行われるため、この制限はオフラインマージのワークフローにのみ影響します。
|
| 329 |
+
|
| 330 |
+
</details>
|
| 331 |
+
|
| 332 |
+
### Format conversion / フォーマット変換
|
| 333 |
+
|
| 334 |
+
`convert_lora.py` is extended to also support format conversion of LoHa/LoKr weights between Musubi Tuner format and Diffusers format for ComfyUI.
|
| 335 |
+
|
| 336 |
+
<details>
|
| 337 |
+
<summary>日本語</summary>
|
| 338 |
+
|
| 339 |
+
`convert_lora.py` は、LoRAに加えて、LoHa/LoKrの重みのフォーマット変換(Musubi Tuner形式とDiffusers形式間の変換)についてもサポートするよう、拡張されています。
|
| 340 |
+
|
| 341 |
+
</details>
|
docs/ltx_2.md
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
docs/qwen_image.md
ADDED
|
@@ -0,0 +1,618 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Qwen-Image
|
| 2 |
+
|
| 3 |
+
## Overview / 概要
|
| 4 |
+
|
| 5 |
+
This document describes the usage of the Qwen-Image and Qwen-Image-Edit/Edit-2509/Edit-2511/Layered architectures within the Musubi Tuner framework. Qwen-Image is a text-to-image generation model that supports standard text-to-image generation, Qwen-Image-Edit is a model that supports image editing with control images, and Qwen-Image-Layered is a model that supports image layer segmentation.
|
| 6 |
+
|
| 7 |
+
Qwen-Image-Edit-2509/2511 can use multiple control images simultaneously. While the official version supports up to 3 images, Musubi Tuner allows specifying any number of images (though correct operation is confirmed only up to 3). Additionally, the sizes of the control images can differ (both during training and inference).
|
| 8 |
+
|
| 9 |
+
This feature is experimental.
|
| 10 |
+
|
| 11 |
+
Latent pre-caching, training, and inference options can be found in the `--help` output. Many options are shared with HunyuanVideo, so refer to the [HunyuanVideo documentation](./hunyuan_video.md) as needed.
|
| 12 |
+
|
| 13 |
+
<details>
|
| 14 |
+
<summary>日本語</summary>
|
| 15 |
+
|
| 16 |
+
このドキュメントは、Musubi Tunerフレームワーク内でのQwen-Image、Qwen-Image-Edit/Edit-2509/Edit-2511/Layeredアーキテクチャの使用法について説明しています。Qwen-Imageは標準的なテキストから画像生成モデルで、Qwen-Image-Editは制御画像を使った画像編集をサポートするモデル、Layeredは画像のレイヤー分割をサポートするモデルです。
|
| 17 |
+
|
| 18 |
+
Qwen-Image-Edit-2509/2511は、複数枚の制御画像を同時に使用できます。公式では3枚までですが、Musubi Tunerでは任意の枚数を指定できます(正しく動作するのは3枚までです)。またそれぞれの制御画像のサイズは異なっていても問題ありません(学習時、推論時とも)。
|
| 19 |
+
|
| 20 |
+
この機能は実験的なものです。
|
| 21 |
+
|
| 22 |
+
事前キャッシング、学習、推論のオプションは`--help`で確認してください。HunyuanVideoと共通のオプションが多くありますので、必要に応じて[HunyuanVideoのドキュメント](./hunyuan_video.md)も参照してください。
|
| 23 |
+
|
| 24 |
+
</details>
|
| 25 |
+
|
| 26 |
+
## Download the model / モデルのダウンロード
|
| 27 |
+
|
| 28 |
+
You need to download the DiT, VAE, and Text Encoder (Qwen2.5-VL) models.
|
| 29 |
+
|
| 30 |
+
The [official weights from Qwen](https://huggingface.co/Qwen) can be used for the DiT, Text Encoder, and VAE respectively. If you want to use the weights for ComfyUI, follow the instructions below.
|
| 31 |
+
|
| 32 |
+
- **Qwen-Image DiT, Text Encoder (Qwen2.5-VL)**: For Qwen-Image DiT and Text Encoder, download `split_files/diffusion_models/qwen_image_bf16.safetensors` and `split_files/text_encoders/qwen_2.5_vl_7b.safetensors` from https://huggingface.co/Comfy-Org/Qwen-Image_ComfyUI, respectively. **The fp8_scaled version cannot be used.**
|
| 33 |
+
|
| 34 |
+
- **VAE**: For VAE, download `split_files/vae/qwen_image_vae.safetensors` similarly from https://huggingface.co/Comfy-Org/Qwen-Image-Edit_ComfyUI. ComfyUI's VAE weights are also now usable.
|
| 35 |
+
|
| 36 |
+
- **Qwen-Image-Edit DiT**: For Qwen-Image-Edit DiT, download `split_files/diffusion_models/qwen_image_edit_bf16.safetensors`; for Edit-2509, download `split_files/diffusion_models/qwen_image_edit_2509_bf16.safetensors`; and for Edit-2511, download `split_files/diffusion_models/qwen_image_edit_2511_bf16.safetensors`, all from https://huggingface.co/Comfy-Org/Qwen-Image-Edit_ComfyUI. **fp8_e4m3fn cannot be used.** The Text Encoder and VAE are the same as Qwen-Image.
|
| 37 |
+
|
| 38 |
+
- **Qwen-Image-Layered VAE**: For Qwen-Image-Layered VAE, download `split_files/vae/qwen_image_layered_vae.safetensors` from https://huggingface.co/Comfy-Org/Qwen-Image-Layered_ComfyUI.
|
| 39 |
+
|
| 40 |
+
- **Qwen-Image-Layered DiT**: For Qwen-Image-Layered DiT, download `split_files/diffusion_models/qwen_image_layered_bf16.safetensors` from https://huggingface.co/Comfy-Org/Qwen-Image-Layered_ComfyUI. **fp8mixed cannot be used.** Text Encoder is same as Qwen-Image.
|
| 41 |
+
|
| 42 |
+
Thanks to Comfy-Org for releasing these weights.
|
| 43 |
+
|
| 44 |
+
<details>
|
| 45 |
+
<summary>日本語</summary>
|
| 46 |
+
|
| 47 |
+
DiT, VAE, Text Encoder (Qwen2.5-VL) のモデルをダウンロードする必要があります。
|
| 48 |
+
|
| 49 |
+
DiT、Text Encoder、VAEのそれぞれに、[Qwenの公式の重み](https://huggingface.co/Qwen)を使用可能です。ComfyUI用の重みを使用する場合は、以下の通りです。
|
| 50 |
+
- **DiT, Text Encoder (Qwen2.5-VL)**: DiTおよびText Encoderは、`split_files/diffusion_models/qwen_image_bf16.safetensors` と `split_files/text_encoders/qwen_2.5_vl_7b.safetensors` を https://huggingface.co/Comfy-Org/Qwen-Image_ComfyUI からそれぞれダウンロードしてください。**fp8_scaledバージョンは使用できません。**
|
| 51 |
+
|
| 52 |
+
- **VAE**: VAEは `split_files/vae/qwen_image_vae.safetensors` を同様に https://huggingface.co/Comfy-Org/Qwen-Image-Edit_ComfyUI からダウンロードしてください。ComfyUIのVAEの重みも使用できるようになりました。
|
| 53 |
+
|
| 54 |
+
- **Qwen-Image-Edit DiT**: Qwen-Image-Edit DiTは、`split_files/diffusion_models/qwen_image_edit_bf16.safetensors` を、Edit-2509の場合は `split_files/diffusion_models/qwen_image_edit_2509_bf16.safetensors` を https://huggingface.co/Comfy-Org/Qwen-Image-Edit_ComfyUI からダウンロードしてください。**`fp8_e4m3fn`は使用できません。**Text EncoderとVAEはQwen-Imageと同じです。
|
| 55 |
+
|
| 56 |
+
これらの重みを公開してくださったComfy-Orgに感謝します。
|
| 57 |
+
|
| 58 |
+
</details>
|
| 59 |
+
|
| 60 |
+
### Summary of files to download / ダウンロードするファイルのまとめ
|
| 61 |
+
|
| 62 |
+
**fp8_scaled and fp8_e4m3fn versions cannot be used.**
|
| 63 |
+
|
| 64 |
+
**Download from https://huggingface.co/Comfy-Org/Qwen-Image_ComfyUI :**
|
| 65 |
+
|type|model|file|
|
| 66 |
+
|----|--------|--------------|
|
| 67 |
+
|DiT|Qwen-Image (no edit)|`split_files/diffusion_models/qwen_image_bf16.safetensors`|
|
| 68 |
+
|Text Encoder|Qwen2.5-VL|`split_files/text_encoders/qwen_2.5_vl_7b.safetensors`|
|
| 69 |
+
|VAE|Qwen-Image VAE|`split_files/vae/qwen_image_vae.safetensors`|
|
| 70 |
+
|
| 71 |
+
**Download from https://huggingface.co/Comfy-Org/Qwen-Image-Edit_ComfyUI :**
|
| 72 |
+
|type|model|file|
|
| 73 |
+
|----|--------|--------------|
|
| 74 |
+
|DiT|Qwen-Image-Edit|`split_files/diffusion_models/qwen_image_edit_bf16.safetensors`|
|
| 75 |
+
|DiT|Qwen-Image-Edit-2509|`split_files/diffusion_models/qwen_image_edit_2509_bf16.safetensors`|
|
| 76 |
+
|DiT|Qwen-Image-Edit-2511|`split_files/diffusion_models/qwen_image_edit_2511_bf16.safetensors`|
|
| 77 |
+
|
| 78 |
+
**Download from https://huggingface.co/Comfy-Org/Qwen-Image-Layered_ComfyUI :**
|
| 79 |
+
|type|model|file|
|
| 80 |
+
|----|--------|--------------|
|
| 81 |
+
|VAE|Qwen-Image-Layered VAE|`split_files/vae/qwen_image_layered_vae.safetensors`|
|
| 82 |
+
|DiT|Qwen-Image-Layered|`split_files/diffusion_models/qwen_image_layered_bf16.safetensors`|
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
## Specifying Model Version / モデルバージョンの指定
|
| 86 |
+
|
| 87 |
+
When specifying the model version in various scripts, use the following options:
|
| 88 |
+
|type|option|note|
|
| 89 |
+
|----|--------|----|
|
| 90 |
+
|Qwen-Image|`--model_version original`|default, can be omitted|
|
| 91 |
+
|Qwen-Image-Edit|`--model_version edit`| |
|
| 92 |
+
|Qwen-Image-Edit-2509|`--model_version edit-2509`| |
|
| 93 |
+
|Qwen-Image-Edit-2511|`--model_version edit-2511`| |
|
| 94 |
+
|Qwen-Image-Layered|`--model_version layered`| |
|
| 95 |
+
|
| 96 |
+
Note that the `--edit` (for Qwen-Image-Edit) and `--edit_plus` (for Qwen-Image-Edit-2509) flags are also available for backward compatibility.
|
| 97 |
+
|
| 98 |
+
<details>
|
| 99 |
+
<summary>日本語</summary>
|
| 100 |
+
|
| 101 |
+
様々なスクリプトでモデルバージョンを指定する際には、英語版の表を参考にしてください。
|
| 102 |
+
|
| 103 |
+
`--edit`(Qwen-Image-Edit)および`--edit_plus`(Qwen-Image-Edit-2509)フラグも後方互換性のために利用可能です。
|
| 104 |
+
|
| 105 |
+
</details>
|
| 106 |
+
|
| 107 |
+
## Pre-caching / 事前キャッシング
|
| 108 |
+
|
| 109 |
+
If you are using Qwen-Image-Edit or Edit-2509/2511, please also refer to the [Qwen-Image-Edit section](./dataset_config.md#qwen-image-edit-and-qwen-image-edit-2509) of the dataset config documentation.
|
| 110 |
+
|
| 111 |
+
If you are using Qwen-Image-Layered, note the following: Since the Qwen-Image-Layered dataset contains multiple target images, please specify `multiple_target=true` in the dataset config. For details, refer to the [dataset config document](./dataset_config.md#sample-for-image-dataset-with-caption-text-files).
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
### Latent Pre-caching / latentの事前キャッシング
|
| 115 |
+
|
| 116 |
+
Latent pre-caching uses a dedicated script for Qwen-Image.
|
| 117 |
+
|
| 118 |
+
```bash
|
| 119 |
+
python src/musubi_tuner/qwen_image_cache_latents.py \
|
| 120 |
+
--dataset_config path/to/toml \
|
| 121 |
+
--vae path/to/vae_model \
|
| 122 |
+
--model_version original
|
| 123 |
+
```
|
| 124 |
+
|
| 125 |
+
- Uses `qwen_image_cache_latents.py`.
|
| 126 |
+
- The `--vae` argument is required.
|
| 127 |
+
- Use the `--model_version` option for Qwen-Image-Edit/Layered training.
|
| 128 |
+
- For Qwen-Image-Edit training, control images specified in the dataset config will also be cached as latents.
|
| 129 |
+
- For Qwen-Image-Layered training, multiple target images will be cached as latents.
|
| 130 |
+
|
| 131 |
+
<details>
|
| 132 |
+
<summary>日本語</summary>
|
| 133 |
+
|
| 134 |
+
Qwen-Image-EditまたはEdit-2509/2511を使用する場合は、事前にデータセット設定のドキュメントの[Qwen-Image-Editのセクション](./dataset_config.md#qwen-image-edit-and-qwen-image-edit-2509) も参照してください。
|
| 135 |
+
|
| 136 |
+
Qwen-Image-Layeredを使用する場合は、以下に注意してください。Qwen-Image-Layeredのデータセットには複数枚のターゲット画像が含まれるため、データセット設定で`multiple_target=true`を指定してください。詳細は[データセット設定ドキュメント](./dataset_config.md#sample-for-image-dataset-with-caption-text-files)を参照してください。
|
| 137 |
+
|
| 138 |
+
latentの事前キャッシングはQwen-Image専用のスクリプトを使用します。
|
| 139 |
+
|
| 140 |
+
- `qwen_image_cache_latents.py`を使用します。
|
| 141 |
+
- `--vae`引数を指定してください。
|
| 142 |
+
- Qwen-Image-Editの学習には`--model_version`オプションを適切に指定してください。
|
| 143 |
+
- Qwen-Image-Editの学習では、データセット設定で指定されたコントロール画像もlatentsとしてキャッシュされます
|
| 144 |
+
- Layeredの学習では、複数のターゲット画像がlatentsとしてキャッシュされます。
|
| 145 |
+
|
| 146 |
+
</details>
|
| 147 |
+
|
| 148 |
+
### Text Encoder Output Pre-caching / テキストエンコーダー出力の事前キャッシング
|
| 149 |
+
|
| 150 |
+
Text encoder output pre-caching also uses a dedicated script.
|
| 151 |
+
|
| 152 |
+
```bash
|
| 153 |
+
python src/musubi_tuner/qwen_image_cache_text_encoder_outputs.py \
|
| 154 |
+
--dataset_config path/to/toml \
|
| 155 |
+
--text_encoder path/to/text_encoder \
|
| 156 |
+
--batch_size 1 \
|
| 157 |
+
--model_version original
|
| 158 |
+
```
|
| 159 |
+
|
| 160 |
+
- Uses `qwen_image_cache_text_encoder_outputs.py`.
|
| 161 |
+
- Requires the `--text_encoder` (Qwen2.5-VL) argument.
|
| 162 |
+
- Use the `--fp8_vl` option to run the Text Encoder in fp8 mode to save VRAM on GPUs with less than 16GB of memory.
|
| 163 |
+
- Specify `--model_version` for Qwen-Image-Edit training. Prompts will be processed with control images to generate appropriate embeddings.
|
| 164 |
+
|
| 165 |
+
**Technical details on the difference between `--model_version edit` and `--model_version edit-2509` and `--model_version edit-2511`**
|
| 166 |
+
|
| 167 |
+
Qwen-Image-Edit-2509 and 2511 can use multiple images as control images, so the prompts for obtaining Text Encoder outputs differ from Edit.
|
| 168 |
+
|
| 169 |
+
<details>
|
| 170 |
+
<summary>日本語</summary>
|
| 171 |
+
|
| 172 |
+
テキストエンコーダー出力の事前キャッシングも専用のスクリプトを使用します。
|
| 173 |
+
|
| 174 |
+
- `qwen_image_cache_text_encoder_outputs.py`を使用します。
|
| 175 |
+
- `--text_encoder` (Qwen2.5-VL) 引数が必要です。
|
| 176 |
+
- VRAMを節約するために、fp8 でテキストエンコーダを実行する`--fp8_vl`オプションが使用可能です。VRAMが16GB未満のGPU向けです。
|
| 177 |
+
- Qwen-Image-Editの学習には`--model_version`を指定してください。プロンプトがコントロール画像と一緒に処理され、適切な埋め込みが生成されます。
|
| 178 |
+
|
| 179 |
+
**`--model_version edit`と`--model_version edit-2509`および`--model_version edit-2511`の違いに関する技術的詳細**
|
| 180 |
+
|
| 181 |
+
Qwen-Image-Edit-2509および2511では複数枚の画像をコントロール画像として使用できるため、Text Encoder出力の取得のためのプロンプトがEditとは異なります。
|
| 182 |
+
|
| 183 |
+
</details>
|
| 184 |
+
|
| 185 |
+
## LoRA Training / LoRA学習
|
| 186 |
+
|
| 187 |
+
Training uses a dedicated script `qwen_image_train_network.py`.
|
| 188 |
+
|
| 189 |
+
**Standard Qwen-Image Training:**
|
| 190 |
+
|
| 191 |
+
```bash
|
| 192 |
+
accelerate launch --num_cpu_threads_per_process 1 --mixed_precision bf16 src/musubi_tuner/qwen_image_train_network.py \
|
| 193 |
+
--dit path/to/dit_model \
|
| 194 |
+
--vae path/to/vae_model \
|
| 195 |
+
--text_encoder path/to/text_encoder \
|
| 196 |
+
--model_version original \
|
| 197 |
+
--dataset_config path/to/toml \
|
| 198 |
+
--sdpa --mixed_precision bf16 \
|
| 199 |
+
--timestep_sampling shift \
|
| 200 |
+
--weighting_scheme none --discrete_flow_shift 2.2 \
|
| 201 |
+
--optimizer_type adamw8bit --learning_rate 5e-5 --gradient_checkpointing \
|
| 202 |
+
--max_data_loader_n_workers 2 --persistent_data_loader_workers \
|
| 203 |
+
--network_module networks.lora_qwen_image \
|
| 204 |
+
--network_dim 16 \
|
| 205 |
+
--max_train_epochs 16 --save_every_n_epochs 1 --seed 42 \
|
| 206 |
+
--output_dir path/to/output_dir --output_name name-of-lora
|
| 207 |
+
```
|
| 208 |
+
|
| 209 |
+
**Qwen-Image-Edit Training:**
|
| 210 |
+
|
| 211 |
+
For training the image editing model, add the `--model_version` option for Qwen-Image-Edit, Edit-2509, or Edit-2511.
|
| 212 |
+
|
| 213 |
+
```bash
|
| 214 |
+
accelerate launch --num_cpu_threads_per_process 1 --mixed_precision bf16 src/musubi_tuner/qwen_image_train_network.py \
|
| 215 |
+
--dit path/to/edit_dit_model \
|
| 216 |
+
--vae path/to/vae_model \
|
| 217 |
+
--text_encoder path/to/text_encoder \
|
| 218 |
+
--dataset_config path/to/toml \
|
| 219 |
+
--model_version edit-2511 \
|
| 220 |
+
...
|
| 221 |
+
```
|
| 222 |
+
|
| 223 |
+
**Qwen-Image-Layered Training:**
|
| 224 |
+
|
| 225 |
+
For training Qwen-Image-Layered models with layered control images, add the `--model_version layered` option.
|
| 226 |
+
|
| 227 |
+
`--remove_first_image_from_target` option is also available to exclude the first target image from the model input/target during training. The first image among multiple target images inferred by the official model in Qwen-Image-Layered is the original image, and the rest are layer images. By using this option, you can train only on the layer images without inferring the original image. This improves training and inference speed and reduces memory usage. The impact on quality is unknown.
|
| 228 |
+
|
| 229 |
+
Note that VAE is different for this architecture. Please use the VAE model for Qwen-Image-Layered.
|
| 230 |
+
|
| 231 |
+
For sample image generation during Qwen-Image-Layered training, please refer to [this document](./sampling_during_training.md#sample-image-generation-during-qwen-image-layered-training--qwen-image-layeredの学習中のサンプルイメージ生成).
|
| 232 |
+
|
| 233 |
+
---
|
| 234 |
+
|
| 235 |
+
Common notes for Qwen-Image/Qwen-Image-Edit/Layered training:
|
| 236 |
+
|
| 237 |
+
- Uses `qwen_image_train_network.py`.
|
| 238 |
+
- **Requires** specifying `--dit`, `--vae`, and `--text_encoder`.
|
| 239 |
+
- `--mixed_precision bf16` is recommended for Qwen-Image training.
|
| 240 |
+
- Use the `--model_version` option for Qwen-Image-Edit, Edit-2509, or Edit-2511 training with control images, or for Qwen-Image-Layered training with multiple target images.
|
| 241 |
+
- Memory saving options like `--fp8_base` and `--fp8_scaled` (for DiT), and `--fp8_vl` (for Text Encoder) are available.
|
| 242 |
+
- `--gradient_checkpointing` and `--gradient_checkpointing_cpu_offload` are available for memory savings. See [HunyuanVideo documentation](./hunyuan_video.md#memory-optimization) for details.
|
| 243 |
+
- `--disable_numpy_memmap`: Disables numpy memory mapping for model loading, loading with standard file read. Increases RAM usage but significantly speeds up model loading in some cases.
|
| 244 |
+
|
| 245 |
+
`--fp8_vl` is recommended for GPUs with less than 16GB of VRAM.
|
| 246 |
+
|
| 247 |
+
`--sdpa` uses PyTorch's scaled dot product attention. Other options like `--xformers` and `--flash_attn` are available. `flash3` cannot be used currently.
|
| 248 |
+
|
| 249 |
+
If you specify `--split_attn`, the attention computation will be split, slightly reducing memory usage. Please specify `--split_attn` if you are using anything other than `--sdpa`.
|
| 250 |
+
|
| 251 |
+
`--timestep_sampling` allows you to choose the sampling method for the timesteps. `shift` with `--discrete_flow_shift` is the default. `qwen_shift` is also available. `qwen_shift` is the same method used during inference: it uses a dynamic shift value based on the resolution of each image (typically around 2.2 for 1328x1328 images).
|
| 252 |
+
|
| 253 |
+
`--discrete_flow_shift` is set quite low for Qwen-Image during inference (as described), so a lower value than other models may be preferable.
|
| 254 |
+
|
| 255 |
+
Don't forget to specify `--network_module networks.lora_qwen_image`.
|
| 256 |
+
|
| 257 |
+
The appropriate settings for each parameter are unknown. Feedback is welcome.
|
| 258 |
+
|
| 259 |
+
### VRAM Usage Estimates with Memory Saving Options
|
| 260 |
+
|
| 261 |
+
For 1024x1024 training with a batch size of 1, with `--mixed_precision bf16` and `--gradient_checkpointing` enabled and `--xformers` used.
|
| 262 |
+
|
| 263 |
+
|options|VRAM Usage|
|
| 264 |
+
|-------|----------|
|
| 265 |
+
|no |42GB|
|
| 266 |
+
|`--fp8_base --fp8_scaled`|30GB|
|
| 267 |
+
|+ `--blocks_to_swap 16`|24GB|
|
| 268 |
+
|+ `--blocks_to_swap 45`|12GB|
|
| 269 |
+
|
| 270 |
+
64GB main RAM system is recommended with `--blocks_to_swap`.
|
| 271 |
+
|
| 272 |
+
If `--blocks_to_swap` is more than 45, the main RAM usage will increase significantly.
|
| 273 |
+
|
| 274 |
+
Qwen-Image-Edit training requires additional memory for the control images.
|
| 275 |
+
|
| 276 |
+
**Note:** The `--disable_numpy_memmap` option speeds up model loading in some cases with using standard file read instead of using numpy memory mapping. If you encounter slow model weight loading time, this option may help.
|
| 277 |
+
|
| 278 |
+
<details>
|
| 279 |
+
<summary>日本語</summary>
|
| 280 |
+
|
| 281 |
+
Qwen-Imageの学習は専用のスクリプト`qwen_image_train_network.py`を使用します。コマンドライン例は英語版を参照してください。
|
| 282 |
+
|
| 283 |
+
**Qwen-Image-Editの学習について**
|
| 284 |
+
|
| 285 |
+
画像編集モデルの学習には、Qwen-Image-Edit、Edit-2509、またはEdit-2511の`--model_version`オプションを追加してください。
|
| 286 |
+
|
| 287 |
+
**Qwen-Image-Layeredの学習について**
|
| 288 |
+
|
| 289 |
+
レイヤード制御画像を使用したQwen-Image-Layeredモデルの学習には、`--model_version layered`オプションを追加してください。
|
| 290 |
+
|
| 291 |
+
`--remove_first_image_from_target`オプションも利用可能で、学習中に最初のターゲット画像をモデルの入力/ターゲットから除外します。Qwen-Image-Layeredでは公式モデルでは推論される複数枚の画像のうち、最初の画像は元の画像であり、残りがレイヤー画像となっています。このオプションを使用すると、元の画像を推論せずにレイヤー画像のみを学習できます。これにより学習、推論の速度が向上し、メモリ使用量も削減されます。品質への影響は不明です。
|
| 292 |
+
|
| 293 |
+
このアーキテクチャではVAEが異なることに注意してください。Qwen-Image-Layered用のVAEモデルを使用してください。
|
| 294 |
+
|
| 295 |
+
Qwen-Image-Layeredにおける学習中のサンプル画像生成については、[こちらのドキュメント](./sampling_during_training.md#sample-image-generation-during-qwen-image-layered-training--qwen-image-layeredの学習中のサンプルイメージ生成)を参照してください。
|
| 296 |
+
|
| 297 |
+
---
|
| 298 |
+
|
| 299 |
+
Qwen-Image/Edit/Layered学習に共通の注意点:
|
| 300 |
+
|
| 301 |
+
- `qwen_image_train_network.py`を使用します。
|
| 302 |
+
- `--dit`、`--vae`、`--text_encoder`を指定する必要があります。
|
| 303 |
+
- Qwen-Imageの学習には`--mixed_precision bf16`を推奨します。
|
| 304 |
+
- コントロール画像を使ったQwen-Image-Edit/Edit-2509/Edit-2511の学習、複数ターゲット画像を使ったQwen-Image-Layeredの学習には、`--model_version`オプションを適切に指定してください。
|
| 305 |
+
- `--fp8_base`や`--fp8_scaled`(DiT用)、`--fp8_vl`(テキストエンコーダー用)などのメモリ節約オプションが利用可能です。
|
| 306 |
+
- メモリ節約のために`--gradient_checkpointing`が利用可能です。
|
| 307 |
+
- `--disable_numpy_memmap`: モデル読み込み時のnumpyメモリマッピングを無効化し、標準のファイル読み込みで読み込みを行います。RAM使用量は増加しますが、場合によってはモデルの読み込みが大幅に高速化されます。もしモデルの重みの読み込み時間が遅い場合は、このオプションが役立つかもしれません。
|
| 308 |
+
|
| 309 |
+
GPUのVRAMが16GB未満の場合は、`--fp8_vl`を推奨します。
|
| 310 |
+
|
| 311 |
+
`--sdpa`はPyTorchのscaled dot product attentionを用います。他に `--xformers`、`--flash_attn` があります。`--flash3`は現在使用できません。
|
| 312 |
+
|
| 313 |
+
`--split_attn` を指定すると、attentionの計算が分割され、メモリ使用量がわずかに削減されます。`--sdpa` 以外を使用する場合は、`--split_attn` を指定してください。
|
| 314 |
+
|
| 315 |
+
`--timestep_sampling` では、タイムステップのサンプリング方法を選択できます。`shift` と `--discrete_flow_shift` の組み合わせがデフォルトです。`qwen_shift` も利用可能です。`qwen_shift` は推論時と同じ方法で、各画像の解像度に基づいた動的シフト値を使用します(通常、1328x1328画像の場合は約2.2です)。
|
| 316 |
+
|
| 317 |
+
`--discrete_flow_shift`は、Qwen-Imageでは前述のように推論時にかなり低めなため、他のモデルよりも低めが良いかもしれません。
|
| 318 |
+
|
| 319 |
+
`--network_module networks.lora_qwen_image`を指定することを忘れないでください。
|
| 320 |
+
|
| 321 |
+
それぞれのパラメータの適切な設定は不明です。フィードバックをお待ちしています。
|
| 322 |
+
|
| 323 |
+
### メモリ節約オプションを使用した場合のVRAM使用量の目安
|
| 324 |
+
|
| 325 |
+
1024x1024の学習でバッチサイズ1の場合、`--mixed_precision bf16`と`--gradient_checkpointing`を指定し、`--xformers`を使用した場合のVRAM使用量の目安は以下の通りです。
|
| 326 |
+
|
| 327 |
+
|オプション|VRAM使用量|
|
| 328 |
+
|-------|----------|
|
| 329 |
+
|no |42GB|
|
| 330 |
+
|`--fp8_base --fp8_scaled`|30GB|
|
| 331 |
+
|+ `--blocks_to_swap 16`|24GB|
|
| 332 |
+
|+ `--blocks_to_swap 45`|12GB|
|
| 333 |
+
|
| 334 |
+
`--blocks_to_swap`を使用する場合は、64GBのメインRAMを推奨します。
|
| 335 |
+
|
| 336 |
+
`--blocks_to_swap`が45を超えると、メインRAMの使用量が大幅に増加します。
|
| 337 |
+
|
| 338 |
+
Qwen-Image-Editの学習では、コントロール画像のために追加のメモリが必要です。
|
| 339 |
+
|
| 340 |
+
**備考:** `--disable_numpy_memmap`オプションは、numpyメモリマッピングの代わりに標準のファイル読み込みを使用することで、場合によってはモデルの読み込みを高速化します。モデルの重みの読み込み時間が遅い場合は、このオプションが役立つかもしれません。
|
| 341 |
+
|
| 342 |
+
</details>
|
| 343 |
+
|
| 344 |
+
## Finetuning
|
| 345 |
+
|
| 346 |
+
Finetuning uses a dedicated script `qwen_image_train.py`. This script performs full finetuning of the model, not LoRA. Sample usage is as follows:
|
| 347 |
+
|
| 348 |
+
```bash
|
| 349 |
+
accelerate launch --num_cpu_threads_per_process 1 src/musubi_tuner/qwen_image_train.py \
|
| 350 |
+
--dit path/to/dit_model \
|
| 351 |
+
--vae path/to/vae_model \
|
| 352 |
+
--text_encoder path/to/text_encoder \
|
| 353 |
+
--model_version original \
|
| 354 |
+
--dataset_config path/to/toml \
|
| 355 |
+
--sdpa --mixed_precision bf16 --gradient_checkpointing \
|
| 356 |
+
--optimizer_type adafactor --learning_rate 1e-6 --fused_backward_pass \
|
| 357 |
+
--optimizer_args "relative_step=False" "scale_parameter=False" "warmup_init=False" \
|
| 358 |
+
--max_grad_norm 0 --lr_scheduler constant_with_warmup --lr_warmup_steps 10 \
|
| 359 |
+
--max_data_loader_n_workers 2 --persistent_data_loader_workers \
|
| 360 |
+
--max_train_epochs 16 --save_every_n_epochs 1 --seed 42 \
|
| 361 |
+
--output_dir path/to/output_dir --output_name name-of-model
|
| 362 |
+
```
|
| 363 |
+
|
| 364 |
+
- Uses `qwen_image_train.py`.
|
| 365 |
+
- Finetuning requires a large amount of VRAM. The use of memory saving options is strongly recommended.
|
| 366 |
+
- `--full_bf16`: Loads the model weights in bfloat16 format to significantly reduce VRAM usage.
|
| 367 |
+
- `--optimizer_type adafactor`: Using Adafactor is recommended for finetuning.
|
| 368 |
+
- `--fused_backward_pass`: Reduces VRAM usage during the backward pass when using Adafactor.
|
| 369 |
+
- `--mem_eff_save`: Reduces main memory (RAM) usage when saving checkpoints.
|
| 370 |
+
- `--blocks_to_swap`: Swaps model blocks between VRAM and main memory to reduce VRAM usage. This is effective when VRAM is limited.
|
| 371 |
+
- `--disable_numpy_memmap`: Disables numpy memory mapping for model loading, loading with standard file read. Increases RAM usage but may speed up model loading in some cases.
|
| 372 |
+
|
| 373 |
+
`--full_bf16` reduces VRAM usage by about 20GB but may impact model accuracy as the weights are kept in bfloat16. Note that the optimizer state is still kept in float32. In addition, it is recommended to use this with an optimizer that supports stochastic rounding. In this repository, Adafactor optimizer with `--fused_backward_pass` option supports stochastic rounding.
|
| 374 |
+
|
| 375 |
+
When using `--mem_eff_save`, please note that traditional saving methods are still used when saving the optimizer state in `--save_state`, requiring about 40GB of main memory.
|
| 376 |
+
|
| 377 |
+
`--model_version` option allows for finetuning of Qwen-Image-Edit/Edit-2509/Edit-2511 (unverified).
|
| 378 |
+
|
| 379 |
+
### Recommended Settings
|
| 380 |
+
|
| 381 |
+
We are still exploring the optimal settings. The configurations above are just examples, so please adjust them as needed. We welcome your feedback.
|
| 382 |
+
|
| 383 |
+
If you have ample VRAM, you can use any optimizer of your choice. `--full_bf16` is not recommended.
|
| 384 |
+
|
| 385 |
+
For limited VRAM environments (e.g., 48GB or less), you may need to use `--full_bf16`, the Adafactor optimizer, and `--fused_backward_pass`. Settings above are the recommended options for that case. Please adjust `--lr_warmup_steps` to a value between approximately 10 and 100.
|
| 386 |
+
|
| 387 |
+
`--fused_backward_pass` is not currently compatible with gradient accumulation, and max grad norm may not function as expected, so it is recommended to specify `--max_grad_norm 0`.
|
| 388 |
+
|
| 389 |
+
If your VRAM is even more constrained, you can enable block swapping by specifying a value for `--blocks_to_swap`.
|
| 390 |
+
|
| 391 |
+
Experience with other models suggests that the learning rate may need to be reduced significantly; something in the range of 1e-6 to 1e-5 might be a good place to start.
|
| 392 |
+
|
| 393 |
+
<details>
|
| 394 |
+
<summary>日本語</summary>
|
| 395 |
+
|
| 396 |
+
Finetuningは専用のスクリプト`qwen_image_train.py`を使用します。このスクリプトはLoRAではなく、モデル全体のfinetuningを行います。
|
| 397 |
+
|
| 398 |
+
- `qwen_image_train.py`を使用します。
|
| 399 |
+
- Finetuningは大量のVRAMを必要とします。メモリ節約オプションの使用を強く推奨します。
|
| 400 |
+
- `--full_bf16`: モデルの重みをbfloat16形式で読み込み、VRAM使用量を大幅に削減します。
|
| 401 |
+
- `--optimizer_type adafactor`: FinetuningではAdafactorの使用が推奨されます。
|
| 402 |
+
- `--fused_backward_pass`: Adafactor使用時に、backward pass中のVRAM使用量を削減します。
|
| 403 |
+
- `--mem_eff_save`: チェックポイント保存時のメインメモリ(RAM)使用量を削減します。
|
| 404 |
+
- `--blocks_to_swap`: モデルのブロックをVRAMとメインメモリ間でスワップし、VRAM使用量を削減します。VRAMが少ない場合に有効です。
|
| 405 |
+
- `--disable_numpy_memmap`: モデル読み込み時のnumpyメモリマッピングを無効化し、標準のファイル読み込みで読み込みを行います。RAM使用量は増加しますが、場合によってはモデルの読み込みが高速化されます。
|
| 406 |
+
|
| 407 |
+
`--full_bf16`はVRAM使用量を約20GB削減しますが、重みがbfloat16で保持されるため、モデルの精度に影響を与える可能性があります。オプティマイザの状態はfloat32で保持されます。また、効率的な学習のために、stochastic roundingをサポートするオプティマイザとの併用が推奨されます。このリポジトリでは、`adafactor`オプティマイザに`--fused_backward_pass`オプションの組み合わせでstochastic roundingをサポートしています。
|
| 408 |
+
|
| 409 |
+
`--mem_eff_save`を使用する場合でも、`--save_state`においてはオプティマイザの状態を保存する際に従来の保存方法が依然として使用されるため、約40GBのメインメモリが必要であることに注意してください。
|
| 410 |
+
|
| 411 |
+
`--model_version`オプションにより、Qwen-Image-Edit/Edit-2509/Edit-2511のfinetuningが可能です(未検証)。
|
| 412 |
+
|
| 413 |
+
### 推奨設定
|
| 414 |
+
|
| 415 |
+
最適な設定はまだ調査中です。上記の構成はあくまで一例ですので、必要に応じて調整してください。フィードバックをお待ちしております。
|
| 416 |
+
|
| 417 |
+
十分なVRAMがある場合は、お好みのオプティマイザを使用できます。`--full_bf16`は推奨されません。
|
| 418 |
+
|
| 419 |
+
VRAMが限られている環境(例:48GB以下)の場合は、`--full_bf16`、Adafactorオプティマイザ、および`--fused_backward_pass`を使用する必要があるかもしれません。上記の設定はその場合の推奨オプションです。`--lr_warmup_steps`は約10から100の間の値に調整してください。
|
| 420 |
+
|
| 421 |
+
現時点では`--fused_backward_pass`はgradient accumulationに対応していません。またmax grad normも想定通りに動作しない可能性があるため、`--max_grad_norm 0`を指定することを推奨します。
|
| 422 |
+
|
| 423 |
+
さらにVRAMが制約されている場合は、`--blocks_to_swap`に値を指定してブロックスワッピングを有効にできます。
|
| 424 |
+
|
| 425 |
+
</details>
|
| 426 |
+
|
| 427 |
+
## Inference / 推論
|
| 428 |
+
|
| 429 |
+
Inference uses a dedicated script `qwen_image_generate_image.py`.
|
| 430 |
+
|
| 431 |
+
**Standard Qwen-Image Inference:**
|
| 432 |
+
|
| 433 |
+
```bash
|
| 434 |
+
python src/musubi_tuner/qwen_image_generate_image.py \
|
| 435 |
+
--dit path/to/dit_model \
|
| 436 |
+
--vae path/to/vae_model \
|
| 437 |
+
--text_encoder path/to/text_encoder \
|
| 438 |
+
--prompt "A cat" \
|
| 439 |
+
--negative_prompt " " \
|
| 440 |
+
--image_size 1024 1024 --infer_steps 25 \
|
| 441 |
+
--guidance_scale 4.0 \
|
| 442 |
+
--attn_mode sdpa \
|
| 443 |
+
--save_path path/to/save/dir \
|
| 444 |
+
--output_type images \
|
| 445 |
+
--seed 1234 --lora_multiplier 1.0 --lora_weight path/to/lora.safetensors
|
| 446 |
+
```
|
| 447 |
+
|
| 448 |
+
**Qwen-Image-Edit Inference:**
|
| 449 |
+
|
| 450 |
+
```bash
|
| 451 |
+
python src/musubi_tuner/qwen_image_generate_image.py \
|
| 452 |
+
--dit path/to/edit_dit_model \
|
| 453 |
+
--vae path/to/vae_model \
|
| 454 |
+
--text_encoder path/to/text_encoder \
|
| 455 |
+
--model_version edit-2511 \
|
| 456 |
+
--control_image_path path/to/control_image.png \
|
| 457 |
+
--prompt "Change the background to a beach" \
|
| 458 |
+
--resize_control_to_official_size \
|
| 459 |
+
...
|
| 460 |
+
```
|
| 461 |
+
|
| 462 |
+
**Qwen-Image-Layered Inference:**
|
| 463 |
+
|
| 464 |
+
Please specify `--model_version layered` for Qwen-Image-Layered inference. Note that VAE is different for this architecture. Please use the VAE model for Qwen-Image-Layered.
|
| 465 |
+
|
| 466 |
+
---
|
| 467 |
+
|
| 468 |
+
- Uses `qwen_image_generate_image.py`.
|
| 469 |
+
- **Requires** specifying `--dit`, `--vae`, and `--text_encoder`.
|
| 470 |
+
- `--image_size` is the size of the generated image, height and width are specified in that order.
|
| 471 |
+
- `--prompt`: Prompt for generation.
|
| 472 |
+
- `--guidance_scale` controls the classifier-free guidance scale.
|
| 473 |
+
- For Qwen-Image-Edit:
|
| 474 |
+
- Use the `--model_version` option to specify the version for image editing mode. For example, `--model_version edit-2511` or `--model_version layered`.
|
| 475 |
+
- `--control_image_path`: Path to the control (reference) image for editing. Edit-2509 also supports multiple arguments (e.g., `--control_image_path img1.png img2.png img3.png`).
|
| 476 |
+
- `--resize_control_to_image_size`: Resize control image to match the specified image size.
|
| 477 |
+
- `--resize_control_to_official_size`: Resize control image to official size (1M pixels keeping aspect ratio). **Recommended for better results with Edit models.** (Mandatory for 2511)
|
| 478 |
+
- Above two options are mutually exclusive. If both are not specified, the control image will be used at its original resolution.
|
| 479 |
+
- `--append_original_name`: When saving edited images, appends the original base name of the control image to the output file name.
|
| 480 |
+
- For Qwen-Image-Layered:
|
| 481 |
+
- Specify the image to be layered in `--control_image_path`.
|
| 482 |
+
- Specify the number of layers to output in `--output_layers`. (Since Qwen-Image-Layered also generates the original image, it generates one more than the specified number. If `--remove_first_image_from_target` was used during training, specify "the number of layers - 1" here to match the number of generated images.)
|
| 483 |
+
- `--resize_control_to_image_size`: Resize control image to match the specified image size. **Recommended for better results with Layered models.**
|
| 484 |
+
- Memory saving options like `--fp8_scaled` (for DiT) are available.
|
| 485 |
+
- `--text_encoder_cpu` enables CPU inference for the text encoder. Recommended for systems with limited GPU resources (less than 16GB VRAM).
|
| 486 |
+
- LoRA loading options (`--lora_weight`, `--lora_multiplier`) are available.
|
| 487 |
+
|
| 488 |
+
You can specify the discrete flow shift using `--flow_shift`. If omitted, the default value (dynamic shifting based on the image size) will be used.
|
| 489 |
+
|
| 490 |
+
`xformers`, `flash`, and `sageattn` are also available as attention modes. However, `sageattn` is not confirmed to work yet.
|
| 491 |
+
|
| 492 |
+
<details>
|
| 493 |
+
<summary>日本語</summary>
|
| 494 |
+
|
| 495 |
+
Qwen-Imageの推論は専用のスクリプト`qwen_image_generate_image.py`を使用します。コマンド例は英語版のドキュメントを参照してください。
|
| 496 |
+
|
| 497 |
+
**Qwen-Image-Layeredの推論について**
|
| 498 |
+
|
| 499 |
+
Qwen-Image-Layeredの推論には`--model_version layered`を指定してください。このアーキテクチャではVAEが異なることに注意してください。Qwen-Image-Layered用のVAEモデルを使用してください。
|
| 500 |
+
|
| 501 |
+
---
|
| 502 |
+
|
| 503 |
+
- `qwen_image_generate_image.py`を使用します。
|
| 504 |
+
- `--dit`、`--vae`、`--text_encoder`を指定する必要があります。
|
| 505 |
+
- `--image_size`は生成する画像のサイズで、高さと幅をその順番で指定します。
|
| 506 |
+
- `--prompt`: 生成用のプロンプトです。
|
| 507 |
+
- `--guidance_scale`は、classifier-freeガイダンスのスケールを制御します。
|
| 508 |
+
- Qwen-Image-Editの場合:
|
| 509 |
+
- 画像編集モードを有効にするために`--model_version`オプションを適切に指定してください。
|
| 510 |
+
- `--control_image_path`: 編集用のコントロール(参照)画像へのパスです。 Edit-2509では複数の引数もサポートしています(例: `--control_image_path img1.png img2.png img3.png`)。
|
| 511 |
+
- `--resize_control_to_image_size`: コントロール画像を指定した画像サイズに合わせてリサイズします。
|
| 512 |
+
- `--resize_control_to_official_size`: コントロール画像を公式サイズ(アスペクト比を保ちながら100万ピクセル)にリサイズします。指定を推奨します(特に2511では必須)。
|
| 513 |
+
- 上記2つのオプションは同時に指定できません。両方とも指定しない場合、制御画像はそのままの解像度で使用されます。
|
| 514 |
+
- `--append_original_name`: 編集された画像を保存する際に、コントロール画像の元の基本名を出力ファイル名に追加します。
|
| 515 |
+
- Qwen-Image-Layeredの場合:
|
| 516 |
+
- `--control_image_path`に、分割対象の画像を指定してください。
|
| 517 |
+
- `--output_layers`に出力するレイヤー数を指定してください。(Qwen-Image-Layeredは元画像も生成するため、指定した数より1枚多く生成されます。もし学習時に`--remove_first_image_from_target`を使用していた場合は、ここには「レイヤー数-1」を指定してください。)
|
| 518 |
+
- `--resize_control_to_image_size`: コントロール画像を指定した画像サイズに合わせてリサイズします。Layeredモデルでより良い結果を得るために推奨されます。
|
| 519 |
+
- DiTのメモリ使用量を削減するために、`--fp8_scaled`オプションを指定可能です。
|
| 520 |
+
- `--text_encoder_cpu`を指定するとテキストエンコーダーをCPUで推論します。GPUのVRAMが16GB未満のシステムでは、CPU推論を推奨します。
|
| 521 |
+
- LoRAの読み込みオプション(`--lora_weight`、`--lora_multiplier`)が利用可能です。
|
| 522 |
+
|
| 523 |
+
`--flow_shift`を指定することで、離散フローシフトを設定できます。省略すると、デフォルト値(画像サイズに基づく動的シフト)が使用されます。
|
| 524 |
+
|
| 525 |
+
`xformers`、`flash`、`sageattn`もattentionモードとして利用可能です。ただし、`sageattn`はまだ動作確認が取れていません。
|
| 526 |
+
|
| 527 |
+
</details>
|
| 528 |
+
|
| 529 |
+
### Inpainting and Reference Consistency Mask (RCM)
|
| 530 |
+
|
| 531 |
+
For Qwen-Image-Edit, inpainting with a mask image and a feature called Reference Consistency Mask (RCM) are available to prevent unintended changes in the background or other areas.
|
| 532 |
+
|
| 533 |
+
**These features are only available in Edit/Edit-plus mode, and require the first control image to be the same size as the output image.** They cannot be used at the same time.
|
| 534 |
+
|
| 535 |
+
- `--mask_path`: Specifies the path to a mask image for inpainting. The image should be black and white, where white areas indicate the regions to be inpainted (changed) and black areas indicate the regions to be preserved.
|
| 536 |
+
- `--rcm_threshold`: Enables the Reference Consistency Mask (RCM) feature. RCM is a technique that dynamically creates a mask during the denoising process to prevent unintended modifications to areas that should remain unchanged. It compares the latents of the current generation step with the latents of the control image and protects areas with small differences. Lower values for the threshold result in a larger inpainting area. Typical values are 0.01 to 0.1 for absolute threshold, 0.1 to 0.5 for relative threshold.
|
| 537 |
+
- `--rcm_relative_threshold`: If this flag is set, the `--rcm_threshold` is treated as a relative value (0.0-1.0) to the maximum difference observed in the current step. This can provide more stable results across different steps. If not set, the threshold is an absolute value.
|
| 538 |
+
- `--rcm_kernel_size`: Specifies the kernel size for a Gaussian blur applied before calculating the difference. This helps to create a smoother, more stable mask. Default is 3.
|
| 539 |
+
- `--rcm_dilate_size`: Specifies the size to dilate (expand) the inpainting region of the generated mask. This is useful for ensuring that the edges of the area you want to change are properly modified. Default is 0 (no dilation).
|
| 540 |
+
- `--rcm_debug_save`: When this flag is set, the dynamically generated RCM mask for each step will be saved in the output directory. This is very useful for debugging and adjusting the RCM parameters.
|
| 541 |
+
|
| 542 |
+
**Example using RCM:**
|
| 543 |
+
|
| 544 |
+
```bash
|
| 545 |
+
python src/musubi_tuner/qwen_image_generate_image.py \
|
| 546 |
+
--dit path/to/edit_dit_model \
|
| 547 |
+
--vae path/to/vae_model \
|
| 548 |
+
--text_encoder path/to/text_encoder \
|
| 549 |
+
--edit \
|
| 550 |
+
--control_image_path path/to/control_image.png \
|
| 551 |
+
--prompt "Change her dress to red" \
|
| 552 |
+
--image_size 1024 1024 \
|
| 553 |
+
--rcm_threshold 0.2 --rcm_relative_threshold \
|
| 554 |
+
--rcm_kernel_size 3 --rcm_dilate_size 1 \
|
| 555 |
+
...
|
| 556 |
+
```
|
| 557 |
+
|
| 558 |
+
#### Important Usage Notes
|
| 559 |
+
|
| 560 |
+
- **Compatibility:** Both RCM and the standard inpainting mask are only effective in **edit mode** (when a control image is provided).
|
| 561 |
+
- **Requirement:** To use these features, the initial control image must have the **same dimensions** as the final output image. The script will show an error and disable RCM if the sizes do not match.
|
| 562 |
+
- **Exclusivity:** RCM and `--mask_path` cannot be used at the same time.
|
| 563 |
+
- **Debugging Tip:** When first using RCM, it is highly recommended to use the `--rcm_debug_save` flag. This will save the masks to the output directory, allowing you to visually inspect how the `threshold` and other parameters are affecting the mask generation.
|
| 564 |
+
|
| 565 |
+
#### Technical Details of RCM
|
| 566 |
+
|
| 567 |
+
Reference Consistency Mask (RCM) addresses a common issue in Qwen-Image-Edit where the generated image has a slight positional drift or misalignment compared to the control image. RCM significantly improves the structural stability and positional accuracy of the image editing process.
|
| 568 |
+
|
| 569 |
+
This feature is implemented based on the idea of dynamically creating a mask during the denoising loop to "anchor" the parts of the image that should remain consistent with the reference (control) image.
|
| 570 |
+
|
| 571 |
+
**How RCM Works**
|
| 572 |
+
|
| 573 |
+
For each step in the denoising loop, RCM performs the following actions:
|
| 574 |
+
1. It calculates a "noisy" version of the original control latent, corresponding to the current timestep `t`.
|
| 575 |
+
2. It computes the difference between the current generation latent and the noisy control latent.
|
| 576 |
+
3. Areas with a small difference are considered "consistent" and are masked to be preserved. The sensitivity is controlled by the `rcm_threshold`.
|
| 577 |
+
4. This mask is then used to reset the consistent regions of the current latent back to the state of the noisy reference latent, just before the `scheduler.step` is called.
|
| 578 |
+
|
| 579 |
+
This self-correcting mechanism prevents the accumulation of positional errors throughout the denoising process, ensuring that unchanged elements like backgrounds or faces stay perfectly aligned.
|
| 580 |
+
|
| 581 |
+
<details>
|
| 582 |
+
<summary>日本語</summary>
|
| 583 |
+
|
| 584 |
+
Qwen-Image-Editにおいて、背景などを意図せず変更してしまうことを防ぐため、マスク画像を使ったInpaintingと、Reference Consistency Mask (RCM) という機能が利用可能です。
|
| 585 |
+
|
| 586 |
+
**これらの機能はEdit/Edit-plusモードでのみ利用可能で、かつ最初のコントロール画像が出力画像と同じサイズである必要があります。** また、同時に使用することはできません。
|
| 587 |
+
|
| 588 |
+
- `--mask_path`: Inpainting用のマスク画像へのパスを指定します。白黒のマスク画像で、白の領域がInpainting(変更)される領域、黒の領域が維持される領域を示します。
|
| 589 |
+
- `--rcm_threshold`: Reference Consistency Mask (RCM) 機能を有効にします。RCMは、Denoisingの過程で動的にマスクを生成し、変更すべきでない箇所が意図せず変更されるのを防ぐ技術です。現在の生成ステップのlatentとコントロール画像のlatentを比較し、差が小さい部分を保護します。閾値が低いほど、Inpainting領域は大きくなります。
|
| 590 |
+
- `--rcm_relative_threshold`: このフラグを指定すると、`--rcm_threshold`がそのステップで観測された差分の最大値に対する相対的な値(0.0~1.0)として扱われます。これにより、ステップごとに安定した結果が得られやすくなります。指定しない場合は絶対値として扱われます。絶対値の場合は0.01~0.1、相対値の場合は0.1~0.5が典型的な値です。
|
| 591 |
+
- `--rcm_kernel_size`: 差分を計算する前に適用するガウシアンブラーのカーネルサイズを指定します。これにより、より滑らかで安定したマスクが生成されます。デフォルトは3です。
|
| 592 |
+
- `--rcm_dilate_size`: 生成されたマスクのInpainting領域を膨張(dilate)させるサイズを指定します。変更したい領域の境界部分が確実に変更されるようにしたい場合に便利です。デフォルトは0(膨張なし)です。
|
| 593 |
+
- `--rcm_debug_save`: このフラグを指定すると、各ステップで動的に生成されたRCMのマスクが出力ディレクトリに保存されます。RCMのパラメータを調整する際のデバッグに非常に役立ちます。
|
| 594 |
+
|
| 595 |
+
**重要な使用上の注意**
|
| 596 |
+
|
| 597 |
+
- **互換性:** RCMと標準のinpaintingマスクは、どちらも**Editモード**(制御画像が提供されている場合)でのみ有効です。
|
| 598 |
+
- **要件:** これらの機能を使用するには、最初の制御画像が最終的な出力画像と**同じサイズ**である必要があります。サイズが一致しない場合、スクリプトはエラーを表示し、RCMを無効にします。
|
| 599 |
+
- **排他性:** RCMと`--mask_path`は同時に使用できません。
|
| 600 |
+
- **デバッグのヒント:** 初めてRCMを使用する際は、`--rcm_debug_save`フラグを使用することを強く推奨します。これによりマスクが出力ディレクトリに保存され、`threshold`などのパラメータがマスク生成にどのように影響しているかを視覚的に確認できます。
|
| 601 |
+
|
| 602 |
+
**RCMの技術的詳細**
|
| 603 |
+
|
| 604 |
+
Reference Consistency Mask (RCM) は、Qwen-Image-Editにおいて、生成画像が制御画像と比較してわずかな位置ずれを起こすという一般的な問題を解決するためのものです。RCMは、編集プロセスにおける構造的な安定性と位置精度を大幅に向上させます。
|
| 605 |
+
|
| 606 |
+
この機能は、denoisingループ中に動的にマスクを生成し、参照元(制御画像)と一致すべき部分を「固定(アンカー)」するというアイデアに基づいています。
|
| 607 |
+
|
| 608 |
+
**RCMの動作原理**
|
| 609 |
+
|
| 610 |
+
RCMは、denoisingループの各ステップで以下の処理を実行します。
|
| 611 |
+
1. 現在のタイムステップ`t`に対応する、元の制御画像の潜在変数にノイズを加えたバージョンを計算します。
|
| 612 |
+
2. 現在の生成中latentと、ノイズ付加済み制御latentとの差分を計算します。
|
| 613 |
+
3. 差分が小さい領域を「一致している」とみなし、その部分を保持するようにマスクします。この感度は`rcm_threshold`によって制御されます。
|
| 614 |
+
4. そして、このマスクを使い、`scheduler.step`が呼び出される直前に、一致している領域をノイズ付加済み参照latentの状態にリセットします。
|
| 615 |
+
|
| 616 |
+
この自己修正的なメカニズムにより、denoisingプロセス全体を通して位置誤差が蓄積されるのを防ぎ、背景や顔のような変更しない要素が完全に位置ずれなく維持されることを保証します。
|
| 617 |
+
|
| 618 |
+
</details>
|
docs/sampling_during_training.md
ADDED
|
@@ -0,0 +1,141 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
> 📝 Click on the language section to expand / 言語をクリックして展開
|
| 2 |
+
|
| 3 |
+
# Sampling during training / 学習中のサンプル画像生成
|
| 4 |
+
|
| 5 |
+
By preparing a prompt file, you can generate sample images during training.
|
| 6 |
+
|
| 7 |
+
Please be aware that it consumes a considerable amount of VRAM, so be careful when generating sample images for videos with a large number of frames. Also, since it takes time to generate, adjust the frequency of sample image generation as needed.
|
| 8 |
+
|
| 9 |
+
<details>
|
| 10 |
+
<summary>日本語</summary>
|
| 11 |
+
|
| 12 |
+
プロンプトファイルを用意することで、学習中にサンプル画像を生成することができます。
|
| 13 |
+
|
| 14 |
+
VRAMをそれなりに消費しますので、特にフレーム数が多い動画を生成する場合は注意してください。また生成には時間がかかりますので、サンプル画像生成の頻度は適宜調整してください。
|
| 15 |
+
</details>
|
| 16 |
+
|
| 17 |
+
## How to use / 使い方
|
| 18 |
+
|
| 19 |
+
### Command line options for training with sampling / サンプル画像生成に関連する学習時のコマンドラインオプション
|
| 20 |
+
|
| 21 |
+
Example of command line options for training with sampling / 記述例:
|
| 22 |
+
|
| 23 |
+
```bash
|
| 24 |
+
--vae path/to/ckpts/hunyuan-video-t2v-720p/vae/pytorch_model.pt
|
| 25 |
+
--vae_chunk_size 32 --vae_spatial_tile_sample_min_size 128
|
| 26 |
+
--text_encoder1 path/to/ckpts/text_encoder
|
| 27 |
+
--text_encoder2 path/to/ckpts/text_encoder_2
|
| 28 |
+
--sample_prompts /path/to/prompt_file.txt
|
| 29 |
+
--sample_every_n_epochs 1 --sample_every_n_steps 1000 --sample_at_first
|
| 30 |
+
```
|
| 31 |
+
|
| 32 |
+
`--vae`, `--vae_chunk_size`, `--vae_spatial_tile_sample_min_size`, `--text_encoder1`, `--text_encoder2` are the same as when generating images, so please refer to [here](/README.md#inference) for details. `--fp8_llm` can also be specified.
|
| 33 |
+
|
| 34 |
+
`--sample_prompts` specifies the path to the prompt file used for sample image generation. Details are described below.
|
| 35 |
+
|
| 36 |
+
`--sample_every_n_epochs` specifies how often to generate sample images in epochs, and `--sample_every_n_steps` specifies how often to generate sample images in steps.
|
| 37 |
+
|
| 38 |
+
`--sample_at_first` is specified when generating sample images at the beginning of training.
|
| 39 |
+
|
| 40 |
+
Sample images and videos are saved in the `sample` directory in the directory specified by `--output_dir`. They are saved as `.png` for still images and `.mp4` for videos.
|
| 41 |
+
|
| 42 |
+
<details>
|
| 43 |
+
<summary>日本語</summary>
|
| 44 |
+
|
| 45 |
+
`--vae`、`--vae_chunk_size`、`--vae_spatial_tile_sample_min_size`、`--text_encoder1`、`--text_encoder2`は、画像生成時と同様ですので、詳細は[こちら](/README.ja.md#推論)を参照してください。`--fp8_llm`も指定可能です。
|
| 46 |
+
|
| 47 |
+
`--sample_prompts`は、サンプル画像生成に使用するプロンプトファイルのパスを指定します。詳細は後述します。
|
| 48 |
+
|
| 49 |
+
`--sample_every_n_epochs`は、何エポックごとにサンプル画像を生成するかを、`--sample_every_n_steps`は、何ステップごとにサンプル画像を生成するかを指定します。
|
| 50 |
+
|
| 51 |
+
`--sample_at_first`は、学習開始時にサンプル画像を生成する場合に指定します。
|
| 52 |
+
|
| 53 |
+
サンプル画像、動画は、`--output_dir`で指定したディレクトリ内の、`sample`ディレクトリに保存されます。静止画の場合は`.png`、動画の場合は`.mp4`で保存されます。
|
| 54 |
+
</details>
|
| 55 |
+
|
| 56 |
+
### Prompt file / プロンプトファイル
|
| 57 |
+
|
| 58 |
+
The prompt file is a text file that contains the prompts for generating sample images. The example is as follows. / プロンプトファイルは、サンプル画像生成のためのプロンプトを記述したテキストファイルです。例は以下の通りです。
|
| 59 |
+
|
| 60 |
+
```
|
| 61 |
+
# prompt 1: for generating a cat video
|
| 62 |
+
A cat walks on the grass, realistic style. --w 640 --h 480 --f 25 --d 1 --s 20
|
| 63 |
+
|
| 64 |
+
# prompt 2: for generating a dog image
|
| 65 |
+
A dog runs on the beach, realistic style. --w 960 --h 544 --f 1 --d 2 --s 20
|
| 66 |
+
```
|
| 67 |
+
|
| 68 |
+
A line starting with `#` is a comment.
|
| 69 |
+
|
| 70 |
+
* `--w` specifies the width of the generated image or video. The default is 256.
|
| 71 |
+
* `--h` specifies the height. The default is 256.
|
| 72 |
+
* `--f` specifies the number of frames. The default is 1, which generates a still image.
|
| 73 |
+
* `--d` specifies the seed. The default is random.
|
| 74 |
+
* `--s` specifies the number of steps in generation. The default is 20.
|
| 75 |
+
* `--g` specifies the embedded guidance scale (not CFG scale). The default is 6.0 for HunyuanVideo, 10.0 for FramePack, 2.5 for FLUX.1 Kontext which is the default value during inference of each architecture. Specify 1.0 for SkyReels V1 models. Ignore this option for Wan2.1 models.
|
| 76 |
+
* `--fs` specifies the discrete flow shift. The default is 14.5, which corresponds to the number of steps 20. In the HunyuanVideo paper, 7.0 is recommended for 50 steps, and 17.0 is recommended for less than 20 steps (e.g. 10). Ignore this option for FramePack models (it uses 10.0). Set 0 to use 'flux_shift' for FLUX.1 Kontext models.
|
| 77 |
+
|
| 78 |
+
If you train I2V models, you must add the following option.
|
| 79 |
+
|
| 80 |
+
* `--i path/to/image.png`: the image path for image2video inference. PNG, JPG and other formats are supported.
|
| 81 |
+
|
| 82 |
+
If you train Wan2.1-Fun-Control models, you must add the following option.
|
| 83 |
+
|
| 84 |
+
* `--cn path/to/control_video_or_dir_of_images`: the path to the video or directory containing multiple images for control.
|
| 85 |
+
|
| 86 |
+
If you train the model with classifier free guidance (such as Wan2.1), you can use the additional options below.
|
| 87 |
+
|
| 88 |
+
* `--n negative prompt...`: the negative prompt for the classifier free guidance. The default prompt for each model is used if omitted.
|
| 89 |
+
* `--l 6.0`: the classifier free guidance scale. Should be set to 6.0 for SkyReels V1 models. 5.0 is the default value for Wan2.1 (if omitted).
|
| 90 |
+
|
| 91 |
+
If you train the model with control images (such as FramePack one frame inference or FLUX.1 Kontext), you can use the additional options below.
|
| 92 |
+
|
| 93 |
+
* `--ci path/to/control_image.jpg`: the control image path for inference. If you specify this option, the control image is used for inference. PNG, JPG and other formats are supported.
|
| 94 |
+
|
| 95 |
+
### Sample image generation during Qwen-Image-Layered training / Qwen-Image-Layeredの学習中のサンプルイメージ生成
|
| 96 |
+
|
| 97 |
+
`--f` option is treated as the number of output layers.
|
| 98 |
+
|
| 99 |
+
The prompt can be omitted when generating sample images during Qwen-Image-Layered training. In this case, the prompt is generated based on the control image by Qwen2.5-VL.
|
| 100 |
+
|
| 101 |
+
※ Since Qwen-Image-Layered models generate "original image + multiple layer images", the number of images generated is the number specified by the `--f` option + 1. The second and subsequent images are separated layer images.
|
| 102 |
+
|
| 103 |
+
<details>
|
| 104 |
+
<summary>日本語</summary>
|
| 105 |
+
|
| 106 |
+
`#` で始まる行はコメントです。
|
| 107 |
+
|
| 108 |
+
* `--w` 生成画像、動画の幅を指定します。省略時は256です。
|
| 109 |
+
* `--h` 高さを指定します。省略時は256です。
|
| 110 |
+
* `--f` フレーム数を指定します。省略時は1で、静止画を生成します。
|
| 111 |
+
* `--d` シードを指定します。省略時はランダムです。
|
| 112 |
+
* `--s` 生成におけるステップ数を指定します。省略時は20です。
|
| 113 |
+
* `--g` embedded guidance scaleを指定します(CFG scaleではありません)。省略時はHunyuanVideoは6.0、FramePackは10.0で、各アーキテクチャの推論時のデフォルト値です。SkyReels V1モデルの場合は1.0を指定してください。FLUX.1 Kontextの場合は2.5を指定してください。Wan2.1モデルの場合はこのオプションは無視されます。
|
| 114 |
+
* `--fs` discrete flow shiftを指定します。省略時は14.5で、ステップ数20の場合に対応した値です。HunyuanVideoの論文では、ステップ数50の場合は7.0、ステップ数20未満(10など)で17.0が推奨されています。FramePackモデルはこのオプションは無視され、10.0が使用されます。FLUX.1 Kontextモデルでは、0を指定すると `flux_shift` が使用されます。
|
| 115 |
+
|
| 116 |
+
I2Vモデルを学習する場合、以下のオプションを追加してください。
|
| 117 |
+
|
| 118 |
+
* `--i path/to/image.png`: image2video推論用の画像パス。PNG、JPGなどの形式がサポートされています。
|
| 119 |
+
|
| 120 |
+
Wan2.1-Fun-Controlモデルを学習する場合、以下のオプションを追加してください。
|
| 121 |
+
|
| 122 |
+
* `--cn path/to/control_video_or_dir_of_images`: control用の動画または複数枚の画像を含むディレクトリのパス。
|
| 123 |
+
|
| 124 |
+
classifier free guidance(ネガティブプロンプト)を必要とするモデル(Wan2.1など)を学習する場合、以下の追加オプションを使用できます。
|
| 125 |
+
|
| 126 |
+
* `--n negative prompt...`: classifier free guidance用のネガティブプロンプト。省略時はモデルごとのデフォルトプロンプトが使用されます。
|
| 127 |
+
* `--l 6.0`: classifier free guidance scale。SkyReels V1モデルの場合は6.0に設定してください。Wan2.1の場合はデフォルト値が5.0です(省略時)。
|
| 128 |
+
|
| 129 |
+
制御画像を使用するモデル(FramePackの1フレーム推論やFLUX.1 Kontextなど)を学習する場合、以下の追加オプションを使用できます。
|
| 130 |
+
|
| 131 |
+
* `--ci path/to/control_image.jpg`: 推論用の制御画像パス。このオプションを指定すると、制御画像が推論に使用されます。PNG、JPGなどの形式がサポートされています。
|
| 132 |
+
|
| 133 |
+
**Qwen-Image-Layeredの学習中のサンプルイメージ生成**
|
| 134 |
+
|
| 135 |
+
`--f`オプションが出力レイヤー数として扱われます。
|
| 136 |
+
|
| 137 |
+
Qwen-Image-Layeredの学習中にサンプル画像を生成する際、プロンプトは省略可能です。この場合、プロンプトはQwen2.5-VLによってコントロール画像に基づいて生成されます。
|
| 138 |
+
|
| 139 |
+
※ Qwen-Image-Layeredモデルは「元画像+複数のレイヤー画像」を生成するため、`--f`オプションで指定した数+1枚の画像が生成されます。2枚目以降が分離されたレイヤー画像です。
|
| 140 |
+
|
| 141 |
+
</details>
|
docs/tools.md
ADDED
|
@@ -0,0 +1,406 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
> 📝 Click on the language section to expand / 言語をクリックして展開
|
| 2 |
+
|
| 3 |
+
# Tools
|
| 4 |
+
|
| 5 |
+
This document provides documentation for utility tools available in this project.
|
| 6 |
+
|
| 7 |
+
## Table of Contents
|
| 8 |
+
|
| 9 |
+
- [LoRA Post-Hoc EMA merging / LoRAのPost-Hoc EMAマージ](#lora-post-hoc-ema-merging--loraのpost-hoc-emaマージ)
|
| 10 |
+
- [Image Captioning with Qwen2.5-VL / Qwen2.5-VLによる画像キャプション生成](#image-captioning-with-qwen25-vl--qwen25-vlによる画像キャプション生成)
|
| 11 |
+
|
| 12 |
+
## LoRA Post-Hoc EMA merging / LoRAのPost-Hoc EMAマージ
|
| 13 |
+
|
| 14 |
+
The LoRA Post-Hoc EMA (Exponential Moving Average) merging is a technique to combine multiple LoRA checkpoint files into a single, potentially more stable model. This method applies exponential moving average across multiple checkpoints sorted by modification time, with configurable decay rates.
|
| 15 |
+
|
| 16 |
+
The Post-Hoc EMA method works by:
|
| 17 |
+
|
| 18 |
+
1. Sorting checkpoint files by modification time (oldest to newest)
|
| 19 |
+
2. Using the oldest checkpoint as the base
|
| 20 |
+
3. Iteratively merging subsequent checkpoints with a decay rate (beta)
|
| 21 |
+
4. Optionally using linear interpolation between two beta values across the merge process
|
| 22 |
+
|
| 23 |
+
Pseudo-code for merging multiple checkpoints with beta=0.95 would look like this:
|
| 24 |
+
|
| 25 |
+
```
|
| 26 |
+
beta = 0.95
|
| 27 |
+
checkpoints = [checkpoint1, checkpoint2, checkpoint3] # List of checkpoints
|
| 28 |
+
merged_weights = checkpoints[0] # Use the first checkpoint as the base
|
| 29 |
+
for checkpoint in checkpoints[1:]:
|
| 30 |
+
merged_weights = beta * merged_weights + (1 - beta) * checkpoint
|
| 31 |
+
```
|
| 32 |
+
|
| 33 |
+
### Key features:
|
| 34 |
+
|
| 35 |
+
- **Temporal ordering**: Automatically sorts files by modification time
|
| 36 |
+
- **Configurable decay rates**: Supports single beta value or linear interpolation between two beta values
|
| 37 |
+
- **Metadata preservation**: Maintains and updates metadata from the last checkpoint
|
| 38 |
+
- **Hash updating**: Recalculates model hashes for the merged weights
|
| 39 |
+
- **Dtype preservation**: Maintains original data types of tensors
|
| 40 |
+
|
| 41 |
+
### Usage
|
| 42 |
+
|
| 43 |
+
The LoRA Post-Hoc EMA merging is available as a standalone script:
|
| 44 |
+
|
| 45 |
+
```bash
|
| 46 |
+
python src/musubi_tuner/lora_post_hoc_ema.py checkpoint1.safetensors checkpoint2.safetensors checkpoint3.safetensors --output_file merged_lora.safetensors --beta 0.95
|
| 47 |
+
```
|
| 48 |
+
|
| 49 |
+
### Command line options:
|
| 50 |
+
|
| 51 |
+
```
|
| 52 |
+
path [path ...]
|
| 53 |
+
List of paths to the LoRA weight files to merge
|
| 54 |
+
|
| 55 |
+
--beta BETA
|
| 56 |
+
Decay rate for merging weights (default: 0.95)
|
| 57 |
+
Higher values (closer to 1.0) give more weight to the accumulated average
|
| 58 |
+
Lower values give more weight to the current checkpoint
|
| 59 |
+
|
| 60 |
+
--beta2 BETA2
|
| 61 |
+
Second decay rate for linear interpolation (optional)
|
| 62 |
+
If specified, the decay rate will linearly interpolate from beta to beta2
|
| 63 |
+
across the merging process
|
| 64 |
+
|
| 65 |
+
--sigma_rel SIGMA_REL
|
| 66 |
+
Relative sigma for Power Function EMA (optional, mutually exclusive with beta/beta2)
|
| 67 |
+
This resolves the issue where the first checkpoint has a disproportionately large influence when beta is specified.
|
| 68 |
+
If specified, beta is calculated using the Power Function EMA method from the paper:
|
| 69 |
+
https://arxiv.org/pdf/2312.02696. This overrides beta and beta2.
|
| 70 |
+
|
| 71 |
+
--output_file OUTPUT_FILE
|
| 72 |
+
Output file path for the merged weights (required)
|
| 73 |
+
|
| 74 |
+
--no_sort
|
| 75 |
+
Disable sorting of checkpoint files (merge in specified order)
|
| 76 |
+
```
|
| 77 |
+
|
| 78 |
+
### Examples:
|
| 79 |
+
|
| 80 |
+
Basic usage with constant decay rate:
|
| 81 |
+
```bash
|
| 82 |
+
python src/musubi_tuner/lora_post_hoc_ema.py \
|
| 83 |
+
lora_epoch_001.safetensors \
|
| 84 |
+
lora_epoch_002.safetensors \
|
| 85 |
+
lora_epoch_003.safetensors \
|
| 86 |
+
--output_file lora_ema_merged.safetensors \
|
| 87 |
+
--beta 0.95
|
| 88 |
+
```
|
| 89 |
+
|
| 90 |
+
Using linear interpolation between two decay rates:
|
| 91 |
+
```bash
|
| 92 |
+
python src/musubi_tuner/lora_post_hoc_ema.py \
|
| 93 |
+
lora_epoch_001.safetensors \
|
| 94 |
+
lora_epoch_002.safetensors \
|
| 95 |
+
lora_epoch_003.safetensors \
|
| 96 |
+
--output_file lora_ema_interpolated.safetensors \
|
| 97 |
+
--beta 0.90 \
|
| 98 |
+
--beta2 0.95
|
| 99 |
+
```
|
| 100 |
+
|
| 101 |
+
Using Power Function EMA with `sigma_rel`:
|
| 102 |
+
```bash
|
| 103 |
+
python src/musubi_tuner/lora_post_hoc_ema.py \
|
| 104 |
+
lora_epoch_001.safetensors \
|
| 105 |
+
lora_epoch_002.safetensors \
|
| 106 |
+
lora_epoch_003.safetensors \
|
| 107 |
+
--output_file lora_power_ema_merged.safetensors \
|
| 108 |
+
--sigma_rel 0.2
|
| 109 |
+
```
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
#### betas for different σ-rel values:
|
| 113 |
+
|
| 114 |
+

|
| 115 |
+
|
| 116 |
+
### Recommended settings example (after training for 30 epochs, using `--beta`)
|
| 117 |
+
|
| 118 |
+
If you're unsure which settings to try, start with the following "General Recommended Settings".
|
| 119 |
+
|
| 120 |
+
#### 1. General Recommended Settings (start with these combinations)
|
| 121 |
+
|
| 122 |
+
- **Target Epochs:** `15-30` (the latter half of training)
|
| 123 |
+
- **beta:** `0.9` (a balanced value)
|
| 124 |
+
|
| 125 |
+
#### 2. If training converged early
|
| 126 |
+
|
| 127 |
+
- **Situation:** Loss dropped early and stabilized afterwards.
|
| 128 |
+
- **Target Epochs:** `10-30` (from the epoch where loss stabilized to the end)
|
| 129 |
+
- **beta:** `0.95` (wider range, smoother)
|
| 130 |
+
|
| 131 |
+
#### 3. If you want to avoid overfitting
|
| 132 |
+
|
| 133 |
+
- **Situation:** In the latter part of training, generated results are too similar to training data.
|
| 134 |
+
- **Target Epochs:** `15-25` (focus on the peak performance range)
|
| 135 |
+
- **beta:** `0.8` (more emphasis on the latter part of the range while maintaining diversity)
|
| 136 |
+
|
| 137 |
+
**Note:** The optimal values may vary depending on the model and dataset. It's recommended to experiment with multiple `beta` values (e.g., 0.8, 0.9, 0.95) and compare the generated results.
|
| 138 |
+
|
| 139 |
+
### Recommended Settings Example (30 epochs training, using `--sigma_rel`)
|
| 140 |
+
|
| 141 |
+
When using `--sigma_rel`, the beta decay schedule is determined by the Power Function EMA method. Here are some starting points:
|
| 142 |
+
|
| 143 |
+
#### 1. General Recommended Settings
|
| 144 |
+
- **Target Epochs:** All epochs (from the first to the last).
|
| 145 |
+
- **sigma_rel:** `0.2` (a general starting point).
|
| 146 |
+
|
| 147 |
+
#### 2. If training converged early
|
| 148 |
+
- **Situation:** Loss dropped early and stabilized afterwards.
|
| 149 |
+
- **Target Epochs:** All epochs.
|
| 150 |
+
- **sigma_rel:** `0.25` (gives more weight to earlier checkpoints, suitable for early convergence).
|
| 151 |
+
|
| 152 |
+
#### 3. If you want to avoid overfitting
|
| 153 |
+
- **Situation:** In the latter part of training, generated results are too similar to training data.
|
| 154 |
+
- **Target Epochs:** From the first epoch, omitting the last few potentially overfitted epochs.
|
| 155 |
+
- **sigma_rel:** `0.15` (gives more weight to later (but not the very last) checkpoints, helping to mitigate overfitting from the final stages).
|
| 156 |
+
|
| 157 |
+
**Note:** The optimal `sigma_rel` value can depend on the dataset, model, and training duration. Experimentation is encouraged. Values typically range from 0.1 to 0.5. A graph showing the relationship between `sigma_rel` and the calculated `beta` values over epochs will be provided later to help understand its behavior.
|
| 158 |
+
|
| 159 |
+
### Notes:
|
| 160 |
+
|
| 161 |
+
- Files are automatically sorted by modification time, so the order in the command line doesn't matter
|
| 162 |
+
- The `--sigma_rel` option is mutually exclusive with `--beta` and `--beta2`. If `--sigma_rel` is provided, it will determine the beta values, and any provided `--beta` or `--beta2` will be ignored.
|
| 163 |
+
- All checkpoint files to be merged should be from the same training run, saved per epoch or step
|
| 164 |
+
- Merging is possible if shapes match, but may not work correctly as Post Hoc EMA
|
| 165 |
+
- All checkpoint files must have the same alpha value
|
| 166 |
+
- The merged model will have updated hash values in its metadata
|
| 167 |
+
- The metadata of the merged model will be taken from the last checkpoint, with only the hash value recalculated
|
| 168 |
+
- Non-float tensors (long, int, bool, etc.) are not merged and will use the first checkpoint's values
|
| 169 |
+
- Processing is done in float32 precision to maintain numerical stability during merging. The original data types are preserved when saving
|
| 170 |
+
|
| 171 |
+
<details>
|
| 172 |
+
<summary>日本語</summary>
|
| 173 |
+
|
| 174 |
+
LoRA Post-Hoc EMA(指数移動平均)マージは、複数のLoRAチェックポイントファイルを単一の、より安定したモデルに結合する手法です。スクリプトでは、修正時刻でソート(古い順)された複数のチェックポイントに対して指定された減衰率で指数移動平均を適用します。減衰率は指定可能です。
|
| 175 |
+
|
| 176 |
+
Post-Hoc EMA方法の動作:
|
| 177 |
+
|
| 178 |
+
1. チェックポイントファイルを修正時刻順(古いものから新しいものへ)にソート
|
| 179 |
+
2. 最古のチェックポイントをベースとして使用
|
| 180 |
+
3. 減衰率(beta)を使って後続のチェックポイントを反復的にマージ
|
| 181 |
+
4. オプションで、マージプロセス全体で2つのベータ値間の線形補間を使用
|
| 182 |
+
|
| 183 |
+
疑似コードによるイメージ:複数のチェックポイントをbeta=0.95でマージする場合、次のように計算されます。
|
| 184 |
+
|
| 185 |
+
```
|
| 186 |
+
beta = 0.95
|
| 187 |
+
checkpoints = [checkpoint1, checkpoint2, checkpoint3] # チェックポイントのリスト
|
| 188 |
+
merged_weights = checkpoints[0] # 最初のチェックポイントをベースとして使用
|
| 189 |
+
for checkpoint in checkpoints[1:]:
|
| 190 |
+
merged_weights = beta * merged_weights + (1 - beta) * checkpoint
|
| 191 |
+
```
|
| 192 |
+
|
| 193 |
+
### 主な特徴:
|
| 194 |
+
|
| 195 |
+
- **時系列順序付け**: ファイルを修正時刻で自動的にソート
|
| 196 |
+
- **設定可能な減衰率**: 単一のベータ値または2つのベータ値間の線形補間をサポート
|
| 197 |
+
- **メタデータ保持**: 最後のチェックポイントからメタデータを維持・更新
|
| 198 |
+
- **ハッシュ更新**: マージされた重みのモデルハッシュを再計算
|
| 199 |
+
- **データ型保持**: テンソルの元のデータ型を維持
|
| 200 |
+
|
| 201 |
+
### 使用法
|
| 202 |
+
|
| 203 |
+
LoRA Post-Hoc EMAマージは独立したスクリプトとして提供されています:
|
| 204 |
+
|
| 205 |
+
```bash
|
| 206 |
+
python src/musubi_tuner/lora_post_hoc_ema.py checkpoint1.safetensors checkpoint2.safetensors checkpoint3.safetensors --output_file merged_lora.safetensors --beta 0.95
|
| 207 |
+
```
|
| 208 |
+
|
| 209 |
+
### コマンドラインオプション:
|
| 210 |
+
|
| 211 |
+
```
|
| 212 |
+
path [path ...]
|
| 213 |
+
マージするLoRA重みファイルのパスのリスト
|
| 214 |
+
|
| 215 |
+
--beta BETA
|
| 216 |
+
重みマージのための減衰率(デフォルト:0.95)
|
| 217 |
+
高い値(1.0に近い)は累積平均により大きな重みを与える(古いチェックポイントを重視)
|
| 218 |
+
低い値は現在のチェックポイントにより大きな重みを与える
|
| 219 |
+
|
| 220 |
+
--beta2 BETA2
|
| 221 |
+
線形補間のための第2減衰率(オプション)
|
| 222 |
+
指定された場合、減衰率はマージプロセス全体でbetaからbeta2へ線形補間される
|
| 223 |
+
|
| 224 |
+
--sigma_rel SIGMA_REL
|
| 225 |
+
Power Function EMAのための相対シグマ(オプション、beta/beta2と同時に指定できません)
|
| 226 |
+
betaを指定した場合の、最初のチェックポイントが相対的に大きな影響を持つ欠点を解決します
|
| 227 |
+
指定された場合、betaは次の論文に基づいてPower Function EMA法で計算されます:
|
| 228 |
+
https://arxiv.org/pdf/2312.02696. これによりbetaとbeta2が上書きされます。
|
| 229 |
+
|
| 230 |
+
--output_file OUTPUT_FILE
|
| 231 |
+
マージされた重みの出力ファイルパス(必須)
|
| 232 |
+
|
| 233 |
+
--no_sort
|
| 234 |
+
チェックポイントファイルのソートを無効にする(指定した順序でマージ)
|
| 235 |
+
```
|
| 236 |
+
|
| 237 |
+
### 例:
|
| 238 |
+
|
| 239 |
+
定数減衰率での基本的な使用法:
|
| 240 |
+
```bash
|
| 241 |
+
python src/musubi_tuner/lora_post_hoc_ema.py \
|
| 242 |
+
lora_epoch_001.safetensors \
|
| 243 |
+
lora_epoch_002.safetensors \
|
| 244 |
+
lora_epoch_003.safetensors \
|
| 245 |
+
--output_file lora_ema_merged.safetensors \
|
| 246 |
+
--beta 0.95
|
| 247 |
+
```
|
| 248 |
+
|
| 249 |
+
2つの減衰率間の線形補間を使用:
|
| 250 |
+
```bash
|
| 251 |
+
python src/musubi_tuner/lora_post_hoc_ema.py \
|
| 252 |
+
lora_epoch_001.safetensors \
|
| 253 |
+
lora_epoch_002.safetensors \
|
| 254 |
+
lora_epoch_003.safetensors \
|
| 255 |
+
--output_file lora_ema_interpolated.safetensors \
|
| 256 |
+
--beta 0.90 \
|
| 257 |
+
--beta2 0.95
|
| 258 |
+
```
|
| 259 |
+
|
| 260 |
+
`sigma_rel`を使用したPower Function EMA:
|
| 261 |
+
```bash
|
| 262 |
+
python src/musubi_tuner/lora_post_hoc_ema.py \
|
| 263 |
+
lora_epoch_001.safetensors \
|
| 264 |
+
lora_epoch_002.safetensors \
|
| 265 |
+
lora_epoch_003.safetensors \
|
| 266 |
+
--output_file lora_power_ema_merged.safetensors \
|
| 267 |
+
--sigma_rel 0.2
|
| 268 |
+
```
|
| 269 |
+
|
| 270 |
+
### 推奨設定の例 (30エポック学習し、 `--beta`を使用する場合)
|
| 271 |
+
|
| 272 |
+
どの設定から試せば良いか分からない場合は、まず以下の「**一般的な推奨設定**」から始めてみてください。
|
| 273 |
+
|
| 274 |
+
#### 1. 一般的な推奨設定 (まず試すべき組み合わせ)
|
| 275 |
+
|
| 276 |
+
- **対象エポック:** `15-30` (学習の後半半分)
|
| 277 |
+
- **beta:** `0.9` (バランスの取れた値)
|
| 278 |
+
|
| 279 |
+
#### 2. 早期に学習が収束した場合
|
| 280 |
+
|
| 281 |
+
- **状況:** lossが早い段階で下がり、その後は安定している。
|
| 282 |
+
- **対象エポック:** `10-30` (lossが安定し始めたエポックから最後まで)
|
| 283 |
+
- **beta:** `0.95` (対象範囲が広いので、より滑らかにする)
|
| 284 |
+
|
| 285 |
+
#### 3. 過学習を避けたい場合
|
| 286 |
+
|
| 287 |
+
- **状況:** 学習の最後の方で、生成結果が学習データに似すぎている。
|
| 288 |
+
- **対象エポック:** `15-25` (性能のピークと思われる範囲に絞る)
|
| 289 |
+
- **beta:** `0.8` (範囲の終盤を重視しつつ、多様性を残す)
|
| 290 |
+
|
| 291 |
+
**ヒント:** 最適な値はモデルやデータセットによって異なります。複数の`beta`(例: 0.8, 0.9, 0.95)を試して、生成結果を比較することをお勧めします。
|
| 292 |
+
|
| 293 |
+
### 推奨設定の例 (30エポック学習し、 `--sigma_rel`を使用する場合)
|
| 294 |
+
|
| 295 |
+
`--sigma_rel` を使用する場合、betaの減衰スケジュールはPower Function EMA法によって決定されます。以下はいくつかの開始点です。
|
| 296 |
+
|
| 297 |
+
#### 1. 一般的な推奨設定
|
| 298 |
+
- **対象エポック:** 全てのエポック(最初から最後まで)
|
| 299 |
+
- **sigma_rel:** `0.2` (一般的な開始点)
|
| 300 |
+
|
| 301 |
+
#### 2. 早期に学習が収束した場合
|
| 302 |
+
- **状況:** lossが早い段階で下がり、その後は安定している。
|
| 303 |
+
- **対象エポック:** 全てのエポック
|
| 304 |
+
- **sigma_rel:** `0.25` (初期のチェックポイントに重きを置くため、早期収束に適しています)
|
| 305 |
+
|
| 306 |
+
#### 3. 過学習を避けたい場合
|
| 307 |
+
- **状況:** 学習の最後の方で、生成結果が学習データに似すぎている。
|
| 308 |
+
- **対象エポック:** 最初のエポックから、過学習の可能性がある最後の数エポックを除外
|
| 309 |
+
- **sigma_rel:** `0.15` (終盤(ただし最後の最後ではない)のチェックポイントに重きを置き、最終段階での過学習を軽減するのに役立ちます)
|
| 310 |
+
|
| 311 |
+
**ヒント:** 最適な `sigma_rel` の値は、データセット、モデル、学習期間によって異なる場合があります。実験を推奨します。値は通常0.1から0.5の範囲です。`sigma_rel` とエポックごとの計算された `beta` 値の関係を示すグラフは、その挙動を理解するのに役立つよう後ほど提供する予定です。
|
| 312 |
+
|
| 313 |
+
### 注意点:
|
| 314 |
+
|
| 315 |
+
- ファイルは修正時刻で自動的にソートされるため、コマンドラインでの順序は関係ありません
|
| 316 |
+
- `--sigma_rel`オプションは`--beta`および`--beta2`と相互に排他的です。`--sigma_rel`が指定された場合、それがベータ値を決定し、指定された`--beta`または`--beta2`は無視されます。
|
| 317 |
+
- マージする全てのチェックポイントファイルは、ひとつの学習で、エポックごと、またはステップごとに保存されたモデルである必要があります
|
| 318 |
+
- 形状が一致していればマージはできますが、Post Hoc EMAとしては正しく動作しない可能性があります
|
| 319 |
+
- alpha値はすべてのチェックポイントで同じである必要があります
|
| 320 |
+
- マージされたモデルのメタデータは、最後のチェックポイントのものが利用されます。ハッシュ値のみが再計算されます
|
| 321 |
+
- 浮動小数点以外の、long、int、boolなどのテンソルはマージされません(最初のチェックポイントのものが使用されます)
|
| 322 |
+
- マージ中の数値安定性を維持するためにfloat32精度で計算されます。保存時は元のデータ型が維持されます
|
| 323 |
+
|
| 324 |
+
</details>
|
| 325 |
+
|
| 326 |
+
## Image Captioning with Qwen2.5-VL / Qwen2.5-VLによる画像キャプション生成
|
| 327 |
+
|
| 328 |
+
The `caption_images_by_qwen_vl.py` script automatically generates captions for a directory of images using a fine-tuned Qwen2.5-VL model. It's designed to help prepare datasets for training by creating captions from the images themselves.
|
| 329 |
+
|
| 330 |
+
The Qwen2.5-VL model used in Qwen-Image is not confirmed to be the same as the original Qwen2.5-VL-Instruct model, but it appears to work for caption generation based on the tests conducted.
|
| 331 |
+
|
| 332 |
+
<details>
|
| 333 |
+
<summary>日本語</summary>
|
| 334 |
+
|
| 335 |
+
`caption_images_by_qwen_vl.py`スクリプトは、Qwen2.5-VLモデルを使用して、指定されたディレクトリ内の画像に対するキャプションを自動生成します。画像自体からキャプションを作成することで、学習用データセットの準備を支援することを目的としています。
|
| 336 |
+
|
| 337 |
+
Qwen-Imageで使用されているQwen2.5-VLモデルは、元のQwen2.5-VL-Instructモデルと同じかどうか不明ですが、試した範囲ではキャプション生成も動作するようです。
|
| 338 |
+
|
| 339 |
+
</details>
|
| 340 |
+
|
| 341 |
+
### Arguments
|
| 342 |
+
|
| 343 |
+
- `--image_dir` (required): Path to the directory containing the images to be captioned.
|
| 344 |
+
- `--model_path` (required): Path to the Qwen2.5-VL model. See [here](./qwen_image.md#download-the-model--モデルのダウンロード) for instructions.
|
| 345 |
+
- `--output_file` (optional): Path to the output JSONL file. This is required if `--output_format` is `jsonl`.
|
| 346 |
+
- `--max_new_tokens` (optional, default: 1024): The maximum number of new tokens to generate for each caption.
|
| 347 |
+
- `--prompt` (optional, default: see script): A custom prompt to use for caption generation. You can use `\n` for newlines.
|
| 348 |
+
- `--max_size` (optional, default: 1280): The maximum size of the image. Images are resized to fit within a `max_size` x `max_size` area while maintaining aspect ratio.
|
| 349 |
+
- `--fp8_vl` (optional, flag): If specified, the Qwen2.5-VL model is loaded in fp8 precision for lower memory usage.
|
| 350 |
+
- `--output_format` (optional, default: `jsonl`): The output format. Can be `jsonl` to save all captions in a single JSONL file, or `text` to save a separate `.txt` file for each image.
|
| 351 |
+
|
| 352 |
+
`--max_size` can be reduced to decrease the image size passed to the VLM. This can reduce the memory usage of the VLM, but may also decrease the quality of the generated captions.
|
| 353 |
+
|
| 354 |
+
The default prompt is defined in the [source file](/src/musubi_tuner/caption_images_by_qwen_vl.py). It is based on the [Qwen-Image Technical Report](https://arxiv.org/abs/2508.02324).
|
| 355 |
+
|
| 356 |
+
<details>
|
| 357 |
+
<summary>日本語</summary>
|
| 358 |
+
|
| 359 |
+
- `--image_dir` (必須): キャプションを生成する画像が含まれるディレクトリへのパス。
|
| 360 |
+
- `--model_path` (必須): Qwen2.5-VLモデルへのパス。詳細は[こちら](./qwen_image.md#download-the-model--モデルのダウンロード)を参照してください。
|
| 361 |
+
- `--output_file` (任意): 出力先のJSONLファイルへのパス。`--output_format`が`jsonl`の場合に必須です。
|
| 362 |
+
- `--max_new_tokens` (任意, デフォルト: 1024): 各キャプションで生成する新しいトークンの最大数。
|
| 363 |
+
- `--prompt` (任意, デフォルト: スクリプト内参照): キャプション生成に使用するカスタムプロンプト。`\n`で改行を指定できます。
|
| 364 |
+
- `--max_size` (任意, デフォルト: 1280): 画像の最大サイズ。アスペクト比を維持したまま、画像の合計ピクセル数が`max_size` x `max_size`の領域に収まるようにリサイズされます。
|
| 365 |
+
- `--fp8_vl` (任意, フラグ): 指定された場合、Qwen2.5-VLモデルがfp8精度で読み込まれ、メモリ使用量が削減されます。
|
| 366 |
+
- `--output_format` (任意, デフォルト: `jsonl`): 出力形式。`jsonl`を指定するとすべてのキャプションが単一のJSONLファイルに保存され、`text`を指定すると画像ごとに個別の`.txt`ファイルが保存されます。
|
| 367 |
+
|
| 368 |
+
`--max_size` を小さくするとVLMに渡される画像サイズが小さくなります。これにより、VLMのメモリ使用量が削減されますが、生成されるキャプションの品質が低下する可能性があります。
|
| 369 |
+
|
| 370 |
+
プロンプトのデフォルトは、[ソースファイル](/src/musubi_tuner/caption_images_by_qwen_vl.py)内で定義されています。[Qwen-Image Technical Report](https://arxiv.org/abs/2508.02324)を参考にしたものです。
|
| 371 |
+
|
| 372 |
+
</details>
|
| 373 |
+
|
| 374 |
+
### Usage Examples
|
| 375 |
+
|
| 376 |
+
**1. Basic Usage (JSONL Output)**
|
| 377 |
+
|
| 378 |
+
```bash
|
| 379 |
+
python src/musubi_tuner/caption_images_by_qwen_vl.py \
|
| 380 |
+
--image_dir /path/to/images \
|
| 381 |
+
--model_path /path/to/qwen_model.safetensors \
|
| 382 |
+
--output_file /path/to/captions.jsonl
|
| 383 |
+
```
|
| 384 |
+
|
| 385 |
+
**2. Text File Output**
|
| 386 |
+
|
| 387 |
+
This will create a `.txt` file with the same name as each image in the `/path/to/images` directory.
|
| 388 |
+
|
| 389 |
+
```bash
|
| 390 |
+
python src/musubi_tuner/caption_images_by_qwen_vl.py \
|
| 391 |
+
--image_dir /path/to/images \
|
| 392 |
+
--model_path /path/to/qwen_model.safetensors \
|
| 393 |
+
--output_format text
|
| 394 |
+
```
|
| 395 |
+
|
| 396 |
+
**3. Advanced Usage (fp8, Custom Prompt, and Max Size)**
|
| 397 |
+
|
| 398 |
+
```bash
|
| 399 |
+
python src/musubi_tuner/caption_images_by_qwen_vl.py \
|
| 400 |
+
--image_dir /path/to/images \
|
| 401 |
+
--model_path /path/to/qwen_model.safetensors \
|
| 402 |
+
--output_file /path/to/captions.jsonl \
|
| 403 |
+
--fp8_vl \
|
| 404 |
+
--max_size 1024 \
|
| 405 |
+
--prompt "A detailed and descriptive caption for this image is:\n"
|
| 406 |
+
```
|
docs/torch_compile.md
ADDED
|
@@ -0,0 +1,399 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# torch.compile Support
|
| 2 |
+
|
| 3 |
+
## Overview / 概要
|
| 4 |
+
|
| 5 |
+
This document describes the `torch.compile` optimization feature in Musubi Tuner. PyTorch's `torch.compile` is a just-in-time (JIT) compilation feature that can significantly improve training and inference performance by optimizing model execution.
|
| 6 |
+
|
| 7 |
+
For technical details and implementation specifics, please refer to [Pull Request #722](https://github.com/kohya-ss/musubi-tuner/pull/722).
|
| 8 |
+
|
| 9 |
+
Also, refer to the official PyTorch documentation: https://docs.pytorch.org/tutorials/intermediate/torch_compile_tutorial.html#introduction-to-torch-compile
|
| 10 |
+
|
| 11 |
+
Note: `torch.compile` may not work well in various situations. Please refer to the "Limitations and Known Issues" section below for details. If it does not work, please use the traditional method for training/inference.
|
| 12 |
+
|
| 13 |
+
<details>
|
| 14 |
+
<summary>日本語</summary>
|
| 15 |
+
|
| 16 |
+
このドキュメントでは、Musubi Tunerにおける`torch.compile`最適化機能について説明します。PyTorchの`torch.compile`は、モデルの実行を最適化することで学習と推論のパフォーマンスを大幅に向上させることができるジャストインタイム(JIT)コンパイル機能です。
|
| 17 |
+
|
| 18 |
+
技術的な詳細や実装の詳細については、[Pull Request #722](https://github.com/kohya-ss/musubi-tuner/pull/722)を参照してください。
|
| 19 |
+
|
| 20 |
+
PyTorchの公式ドキュメントも参照してください: https://docs.pytorch.org/tutorials/intermediate/torch_compile_tutorial.html#introduction-to-torch-compile
|
| 21 |
+
|
| 22 |
+
※ `torch.compile`は様々な要因でうまく動作しない場合があります。詳細は以下の「制限事項と既知の問題」セクションを参照してください。また動作しない場合には従来の方法での学習/推論を行ってください。
|
| 23 |
+
|
| 24 |
+
</details>
|
| 25 |
+
|
| 26 |
+
### Prerequisites / 前提条件
|
| 27 |
+
|
| 28 |
+
- triton is required for `torch.compile` to work effectively. For Windows, see [triton-windows repository](https://github.com/woct0rdho/triton-windows) for installation instructions.
|
| 29 |
+
- MSVC compiler is required on Windows for `--compile_dynamic` option. Visual Studio 2022 with C++ development tools or Visual Studio Build Tools 2022 is recommended. See [Windows Requirements for `--compile_dynamic`](#windows-requirements-for---compile_dynamic--windowsでの---compile_dynamic-の要件).
|
| 30 |
+
|
| 31 |
+
<details>
|
| 32 |
+
<summary>日本語</summary>
|
| 33 |
+
|
| 34 |
+
- `torch.compile`を効果的に動作させるにはtritonが必要です。Windowsの場合、インストール手順については[triton-windowsリポジトリ](https://github.com/woct0rdho/triton-windows)を参照してください。
|
| 35 |
+
- Windowsで`--compile_dynamic`オプションを使用するにはMSVCコンパイラが必要です。Visual Studio 2022のC++開発ツールまたはVisual Studio Build Tools 2022の使用を推奨します。[`--compile_dynamic`のWindows要件](#windows-requirements-for---compile_dynamic--windowsでの---compile_dynamic-の要件)を参照してください。
|
| 36 |
+
|
| 37 |
+
</details>
|
| 38 |
+
|
| 39 |
+
### Performance Improvements / パフォーマンス向上
|
| 40 |
+
|
| 41 |
+
The performance gains vary depending on hardware and settings. Here are some examples:
|
| 42 |
+
|
| 43 |
+
**Qwen-Image, 1328×1328, BS1: RTX A6000, Power Limit 180W, Windows:**
|
| 44 |
+
- Default mode: ~10.5% faster
|
| 45 |
+
- max-autotune-no-cudagraphs: ~11.1% faster
|
| 46 |
+
|
| 47 |
+
**RTX PRO 6000 Blackwell, Power Limit 250W, Windows:**
|
| 48 |
+
- Default mode: ~18.8% faster
|
| 49 |
+
- max-autotune-no-cudagraphs: ~25.2% faster
|
| 50 |
+
|
| 51 |
+
<details>
|
| 52 |
+
<summary>日本語</summary>
|
| 53 |
+
|
| 54 |
+
パフォーマンス向上は、ハードウェアと設定によって異なります。以下は一例です:
|
| 55 |
+
|
| 56 |
+
**Qwen-Image, 1328×1328, BS1: RTX A6000, Power Limit 180W, Windows:**
|
| 57 |
+
- デフォルトモード: 約10.5%高速化
|
| 58 |
+
- max-autotune-no-cudagraphs: 約11.1%高速化
|
| 59 |
+
|
| 60 |
+
**RTX PRO 6000 Blackwell, Power Limit 250W, Windows:**
|
| 61 |
+
- デフォルトモード: 約18.8%高速化
|
| 62 |
+
- max-autotune-no-cudagraphs: 約25.2%高速化
|
| 63 |
+
|
| 64 |
+
</details>
|
| 65 |
+
|
| 66 |
+
## Supported Architectures / サポートされているアーキテクチャ
|
| 67 |
+
|
| 68 |
+
`torch.compile` is supported for both training and inference in the following architectures:
|
| 69 |
+
|
| 70 |
+
- HunyuanVideo
|
| 71 |
+
- Wan2.1/2.2
|
| 72 |
+
- FramePack
|
| 73 |
+
- FLUX.1 Kontext
|
| 74 |
+
- Qwen-Image / Qwen-Image-Edit / Qwen-Image-Edit-2509
|
| 75 |
+
|
| 76 |
+
<details>
|
| 77 |
+
<summary>日本語</summary>
|
| 78 |
+
|
| 79 |
+
以下のアーキテクチャで、学習と推論の両方において`torch.compile`がサポートされています:
|
| 80 |
+
|
| 81 |
+
- HunyuanVideo
|
| 82 |
+
- Wan2.1/2.2
|
| 83 |
+
- FramePack
|
| 84 |
+
- FLUX.1 Kontext
|
| 85 |
+
- Qwen-Image / Qwen-Image-Edit / Qwen-Image-Edit-2509
|
| 86 |
+
|
| 87 |
+
</details>
|
| 88 |
+
|
| 89 |
+
## Command Line Arguments / コマンドライン引数
|
| 90 |
+
|
| 91 |
+
### Basic Arguments / 基本的な引数
|
| 92 |
+
|
| 93 |
+
- `--compile`: Enable torch.compile optimization
|
| 94 |
+
- `--compile_backend`: Backend to use (default: `inductor`)
|
| 95 |
+
- `--compile_mode`: Compilation mode (default: `default` for training, `max-autotune-no-cudagraphs` for inference)
|
| 96 |
+
- Choices: `default`, `reduce-overhead`, `max-autotune`, `max-autotune-no-cudagraphs`
|
| 97 |
+
- `--compile_dynamic`: Enable dynamic shapes support (default is None, equivalent to `auto`) (Requires Visual Studio 2022 C++ compiler on Windows)
|
| 98 |
+
- Choices: `true`, `false`, `auto`
|
| 99 |
+
- `--compile_fullgraph`: Enable fullgraph mode
|
| 100 |
+
- `--compile_cache_size_limit`: Set cache size limit (default: PyTorch default, typically 8-32, recommended: 32)
|
| 101 |
+
|
| 102 |
+
So far, it has been observed that setting `compile_mode` to `max-autotune` may not work in some cases.
|
| 103 |
+
Also, `compile_fullgraph` may not work depending on the architecture.
|
| 104 |
+
|
| 105 |
+
If `compile_dynamic` is not set to `true`, recompilation will occur each time the shape of the model input changes. This may result in longer training times for the first epoch, but subsequent epochs will be faster.
|
| 106 |
+
|
| 107 |
+
### Additional Performance Arguments / 追加のパフォーマンス引数
|
| 108 |
+
|
| 109 |
+
- `--cuda_allow_tf32`: Allow TF32 precision on Ampere or newer GPUs (improves performance)
|
| 110 |
+
- `--cuda_cudnn_benchmark`: Enable cuDNN benchmark mode (may improve performance)
|
| 111 |
+
|
| 112 |
+
<details>
|
| 113 |
+
<summary>日本語</summary>
|
| 114 |
+
|
| 115 |
+
### 基本的な引数
|
| 116 |
+
|
| 117 |
+
- `--compile`: torch.compile最適化を有効にする
|
| 118 |
+
- `--compile_backend`: 使用するバックエンド(デフォルト: `inductor`)
|
| 119 |
+
- `--compile_mode`: コンパイルモード(デフォルト: 学習時は`default`、推論時は`max-autotune-no-cudagraphs`)
|
| 120 |
+
- 選択肢: `default`, `reduce-overhead`, `max-autotune`, `max-autotune-no-cudagraphs`
|
| 121 |
+
- `--compile_dynamic`: 動的形状サポートを指定する(デフォルトは None で `auto` 相当)(Windows環境ではVisual Studio 2022のC++コンパイラが必要)
|
| 122 |
+
- 選択肢: `true`, `false`, `auto`
|
| 123 |
+
- `--compile_fullgraph`: フルグラフモードを有効にする
|
| 124 |
+
- `--compile_cache_size_limit`: キャッシュサイズ制限を設定(デフォルト: PyTorchのデフォルト、通常8-32、推奨: 32)
|
| 125 |
+
|
| 126 |
+
これまでに確認したところ、`compile_mode`は`max-autotune`に設定すると動作しないケースがあるようです。
|
| 127 |
+
また、`compile_fullgraph`はアーキテクチャにより動作しない場合があります。
|
| 128 |
+
|
| 129 |
+
`compile_dynamic`で `true` を指定しない場合、モデルの入力の形状が変わるごとに再コンパイルが発生します。最初のエポックの学習時間が長くなる可能性がありますが、その後のエポックでは高速化されます。
|
| 130 |
+
|
| 131 |
+
### 追加のパフォーマンス引数
|
| 132 |
+
|
| 133 |
+
- `--cuda_allow_tf32`: Ampereまたはそれ以降のGPUでTF32精度を許可する(パフォーマンス向上)
|
| 134 |
+
- `--cuda_cudnn_benchmark`: cuDNNベンチマークモードを有効にする(パフォーマンスが向上する可能性がある)
|
| 135 |
+
|
| 136 |
+
</details>
|
| 137 |
+
|
| 138 |
+
## Usage Examples / 使用例
|
| 139 |
+
|
| 140 |
+
### Training / 学習
|
| 141 |
+
|
| 142 |
+
#### Basic Usage / 基本的な使い方
|
| 143 |
+
|
| 144 |
+
```bash
|
| 145 |
+
accelerate launch --num_cpu_threads_per_process 1 --mixed_precision bf16 \
|
| 146 |
+
src/musubi_tuner/qwen_image_train_network.py \
|
| 147 |
+
--dit path/to/dit \
|
| 148 |
+
--dataset_config path/to/config.toml \
|
| 149 |
+
(... other args ...) \
|
| 150 |
+
--compile \
|
| 151 |
+
--compile_cache_size_limit 32
|
| 152 |
+
```
|
| 153 |
+
|
| 154 |
+
※ Windows Command Prompt users should use ^ at the end of lines.
|
| 155 |
+
|
| 156 |
+
#### Advanced Usage / 高度な使い方
|
| 157 |
+
|
| 158 |
+
```bash
|
| 159 |
+
accelerate launch --num_cpu_threads_per_process 1 --mixed_precision bf16 \
|
| 160 |
+
src/musubi_tuner/hv_train_network.py \
|
| 161 |
+
--dit path/to/dit \
|
| 162 |
+
--dataset_config path/to/config.toml \
|
| 163 |
+
(... other args ...) \
|
| 164 |
+
--compile \
|
| 165 |
+
--compile_mode max-autotune-no-cudagraphs \
|
| 166 |
+
--compile_cache_size_limit 32 \
|
| 167 |
+
--cuda_allow_tf32 \
|
| 168 |
+
--cuda_cudnn_benchmark
|
| 169 |
+
```
|
| 170 |
+
|
| 171 |
+
<details>
|
| 172 |
+
<summary>日本語</summary>
|
| 173 |
+
|
| 174 |
+
### 学習
|
| 175 |
+
|
| 176 |
+
#### 基本的な使い方
|
| 177 |
+
|
| 178 |
+
```bash
|
| 179 |
+
accelerate launch --num_cpu_threads_per_process 1 --mixed_precision bf16 \
|
| 180 |
+
src/musubi_tuner/qwen_image_train_network.py \
|
| 181 |
+
--dit path/to/dit \
|
| 182 |
+
--dataset_config path/to/config.toml \
|
| 183 |
+
(... その他の引数 ...) \
|
| 184 |
+
--compile \
|
| 185 |
+
--compile_cache_size_limit 32
|
| 186 |
+
```
|
| 187 |
+
|
| 188 |
+
※ Windowsでコマンドプロンプトを使用する場合、末尾は ^ を使用してください。
|
| 189 |
+
|
| 190 |
+
#### 高度な使い方
|
| 191 |
+
|
| 192 |
+
```bash
|
| 193 |
+
accelerate launch --num_cpu_threads_per_process 1 --mixed_precision bf16 \
|
| 194 |
+
src/musubi_tuner/hv_train_network.py \
|
| 195 |
+
--dit path/to/dit \
|
| 196 |
+
--dataset_config path/to/config.toml \
|
| 197 |
+
(... その他の引数 ...) \
|
| 198 |
+
--compile \
|
| 199 |
+
--compile_mode max-autotune-no-cudagraphs \
|
| 200 |
+
--compile_cache_size_limit 32 \
|
| 201 |
+
--cuda_allow_tf32 \
|
| 202 |
+
--cuda_cudnn_benchmark
|
| 203 |
+
```
|
| 204 |
+
|
| 205 |
+
</details>
|
| 206 |
+
|
| 207 |
+
### Inference / 推論
|
| 208 |
+
|
| 209 |
+
```bash
|
| 210 |
+
python src/musubi_tuner/qwen_image_generate_image.py \
|
| 211 |
+
--dit path/to/dit \
|
| 212 |
+
--vae path/to/vae \
|
| 213 |
+
--text_encoder path/to/text_encoder \
|
| 214 |
+
--prompt "A beautiful landscape" \
|
| 215 |
+
--compile \
|
| 216 |
+
--compile_mode max-autotune-no-cudagraphs
|
| 217 |
+
```
|
| 218 |
+
|
| 219 |
+
The existing `--compile_args` option is deprecated. It is still available for now but will be removed in the future. Please use the new individual arguments as shown in the example above.
|
| 220 |
+
|
| 221 |
+
<details>
|
| 222 |
+
<summary>日本語</summary>
|
| 223 |
+
|
| 224 |
+
### 推論
|
| 225 |
+
|
| 226 |
+
```bash
|
| 227 |
+
python src/musubi_tuner/qwen_image_generate_image.py \
|
| 228 |
+
--dit path/to/dit \
|
| 229 |
+
--vae path/to/vae \
|
| 230 |
+
--text_encoder path/to/text_encoder \
|
| 231 |
+
--prompt "A beautiful landscape" \
|
| 232 |
+
--compile \
|
| 233 |
+
--compile_mode max-autotune-no-cudagraphs
|
| 234 |
+
```
|
| 235 |
+
|
| 236 |
+
既存の `--compile_args` オプションは非推奨となりました。現時点では使用可能ですが、将来的には削除される予定です。上の使用例のように、新しい個別の引数を使用してください。
|
| 237 |
+
|
| 238 |
+
</details>
|
| 239 |
+
|
| 240 |
+
## Limitations and Known Issues / 制限事項と既知の問題
|
| 241 |
+
|
| 242 |
+
### Incompatible Options and Constraints / 互換性のないオプションと制約
|
| 243 |
+
|
| 244 |
+
- **`--compile_fullgraph` and `--split_attn`**: These options cannot be used together. The `--split_attn` option uses dynamic control flow that is incompatible with fullgraph mode.
|
| 245 |
+
- **`--blocks_to_swap`**: When using block swapping, `torch.compile` automatically disables compilation for Linear layers in swapped blocks to avoid conflicts. This may limit performance improvements.
|
| 246 |
+
|
| 247 |
+
### Windows Requirements for `--compile_dynamic` / Windowsでの `--compile_dynamic` の要件
|
| 248 |
+
|
| 249 |
+
**IMPORTANT**: On Windows, using `--compile_dynamic` requires:
|
| 250 |
+
|
| 251 |
+
1. **Visual Studio 2022** with C++ development tools installed
|
| 252 |
+
2. Either:
|
| 253 |
+
- Running the training/inference script from **"x64 Native Tools Command Prompt for VS 2022"**
|
| 254 |
+
- Running the training/inference script after setting environment variables by executing `vcvars64.bat` located in the Visual Studio installation directory. For example: `"C:\Program Files (x86)\Microsoft Visual Studio\2022\BuildTools\VC\Auxiliary\Build\vcvars64.bat"`
|
| 255 |
+
|
| 256 |
+
If you encounter compilation errors when using `--compile_dynamic` on Windows, make sure you are running from the correct command prompt.
|
| 257 |
+
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
<details>
|
| 261 |
+
<summary>日本語</summary>
|
| 262 |
+
|
| 263 |
+
**互換性のないオプションと制約**
|
| 264 |
+
|
| 265 |
+
- **`--compile_fullgraph` と `--split_attn`**: これらのオプションは同時に使用できません。`--split_attn`オプションは動的な制御フローを使用しており、フルグラフモードと互換性がありません。
|
| 266 |
+
- **`--blocks_to_swap`**: ブロックスワッピングを使用する場合、`torch.compile`は衝突を避けるため、スワップされるブロック内のLinearレイヤーのコンパイルを自動的に無効にします。そのため、速度向上が制限される可能性があります。
|
| 267 |
+
|
| 268 |
+
**Windowsでの `--compile_dynamic` の要件**
|
| 269 |
+
|
| 270 |
+
**重要**: Windowsで`--compile_dynamic`を使用する場合、以下が必要です:
|
| 271 |
+
|
| 272 |
+
1. **Visual Studio 2022** とC++開発ツールのインストール
|
| 273 |
+
2. 以下のいずれか:
|
| 274 |
+
- **"x64 Native Tools Command Prompt for VS 2022"** からのスクリプト実行
|
| 275 |
+
- vcvars64.batを実行して環境変数を設定した後にスクリプトを実行:Visual Studioのインストールディレクトリにある`vcvars64.bat`を実行して環境変数を設定します。例: `"C:\Program Files (x86)\Microsoft Visual Studio\2022\BuildTools\VC\Auxiliary\Build\vcvars64.bat"`
|
| 276 |
+
|
| 277 |
+
Windowsで`--compile_dynamic`を使用してコンパイルエラーが発生する場合は、正しい手順でコマンドプロンプトから実行していることを確認してください。
|
| 278 |
+
|
| 279 |
+
PyTorchの以下の公式ドキュメントも参照してください: https://docs.pytorch.org/tutorials/unstable/inductor_windows.html#install-a-compiler
|
| 280 |
+
|
| 281 |
+
</details>
|
| 282 |
+
|
| 283 |
+
## Recommended Settings / 推奨設定
|
| 284 |
+
|
| 285 |
+
### For Training / 学習向け
|
| 286 |
+
|
| 287 |
+
```bash
|
| 288 |
+
--compile \
|
| 289 |
+
--compile_mode default \
|
| 290 |
+
--compile_cache_size_limit 32 \
|
| 291 |
+
--cuda_allow_tf32 \
|
| 292 |
+
--cuda_cudnn_benchmark
|
| 293 |
+
```
|
| 294 |
+
|
| 295 |
+
### For Inference / 推論向け
|
| 296 |
+
|
| 297 |
+
```bash
|
| 298 |
+
--compile \
|
| 299 |
+
--compile_mode max-autotune-no-cudagraphs \
|
| 300 |
+
--compile_cache_size_limit 32
|
| 301 |
+
```
|
| 302 |
+
|
| 303 |
+
<details>
|
| 304 |
+
<summary>日本語</summary>
|
| 305 |
+
|
| 306 |
+
### 学習向け
|
| 307 |
+
|
| 308 |
+
```bash
|
| 309 |
+
--compile \
|
| 310 |
+
--compile_mode default \
|
| 311 |
+
--compile_cache_size_limit 32 \
|
| 312 |
+
--cuda_allow_tf32 \
|
| 313 |
+
--cuda_cudnn_benchmark
|
| 314 |
+
```
|
| 315 |
+
|
| 316 |
+
### 推論向け
|
| 317 |
+
|
| 318 |
+
```bash
|
| 319 |
+
--compile \
|
| 320 |
+
--compile_mode max-autotune-no-cudagraphs \
|
| 321 |
+
--compile_cache_size_limit 32
|
| 322 |
+
```
|
| 323 |
+
|
| 324 |
+
</details>
|
| 325 |
+
|
| 326 |
+
## Compilation Modes / コンパイルモード
|
| 327 |
+
|
| 328 |
+
- **`default`**: Balanced compilation with good performance and reasonable compile times. Recommended for training.
|
| 329 |
+
- **`reduce-overhead`**: Reduces Python overhead, useful for small models or frequent small operations.
|
| 330 |
+
- **`max-autotune`**: Maximum optimization with longer compile times. May provide best performance but increases initial compilation time. May not work on some architectures.
|
| 331 |
+
- **`max-autotune-no-cudagraphs`**: Similar to max-autotune but without CUDA graphs. Recommended for inference as it provides good performance improvements with better compatibility.
|
| 332 |
+
|
| 333 |
+
<details>
|
| 334 |
+
<summary>日本語</summary>
|
| 335 |
+
|
| 336 |
+
- **`default`**: バランスの取れたコンパイルで、適切なパフォーマンスと合理的なコンパイル時間を提供します。学習に推奨されます。
|
| 337 |
+
- **`reduce-overhead`**: Pythonのオーバーヘッドを削減します。小さなモデルや頻繁な小規模操作に有用です。
|
| 338 |
+
- **`max-autotune`**: コンパイル時間は長くなりますが、最大限の最適化を行います。最高のパフォーマンスを提供する可能性がありますが、初期コンパイル時間が増加します。アーキテクチャによっては動作しない場合があります。
|
| 339 |
+
- **`max-autotune-no-cudagraphs`**: max-autotuneと似ていますが、CUDAグラフを使用しません。良好な互換性で優れたパフォーマンス向上を提供するため、推論に推奨されます。
|
| 340 |
+
|
| 341 |
+
</details>
|
| 342 |
+
|
| 343 |
+
## Troubleshooting / トラブルシューティング
|
| 344 |
+
|
| 345 |
+
### First Iteration is Slow
|
| 346 |
+
|
| 347 |
+
This is expected behavior. `torch.compile` performs JIT compilation on the first forward pass, which takes extra time. Subsequent iterations will be much faster.
|
| 348 |
+
|
| 349 |
+
### Out of Memory Errors
|
| 350 |
+
|
| 351 |
+
If you encounter out-of-memory errors when using `torch.compile`, try:
|
| 352 |
+
- Using a smaller `--compile_cache_size_limit` value
|
| 353 |
+
- Reducing batch size
|
| 354 |
+
- Using `--compile_mode default` instead of `max-autotune`
|
| 355 |
+
|
| 356 |
+
### Compilation Errors on Windows
|
| 357 |
+
|
| 358 |
+
If using `--compile_dynamic` on Windows and encountering compilation errors:
|
| 359 |
+
1. Ensure Visual Studio 2022 with C++ development tools is installed
|
| 360 |
+
2. Run the script from "x64 Native Tools Command Prompt for VS 2022"
|
| 361 |
+
3. If issues persist, try without `--compile_dynamic`
|
| 362 |
+
|
| 363 |
+
<details>
|
| 364 |
+
<summary>日本語</summary>
|
| 365 |
+
|
| 366 |
+
**最初のイテレーションが遅い**
|
| 367 |
+
|
| 368 |
+
これは予想される動作です。`torch.compile`は最初のforward passでJITコンパイルを実行するため、追加の時間がかかります。その後のイテレーションははるかに高速になります。
|
| 369 |
+
|
| 370 |
+
**メモリ不足エラー**
|
| 371 |
+
|
| 372 |
+
`torch.compile`を使用してメモリ不足エラーが発生する場合は、次を試してください:
|
| 373 |
+
- より小さな`--compile_cache_size_limit`値を使用する
|
| 374 |
+
- バッチサイズを減らす
|
| 375 |
+
- `max-autotune`の代わりに`--compile_mode default`を使用する
|
| 376 |
+
|
| 377 |
+
**Windowsでのコンパイルエラー**
|
| 378 |
+
|
| 379 |
+
Windowsで`--compile_dynamic`を使用してコンパイルエラーが発生する場合:
|
| 380 |
+
1. C++開発ツールを含むVisual Studio 2022がインストールされていることを確認する
|
| 381 |
+
2. "x64 Native Tools Command Prompt for VS 2022"からスクリプトを実行する
|
| 382 |
+
3. 問題が解決しない場合は、`--compile_dynamic`なしで試す
|
| 383 |
+
|
| 384 |
+
</details>
|
| 385 |
+
|
| 386 |
+
## Additional Resources / 追加リソース
|
| 387 |
+
|
| 388 |
+
- [PyTorch torch.compile documentation](https://docs.pytorch.org/tutorials/intermediate/torch_compile_tutorial.html)
|
| 389 |
+
- [PyTorch Inductor Windows documentation](https://docs.pytorch.org/tutorials/unstable/inductor_windows.html)
|
| 390 |
+
- [Pull Request #722](https://github.com/kohya-ss/musubi-tuner/pull/722) - Technical implementation details
|
| 391 |
+
|
| 392 |
+
<details>
|
| 393 |
+
<summary>日本語</summary>
|
| 394 |
+
|
| 395 |
+
- [PyTorch torch.compile ドキュメント](https://docs.pytorch.org/tutorials/intermediate/torch_compile_tutorial.html)
|
| 396 |
+
- [PyTorch Inductor Windows ドキュメント](https://docs.pytorch.org/tutorials/unstable/inductor_windows.html)
|
| 397 |
+
- [Pull Request #722](https://github.com/kohya-ss/musubi-tuner/pull/722) - 技術的な実装の詳細
|
| 398 |
+
|
| 399 |
+
</details>
|
docs/wan.md
ADDED
|
@@ -0,0 +1,628 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
> 📝 Click on the language section to expand / 言語をクリックして展開
|
| 2 |
+
|
| 3 |
+
# Wan 2.1/2.2
|
| 4 |
+
|
| 5 |
+
## Overview / 概要
|
| 6 |
+
|
| 7 |
+
This is an unofficial training and inference script for [Wan2.1](https://github.com/Wan-Video/Wan2.1) and [Wan2.2](https://github.com/Wan-Video/Wan2.2). The features are as follows.
|
| 8 |
+
|
| 9 |
+
- fp8 support and memory reduction by block swap: Inference of a 720x1280x81frames videos with 24GB VRAM, training with 720x1280 images with 24GB VRAM
|
| 10 |
+
- Inference without installing Flash attention (using PyTorch's scaled dot product attention)
|
| 11 |
+
- Supports xformers (training and inference) and Sage attention (inference only)
|
| 12 |
+
- Support for Wan2.2 model architecture, only for 14B models
|
| 13 |
+
|
| 14 |
+
This feature is experimental.
|
| 15 |
+
|
| 16 |
+
<details>
|
| 17 |
+
<summary>日本語</summary>
|
| 18 |
+
|
| 19 |
+
[Wan2.1](https://github.com/Wan-Video/Wan2.1) および [Wan2.2](https://github.com/Wan-Video/Wan2.2) の非公式の学習および推論スクリプトです。
|
| 20 |
+
|
| 21 |
+
以下の特徴があります。
|
| 22 |
+
|
| 23 |
+
- fp8対応およびblock swapによる省メモリ化:720x1280x81framesの動画を24GB VRAMで推論可能、720x1280の画像での学習が24GB VRAMで可能
|
| 24 |
+
- Flash attentionのインストールなしでの実行(PyTorchのscaled dot product attentionを使用)
|
| 25 |
+
- xformers(学習と推論)およびSage attention(推論のみ)対応
|
| 26 |
+
- Wan2.2モデルアーキテクチャのサポート(14Bモデルのみ)
|
| 27 |
+
|
| 28 |
+
この機能は実験的なものです。
|
| 29 |
+
|
| 30 |
+
</details>
|
| 31 |
+
|
| 32 |
+
## Download the model / モデルのダウンロード
|
| 33 |
+
|
| 34 |
+
### Wan2.1
|
| 35 |
+
|
| 36 |
+
Download the T5 `models_t5_umt5-xxl-enc-bf16.pth` and CLIP `models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth` from the following page: https://huggingface.co/Wan-AI/Wan2.1-I2V-14B-720P/tree/main
|
| 37 |
+
|
| 38 |
+
Download the VAE from the above page `Wan2.1_VAE.pth` or download `split_files/vae/wan_2.1_vae.safetensors` from the following page: https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/tree/main/split_files/vae
|
| 39 |
+
|
| 40 |
+
Download the DiT weights from the following page: https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/tree/main/split_files/diffusion_models
|
| 41 |
+
|
| 42 |
+
Wan2.1 Fun Control model weights can be downloaded from [here](https://huggingface.co/alibaba-pai/Wan2.1-Fun-14B-Control). Navigate to each weight page and download. The Fun Control model seems to support not only T2V but also I2V tasks.
|
| 43 |
+
|
| 44 |
+
Please select the appropriate weights according to T2V, I2V, resolution, model size, etc.
|
| 45 |
+
|
| 46 |
+
`fp16` and `bf16` models can be used, and `fp8_e4m3fn` models can be used if `--fp8` (or `--fp8_base`) is specified without specifying `--fp8_scaled`. **Please note that `fp8_scaled` models are not supported even with `--fp8_scaled`.**
|
| 47 |
+
|
| 48 |
+
(Thanks to Comfy-Org for providing the repackaged weights.)
|
| 49 |
+
|
| 50 |
+
### Wan2.2
|
| 51 |
+
|
| 52 |
+
T5 is same as Wan2.1. CLIP is not required for Wan2.2.
|
| 53 |
+
|
| 54 |
+
VAE is also same as Wan2.1. Please use `Wan2.1_VAE.pth` from the above page. `Wan2.2_VAE.pth` is for 5B model, not compatible with 14B model.
|
| 55 |
+
|
| 56 |
+
Download the DiT weights from the following page: https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/tree/main/split_files/diffusion_models
|
| 57 |
+
|
| 58 |
+
The Wan2.2 model consists of two DiT models, one for high noise and one for low noise. Please download both.
|
| 59 |
+
|
| 60 |
+
`fp16` models can be used. **Please note that `fp8_scaled` models are not supported even with `--fp8_scaled`.**
|
| 61 |
+
|
| 62 |
+
### Model support matrix / モデルサポートマトリックス
|
| 63 |
+
|
| 64 |
+
* columns: training dtype (列:学習時のデータ型)
|
| 65 |
+
* rows: model dtype (行:モデルのデータ型)
|
| 66 |
+
|
| 67 |
+
| model \ training |bf16|fp16|--fp8_base|--fp8_base & --fp8_scaled|
|
| 68 |
+
|---|---|---|---|---|
|
| 69 |
+
|bf16|✓|--|✓|✓|
|
| 70 |
+
|fp16|--|✓|✓|✓|
|
| 71 |
+
|fp8_e4m3fn|--|--|✓|--|
|
| 72 |
+
|fp8_scaled|--|--|--|--|
|
| 73 |
+
|
| 74 |
+
<details>
|
| 75 |
+
<summary>日本語</summary>
|
| 76 |
+
|
| 77 |
+
### Wan2.1
|
| 78 |
+
|
| 79 |
+
T5 `models_t5_umt5-xxl-enc-bf16.pth` およびCLIP `models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth` を、次のページからダウンロードしてください:https://huggingface.co/Wan-AI/Wan2.1-I2V-14B-720P/tree/main
|
| 80 |
+
|
| 81 |
+
VAEは上のページから `Wan2.1_VAE.pth` をダウンロードするか、次のページから `split_files/vae/wan_2.1_vae.safetensors` をダウンロードしてください:https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/tree/main/split_files/vae
|
| 82 |
+
|
| 83 |
+
DiTの重みを次のページからダウンロードしてください:https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/tree/main/split_files/diffusion_models
|
| 84 |
+
|
| 85 |
+
Wan2.1 Fun Controlモデルの重みは、[こちら](https://huggingface.co/alibaba-pai/Wan2.1-Fun-14B-Control)から、それぞれの重みのページに遷移し、ダウンロードしてください。Fun ControlモデルはT2VだけでなくI2Vタスクにも対応しているようです。
|
| 86 |
+
|
| 87 |
+
T2VやI2V、解像度、モデルサイズなどにより適切な重みを選択してください。
|
| 88 |
+
|
| 89 |
+
`fp16` および `bf16` モデルを使用できます。また、`--fp8` (または`--fp8_base`)を指定し`--fp8_scaled`を指定をしないときには `fp8_e4m3fn` モデルを使用できます。**`fp8_scaled` モデルはいずれの場合もサポートされていませんのでご注意ください。**
|
| 90 |
+
|
| 91 |
+
(repackaged版の重みを提供してくださっているComfy-Orgに感謝いたします。)
|
| 92 |
+
|
| 93 |
+
### Wan2.2
|
| 94 |
+
|
| 95 |
+
T5はWan2.1と同じです。Wan2.2ではCLIPは不要です。
|
| 96 |
+
|
| 97 |
+
VAEは上のページから `Wan2.1_VAE.pth` をダウンロードしてください。`Wan2.2_VAE.pth` は5Bモデル用で、14Bモデルには対応していません。
|
| 98 |
+
|
| 99 |
+
DiTの重みを次のページからダウンロードしてください:https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/tree/main/split_files/diffusion_models
|
| 100 |
+
|
| 101 |
+
Wan2.2モデルは高ノイズ用と低ノイズ用の2つのDiTモデルで構成されています。両方をダウンロードしてください。
|
| 102 |
+
|
| 103 |
+
`fp16` モデルを使用できます。**`fp8_scaled` モデルはサポートされませんのでご注意ください。**
|
| 104 |
+
|
| 105 |
+
</details>
|
| 106 |
+
|
| 107 |
+
## Pre-caching / 事前キャッシュ
|
| 108 |
+
|
| 109 |
+
Pre-caching is almost the same as in HunyuanVideo, but some options may differ. See [HunyuanVideo documentation](./hunyuan_video.md#pre-caching--事前キャッシング) and `--help` for details.
|
| 110 |
+
|
| 111 |
+
### Latent Pre-caching
|
| 112 |
+
|
| 113 |
+
Create the cache using the following command:
|
| 114 |
+
|
| 115 |
+
```bash
|
| 116 |
+
python src/musubi_tuner/wan_cache_latents.py --dataset_config path/to/toml --vae path/to/wan_vae.safetensors
|
| 117 |
+
```
|
| 118 |
+
|
| 119 |
+
**If you train I2V models, add `--i2v` option to the above command.** For Wan2.1, add `--clip path/to/models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth` to specify the CLIP model. If not specified, the training will raise an error. For Wan2.2, CLIP model is not required.
|
| 120 |
+
|
| 121 |
+
If you're running low on VRAM, specify `--vae_cache_cpu` to use the CPU for the VAE internal cache, which will reduce VRAM usage somewhat.
|
| 122 |
+
|
| 123 |
+
The control video settings are required for training the Fun-Control model. Please refer to [Dataset Settings](./dataset_config.md#sample-for-video-dataset-with-control-images) for details.
|
| 124 |
+
|
| 125 |
+
<details>
|
| 126 |
+
<summary>日本語</summary>
|
| 127 |
+
|
| 128 |
+
事前キャッシングはHunyuanVideoとほぼ同じです。オプションが異なる場合がありますので、詳細は[HunyuanVideoのドキュメント](./hunyuan_video.md#pre-caching--事前キャッシング)および`--help`を参照してください。
|
| 129 |
+
|
| 130 |
+
latentの事前キャッシングは上のコマンド例を使用してキャッシュを作成してください。
|
| 131 |
+
|
| 132 |
+
**I2Vモデルを学習する場合は、`--i2v` オプションを上のコマンドに追加してください。**Wan2.1の場合は、`--clip path/to/models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth` を追加してCLIPモデルを指定してください。指定しないと学習時にエラーが発生します。Wan2.2ではCLIPモデルは不要です。
|
| 133 |
+
|
| 134 |
+
VRAMが不足している場合は、`--vae_cache_cpu` を指定するとVAEの内部キャッシュにCPUを使うことで、使用VRAMを多少削減できます。
|
| 135 |
+
|
| 136 |
+
Fun-Controlモデルを学習する場合は、制御用動画の設定が必要です。[データセット設定](./dataset_config.md#sample-for-video-dataset-with-control-images)を参照してください。
|
| 137 |
+
|
| 138 |
+
</details>
|
| 139 |
+
|
| 140 |
+
### Text Encoder Output Pre-caching
|
| 141 |
+
|
| 142 |
+
Text encoder output pre-caching is also almost the same as in HunyuanVideo. Create the cache using the following command:
|
| 143 |
+
|
| 144 |
+
```bash
|
| 145 |
+
python src/musubi_tuner/wan_cache_text_encoder_outputs.py --dataset_config path/to/toml --t5 path/to/models_t5_umt5-xxl-enc-bf16.pth --batch_size 16
|
| 146 |
+
```
|
| 147 |
+
|
| 148 |
+
Adjust `--batch_size` according to your available VRAM.
|
| 149 |
+
|
| 150 |
+
For systems with limited VRAM (less than ~16GB), use `--fp8_t5` to run the T5 in fp8 mode.
|
| 151 |
+
|
| 152 |
+
<details>
|
| 153 |
+
<summary>日本語</summary>
|
| 154 |
+
|
| 155 |
+
テキストエンコーダ出力の事前キャッシングもHunyuanVideoとほぼ同じです。上のコマンド例を使用してキャッシュを作成してください。
|
| 156 |
+
|
| 157 |
+
使用可能なVRAMに合わせて `--batch_size` を調整してください。
|
| 158 |
+
|
| 159 |
+
VRAMが限られているシステム(約16GB未満)の場合は、T5をfp8モードで実行するために `--fp8_t5` を使用してください。
|
| 160 |
+
|
| 161 |
+
</details>
|
| 162 |
+
|
| 163 |
+
## Training / 学習
|
| 164 |
+
|
| 165 |
+
### Training
|
| 166 |
+
|
| 167 |
+
Start training using the following command (input as a single line):
|
| 168 |
+
|
| 169 |
+
```bash
|
| 170 |
+
accelerate launch --num_cpu_threads_per_process 1 --mixed_precision bf16 src/musubi_tuner/wan_train_network.py \
|
| 171 |
+
--task t2v-1.3B \
|
| 172 |
+
--dit path/to/wan2.1_xxx_bf16.safetensors \
|
| 173 |
+
--dataset_config path/to/toml --sdpa --mixed_precision bf16 --fp8_base \
|
| 174 |
+
--optimizer_type adamw8bit --learning_rate 2e-4 --gradient_checkpointing \
|
| 175 |
+
--max_data_loader_n_workers 2 --persistent_data_loader_workers \
|
| 176 |
+
--network_module networks.lora_wan --network_dim 32 \
|
| 177 |
+
--timestep_sampling shift --discrete_flow_shift 3.0 \
|
| 178 |
+
--max_train_epochs 16 --save_every_n_epochs 1 --seed 42 \
|
| 179 |
+
--output_dir path/to/output_dir --output_name name-of-lora
|
| 180 |
+
```
|
| 181 |
+
The above is an example. The appropriate values for `timestep_sampling` and `discrete_flow_shift` need to be determined by experimentation.
|
| 182 |
+
|
| 183 |
+
For additional options, use `python src/musubi_tuner/wan_train_network.py --help` (note that many options are unverified).
|
| 184 |
+
|
| 185 |
+
`--task` is one of `t2v-1.3B`, `t2v-14B`, `i2v-14B`, `t2i-14B` (for Wan2.1 official models), `t2v-1.3B-FC`, `t2v-14B-FC`, and `i2v-14B-FC` (for Wan2.1 Fun Control model), `t2v-A14B`, `i2v-A14B` (for Wan2.2 14B models). Specify the DiT weights for the task with `--dit`.
|
| 186 |
+
|
| 187 |
+
You can limit the range of timesteps for training with `--min_timestep` and `--max_timestep`. The values are specified in the range of 0 to 1000 (not 0.0 to 1.0). See [here](./advanced_config.md#specify-time-step-range-for-training--学習時のタイムステップ範囲の指定) for details.
|
| 188 |
+
|
| 189 |
+
For Wan2.2 models, if you want to train with either the high-noise model or the low-noise model, specify the model with `--dit` as in Wan2.1. In this case, it is recommended to specify the range of timesteps described in the table below, and `--preserve_distribution_shape` to maintain the distribution shape.
|
| 190 |
+
|
| 191 |
+
If you want to train LoRA for both models simultaneously, you need to specify the low-noise model with `--dit` and the high-noise model with `--dit_high_noise`. The two models are switched at the timestep specified by `--timestep_boundary`. The default value is 0.9 for I2V and 0.875 for T2V. `--timestep_boundary` can be specified in the range of 0.0 to 1.0, or in the range of 0 to 1000.
|
| 192 |
+
|
| 193 |
+
When training Wan2.2 high and low models, you can use `--offload_inactive_dit` to offload the inactive DiT model to the CPU, which can save VRAM (only works when `--blocks_to_swap` is not specified). Please note that in Windows environment, this offloading uses shared VRAM. Even with fp8/fp8_scaled, about 42GB of shared VRAM is required for the two models combined, which means that about 96GB or more of main RAM is required. If you have less main RAM, using `--blocks_to_swap` will use less main RAM.
|
| 194 |
+
|
| 195 |
+
`--gradient_checkpointing` and `--gradient_checkpointing_cpu_offload` are available for memory savings. See [HunyuanVideo documentation](./hunyuan_video.md#memory-optimization) for details.
|
| 196 |
+
|
| 197 |
+
For Wan2.2 models, `--discrete_flow_shift` may need to be adjusted based on I2V and T2V. According to the official implementation, the shift values in inference are 12.0 for T2V and 5.0 for I2V. The shift values during training do not necessarily have to match those during inference, but they may serve as a useful reference.
|
| 198 |
+
|
| 199 |
+
`--force_v2_1_time_embedding` uses the same shape of time embedding as Wan2.1. This can reduce VRAM usage during inference and training (the larger the resolution and number of frames, the greater the reduction). Although this is different from the official implementation of Wan2.2, it seems that there is no effect on inference or training within the range that has been confirmed.
|
| 200 |
+
|
| 201 |
+
Don't forget to specify `--network_module networks.lora_wan`.
|
| 202 |
+
|
| 203 |
+
Other options are mostly the same as `hv_train_network.py`. See [HunyuanVideo documentation](./hunyuan_video.md#training--学習) and `--help` for details.
|
| 204 |
+
|
| 205 |
+
The trained LoRA weights are seemed to be compatible with ComfyUI (may depend on the nodes used).
|
| 206 |
+
|
| 207 |
+
#### Recommended Min/Max Timestep Settings for Wan2.2
|
| 208 |
+
|
| 209 |
+
| Model | Min Timestep | Max Timestep |
|
| 210 |
+
|-------|--------------|--------------|
|
| 211 |
+
| I2V low noise | 0 | 900 |
|
| 212 |
+
| I2V high noise | 900 | 1000 |
|
| 213 |
+
| T2V low noise | 0 | 875 |
|
| 214 |
+
| T2V high noise | 875 | 1000 |
|
| 215 |
+
|
| 216 |
+
<details>
|
| 217 |
+
<summary>日本語</summary>
|
| 218 |
+
|
| 219 |
+
サンプルは英語版を参照してください。
|
| 220 |
+
|
| 221 |
+
`timestep_sampling`や`discrete_flow_shift`は一例です。どのような値が適切かは実験が必要です。
|
| 222 |
+
|
| 223 |
+
その他のオプションについては `python src/musubi_tuner/wan_train_network.py --help` を使用してください(多くのオプションは未検証です)。
|
| 224 |
+
|
| 225 |
+
`--task` には `t2v-1.3B`, `t2v-14B`, `i2v-14B`, `t2i-14B` (これらはWan2.1公式モデル)、`t2v-1.3B-FC`, `t2v-14B-FC`, `i2v-14B-FC`(Wan2.1-Fun Controlモデル)、`t2v-A14B`, `i2v-A14B`(Wan2.2 14Bモデル)を指定します。`--dit`に、taskに応じたDiTの重みを指定してください。
|
| 226 |
+
|
| 227 |
+
`--min_timestep`と`--max_timestep`で学習するタイムステップの範囲を限定できます。値は0から1000の範囲で指定します。詳細は[こちら](./advanced_config.md#specify-time-step-range-for-training--学習時のタイムステップ範囲の指定)を参照してください。
|
| 228 |
+
|
| 229 |
+
Wan2.2モデルの場合、高ノイズ用モデルまたは低ノイズ用モデルのどちらかで学習する場合は、Wan2.1の場合と同様に、`--dit`にそのモデルを指定してください。またこの場合、英語版サンプル内の表に示すようにタイムステップの範囲を指定し、`--preserve_distribution_shape` を指定して分布形状を維持することをお勧めします。
|
| 230 |
+
|
| 231 |
+
両方のモデルへのLoRAを学習する場合は、`--dit`に低ノイズ用モデルを、`--dit_high_noise`に高ノイズ用モデルを指定します。2つのモデルは`--timestep_boundary`で指定されたタイムステップで切り替わります。デフォルトはI2Vの場合は0.9、T2Vの場合は0.875です。`--timestep_boundary`は0.0から1.0の範囲の値、または0から1000の範囲の値で指定できます。
|
| 232 |
+
|
| 233 |
+
またWan2.2モデルで両方のモデルを学習するとき、`--offload_inactive_dit`を使用すると、使用していないDiTモデルをCPUにオフロードすることができ、VRAMを節約できます(`--blocks_to_swap`未指定時のみ有効)。なお、Windows環境の場合、このオフロードには共有VRAMが使用されます。fp8/fp8_scaledの場合でも2つのモデル合計で約42GBの共有VRAMが必要となり、つまりメインRAMが96GB程度以上必要になりますのでご注意ください。メインRAMが少ない場合、`--blocks_to_swap`を使用する方がメインRAMの使用量は少なくなります。
|
| 234 |
+
|
| 235 |
+
Wan2.2の場合、I2VとT2Vで`--discrete_flow_shift`を調整する必要があるかもしれません。公式実装によると、推論時のシフト値はT2Vで12.0、I2Vで5.0です。学習時のシフト値は推論時と必ずしも合わせる必要はありませんが、参考になるかもしれません。
|
| 236 |
+
|
| 237 |
+
`--force_v2_1_time_embedding` を指定すると、Wan2.1と同じ形状の時間埋め込みを使用します。これにより推論中、学習中のVRAM使用量を削減できます(解像度やフレーム数が大きいほど削減量も大きくなります)。Wan2.2の公式実装とは異なりますが、確認した範囲では推論、学習共に影響はないようです。
|
| 238 |
+
|
| 239 |
+
`--network_module` に `networks.lora_wan` を指定することを忘れないでください。
|
| 240 |
+
|
| 241 |
+
その他のオプションは、ほぼ`hv_train_network.py`と同様です。[HunyuanVideoのドキュメント](./hunyuan_video.md#training--学習)および`--help`を参照してください。
|
| 242 |
+
|
| 243 |
+
学習後のLoRAの重みはそのままComfyUIで使用できるようです(用いるノードにもよります)。
|
| 244 |
+
|
| 245 |
+
</details>
|
| 246 |
+
|
| 247 |
+
### Command line options for training with sampling / サンプル画像生成に関連する学習時のコマンドラインオプション
|
| 248 |
+
|
| 249 |
+
Example of command line options for training with sampling / 記述例:
|
| 250 |
+
|
| 251 |
+
```bash
|
| 252 |
+
--vae path/to/wan_vae.safetensors \
|
| 253 |
+
--t5 path/to/models_t5_umt5-xxl-enc-bf16.pth \
|
| 254 |
+
--sample_prompts /path/to/prompt_file.txt \
|
| 255 |
+
--sample_every_n_epochs 1 --sample_every_n_steps 1000 --sample_at_first
|
| 256 |
+
```
|
| 257 |
+
Each option is the same as when generating images or as HunyuanVideo. Please refer to [here](/docs/sampling_during_training.md) for details.
|
| 258 |
+
|
| 259 |
+
If you train I2V models for Wan2.1, add `--clip path/to/models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth` to specify the CLIP model. For Wan2.2, CLIP model is not required.
|
| 260 |
+
|
| 261 |
+
You can specify the initial image, the negative prompt and the control video (for Wan2.1-Fun-Control) in the prompt file. Please refer to [here](/docs/sampling_during_training.md#prompt-file--プロンプトファイル).
|
| 262 |
+
|
| 263 |
+
<details>
|
| 264 |
+
<summary>日本語</summary>
|
| 265 |
+
|
| 266 |
+
各オプションは推論時、およびHunyuanVideoの場合と同様です。[こちら](/docs/sampling_during_training.md)を参照してください。
|
| 267 |
+
|
| 268 |
+
Wan2.1のI2Vモデルを学習する場合は、`--clip path/to/models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth` を追加してCLIPモデルを指定してください。Wan2.2ではCLIPモデルは不要です。
|
| 269 |
+
|
| 270 |
+
プロンプトファイルで、初期画像やネガティブプロンプト、制御動画(Wan2.1-Fun-Control用)等を指定できます。[こちら](/docs/sampling_during_training.md#prompt-file--プロンプトファイル)を参照してください。
|
| 271 |
+
|
| 272 |
+
</details>
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
## Inference / 推論
|
| 276 |
+
|
| 277 |
+
### Inference Options Comparison / 推論オプション比較
|
| 278 |
+
|
| 279 |
+
#### Speed Comparison (Faster → Slower) / 速度比較(速い→遅い)
|
| 280 |
+
*Note: Results may vary depending on GPU type*
|
| 281 |
+
|
| 282 |
+
fp8_fast > bf16/fp16 (no block swap) > fp8 > fp8_scaled > bf16/fp16 (block swap)
|
| 283 |
+
|
| 284 |
+
#### Quality Comparison (Higher → Lower) / 品質比較(高→低)
|
| 285 |
+
|
| 286 |
+
bf16/fp16 > fp8_scaled > fp8 >> fp8_fast
|
| 287 |
+
|
| 288 |
+
### T2V Inference / T2V推論
|
| 289 |
+
|
| 290 |
+
The following is an example of T2V inference (input as a single line):
|
| 291 |
+
|
| 292 |
+
```bash
|
| 293 |
+
python src/musubi_tuner/wan_generate_video.py --fp8 --task t2v-1.3B --video_size 832 480 --video_length 81 --infer_steps 20 \
|
| 294 |
+
--prompt "prompt for the video" --save_path path/to/save.mp4 --output_type both \
|
| 295 |
+
--dit path/to/wan2.1_t2v_1.3B_bf16_etc.safetensors --vae path/to/wan_2.1_vae.safetensors \
|
| 296 |
+
--t5 path/to/models_t5_umt5-xxl-enc-bf16.pth \
|
| 297 |
+
--attn_mode torch
|
| 298 |
+
```
|
| 299 |
+
|
| 300 |
+
`--task` is one of `t2v-1.3B`, `t2v-14B`, `i2v-14B`, `t2i-14B` (these are Wan2.1 official models), `t2v-1.3B-FC`, `t2v-14B-FC` and `i2v-14B-FC` (for Wan2.1-Fun Control model), `t2v-A14B`, `i2v-A14B` (for Wan2.2 14B models).
|
| 301 |
+
|
| 302 |
+
For Wan2.2 models, you can specify the low-noise model with `--dit` and the high-noise model with `--dit_high_noise`. The two models are switched at the timestep specified by `--timestep_boundary`. The default is described above. If you omit the high-noise model, the low-noise model will be used for all timesteps.
|
| 303 |
+
|
| 304 |
+
When inferring Wan2.2 high and low models, you can use `--offload_inactive_dit` to offload the inactive DiT model to the CPU, or `--lazy_loading` to enable lazy loading for DiT models, which can save VRAM. `--offload_inactive_dit` only works when `--blocks_to_swap` is not specified; when using `--blocks_to_swap`, use `--lazy_loading` instead. Without these options, both models will remain on the GPU, which may use more VRAM.
|
| 305 |
+
|
| 306 |
+
`--attn_mode` is `torch`, `sdpa` (same as `torch`), `xformers`, `sageattn`, `flash2`, `flash` (same as `flash2`) or `flash3`. `torch` is the default. Other options require the corresponding library to be installed. `flash3` (Flash attention 3) is not tested.
|
| 307 |
+
|
| 308 |
+
Specifying `--fp8` runs DiT in fp8 mode. fp8 can significantly reduce memory consumption but may impact output quality.
|
| 309 |
+
|
| 310 |
+
`--fp8_scaled` can be specified in addition to `--fp8` to run the model in fp8 weights optimization. This increases memory consumption and speed slightly but improves output quality. See [here](advanced_config.md#fp8-weight-optimization-for-models--モデルの重みのfp8への最適化) for details.
|
| 311 |
+
|
| 312 |
+
`--fp8_fast` option is also available for faster inference on RTX 40x0 GPUs. This option requires `--fp8_scaled` option. **This option seems to degrade the output quality.**
|
| 313 |
+
|
| 314 |
+
`--fp8_t5` can be used to specify the T5 model in fp8 format. This option reduces memory usage for the T5 model.
|
| 315 |
+
|
| 316 |
+
`--negative_prompt` can be used to specify a negative prompt. If omitted, the default negative prompt is used.
|
| 317 |
+
|
| 318 |
+
`--flow_shift` can be used to specify the flow shift (default 3.0 for I2V with 480p, 5.0 for others).
|
| 319 |
+
|
| 320 |
+
`--guidance_scale` can be used to specify the guidance scale for classifier free guidance (default 5.0). For Wan2.2, `--guidance_scale_high_noise` also can be specified to set a different scale for the high-noise model.
|
| 321 |
+
|
| 322 |
+
`--blocks_to_swap` is the number of blocks to swap during inference. The default value is None (no block swap). The maximum value is 39 for 14B model and 29 for 1.3B model.
|
| 323 |
+
|
| 324 |
+
`--force_v2_1_time_embedding` uses the same shape of time embedding as Wan2.1 for Wan2.2. See the training section for details.
|
| 325 |
+
|
| 326 |
+
`--vae_cache_cpu` enables VAE cache in main memory. This reduces VRAM usage slightly but processing is slower.
|
| 327 |
+
|
| 328 |
+
`--compile` enables torch.compile. See [here](/README.md#inference) for details.
|
| 329 |
+
|
| 330 |
+
`--trim_tail_frames` can be used to trim the tail frames when saving. The default is 0.
|
| 331 |
+
|
| 332 |
+
`--cfg_skip_mode` specifies the mode for skipping CFG in different steps. The default is `none` (all steps). `--cfg_apply_ratio` specifies the ratio of steps where CFG is applied. See below for details.
|
| 333 |
+
|
| 334 |
+
`--include_patterns` and `--exclude_patterns` can be used to specify which LoRA modules to apply or exclude during inference. If not specified, all modules are applied by default. These options accept regular expressions.
|
| 335 |
+
|
| 336 |
+
`--include_patterns` specifies the modules to be applied, and `--exclude_patterns` specifies the modules to be excluded. The regular expression is matched against the LoRA key name, and include takes precedence.
|
| 337 |
+
|
| 338 |
+
The key name to be searched is in sd-scripts format (`lora_unet_<module_name with dot replaced by _>`). For example, `lora_unet_blocks_9_cross_attn_k`.
|
| 339 |
+
|
| 340 |
+
For example, if you specify `--exclude_patterns "blocks_[23]\d_"` , it will exclude modules containing `blocks_20` to `blocks_39`. If you specify `--include_patterns "cross_attn" --exclude_patterns "blocks_(0|1|2|3|4)_"`, it will apply LoRA to modules containing `cross_attn` and not containing `blocks_0` to `blocks_4`.
|
| 341 |
+
|
| 342 |
+
If you specify multiple LoRA weights, please specify them with multiple arguments. For example: `--include_patterns "cross_attn" ".*" --exclude_patterns "dummy_do_not_exclude" "blocks_(0|1|2|3|4)"`. `".*"` is a regex that matches everything. `dummy_do_not_exclude` is a dummy regex that does not match anything.
|
| 343 |
+
|
| 344 |
+
`--cpu_noise` generates initial noise on the CPU. This may result in the same results as ComfyUI with the same seed (depending on other settings).
|
| 345 |
+
|
| 346 |
+
If you are using the Fun Control model, specify the control video with `--control_path`. You can specify a video file or a folder containing multiple image files. The number of frames in the video file (or the number of images) should be at least the number specified in `--video_length` (plus 1 frame if you specify `--end_image_path`).
|
| 347 |
+
|
| 348 |
+
Please try to match the aspect ratio of the control video with the aspect ratio specified in `--video_size` (there may be some deviation from the initial image of I2V due to the use of bucketing processing).
|
| 349 |
+
|
| 350 |
+
Other options are same as `hv_generate_video.py` (some options are not supported, please check the help).
|
| 351 |
+
|
| 352 |
+
<details>
|
| 353 |
+
<summary>日本語</summary>
|
| 354 |
+
|
| 355 |
+
`--task` には `t2v-1.3B`, `t2v-14B`, `i2v-14B`, `t2i-14B` (これらはWan2.1公式モデル)、`t2v-1.3B-FC`, `t2v-14B-FC`, `i2v-14B-FC`(Wan2.1-Fun Controlモデル)、`t2v-A14B`, `i2v-A14B`(Wan2.2 14Bモデル)を指定します。
|
| 356 |
+
|
| 357 |
+
Wan2.2モデルの場合、`--dit`に低ノイズ用モデルを、`--dit_high_noise`に高ノイズ用モデルを指定します。2つのモデルは`--timestep_boundary`で指定されたタイムステップで切り替わります。高ノイズ用モデルを省略した場合は、低ノイズ用モデルが全てのタイムステップで使用されます。
|
| 358 |
+
|
| 359 |
+
またWan2.2モデルで両方のモデルを用いて推論するとき、`--offload_inactive_dit`を使用すると、使用していないDiTモデルをCPUにオフロードすることができます。また`--lazy_loading`を使用すると、DiTモデルの遅延読み込みを有効にします。これらのオプションによりVRAMを節約できます。`--offload_inactive_dit`は`--blocks_to_swap`が指定されていない場合にのみ利用できます。`--blocks_to_swap`を使うときには`--lazy_loading`を使用してください。これらのオプションを指定しないと両方のモデルがGPUに置かれますので、VRAMを多く使用します。
|
| 360 |
+
|
| 361 |
+
`--attn_mode` には `torch`, `sdpa`(`torch`と同じ)、`xformers`, `sageattn`, `flash2`, `flash`(`flash2`と同じ), `flash3` のいずれかを指定します。デフォルトは `torch` です。その他のオプションを使用する場合は、対応するライブラリをインストールする必要があります。`flash3`(Flash attention 3)は未テストです。
|
| 362 |
+
|
| 363 |
+
`--fp8` を指定するとDiTモデルをfp8形式で実行します。fp8はメモリ消費を大幅に削減できますが、出力品質に影響を与える可能性があります。
|
| 364 |
+
|
| 365 |
+
`--fp8_scaled` を `--fp8` と併用すると、fp8への重み量子化を行います。メモリ消費と速度はわずかに悪化しますが、出力品質が向上します。詳しくは[こちら](advanced_config.md#fp8-weight-optimization-for-models--モデルの重みのfp8への最適化)を参照してください。
|
| 366 |
+
|
| 367 |
+
`--fp8_fast` オプションはRTX 40x0 GPUでの高速推論に使用されるオプションです。このオプションは `--fp8_scaled` オプションが必要です。**出力品質が劣化するようです。**
|
| 368 |
+
|
| 369 |
+
`--fp8_t5` を指定するとT5モデルをfp8形式で実行します。T5モデル呼び出し時のメモリ使用量を削減します。
|
| 370 |
+
|
| 371 |
+
`--negative_prompt` でネガティブプロンプトを指定できます。省略した場合はデフォルトのネガティブプロンプトが使用されます。
|
| 372 |
+
|
| 373 |
+
`--flow_shift` でflow shiftを指定できます(480pのI2Vの場合はデフォルト3.0、それ以外は5.0)。
|
| 374 |
+
|
| 375 |
+
`--guidance_scale` でclassifier free guidanceのガイダンススケールを指定できます(デフォルト5.0)。Wan2.2の場合は、`--guidance_scale_high_noise` で高ノイズ用モデルのガイダンススケールを別に指定できます。
|
| 376 |
+
|
| 377 |
+
`--blocks_to_swap` は推論時のblock swapの数です。デフォルト値はNone(block swapなし)です。最大値は14Bモデルの場合39、1.3Bモデルの場合29です。
|
| 378 |
+
|
| 379 |
+
`--force_v2_1_time_embedding` はWan2.2の場合に有効で、Wan2.1と同じ形状の時間埋め込みを使用します。詳細は学習セクションを参照してください。
|
| 380 |
+
|
| 381 |
+
`--vae_cache_cpu` を有効にすると、VAEのキャッシュをメインメモリに保持します。VRAM使用量が多少減りますが、処理は遅くなります。
|
| 382 |
+
|
| 383 |
+
`--compile`でtorch.compileを有効にします。詳細については[こちら](/README.md#inference)を参照してください。
|
| 384 |
+
|
| 385 |
+
`--trim_tail_frames` で保存時に末尾のフレームをトリミングできます。デフォルトは0です。
|
| 386 |
+
|
| 387 |
+
`--cfg_skip_mode` は異なるステップでCFGをスキップするモードを指定します。デフォルトは `none`(全ステップ)。`--cfg_apply_ratio` はCFGが適用されるステップの割合を指定します。詳細は後述します。
|
| 388 |
+
|
| 389 |
+
LoRAのどのモジュールを適用するかを、`--include_patterns`と`--exclude_patterns`で指定できます(未指定時・デフォルトは全モジュール適用されます
|
| 390 |
+
)。これらのオプションには、正規表現を指定します。`--include_patterns`は適用するモジュール、`--exclude_patterns`は適用しないモジュールを指定します。正規表現がLoRAのキー名に含まれるかどうかで判断され、includeが優先されます。
|
| 391 |
+
|
| 392 |
+
検索対象となるキー名は sd-scripts 形式(`lora_unet_<モジュール名のドットを_に置換したもの>`)です。例:`lora_unet_blocks_9_cross_attn_k`
|
| 393 |
+
|
| 394 |
+
たとえば `--exclude_patterns "blocks_[23]\d_"`のみを指定すると、`blocks_20`から`blocks_39`を含むモジュールが除外されます。`--include_patterns "cross_attn" --exclude_patterns "blocks_(0|1|2|3|4)_"`のようにincludeとexcludeを指定すると、`cross_attn`を含むモジュールで、かつ`blocks_0`から`blocks_4`を含まないモジュールにLoRAが適用されます。
|
| 395 |
+
|
| 396 |
+
複数のLoRAの重みを指定する場合は、複数個の引数で指定してください。例:`--include_patterns "cross_attn" ".*" --exclude_patterns "dummy_do_not_exclude" "blocks_(0|1|2|3|4)"` `".*"`は全てにマッチする正規表現です。`dummy_do_not_exclude`は何にもマッチしないダミーの正規表現です。
|
| 397 |
+
|
| 398 |
+
`--cpu_noise`を指定すると初期ノイズをCPUで生成します。これにより同一seed時の結果がComfyUIと同じになる可能性があります(他の設定にもよります)。
|
| 399 |
+
|
| 400 |
+
Fun Controlモデルを使用する場合は、`--control_path`で制御用の映像を指定します。動画ファイル、または複数枚の画像ファイルを含んだフォルダを指定できます。動画ファイルのフレーム数(または画像の枚数)は、`--video_length`で指定したフレーム数以上にしてください(後述の`--end_image_path`を指定した場合は、さらに+1フレーム)。
|
| 401 |
+
|
| 402 |
+
制御用の映像のアスペクト比は、`--video_size`で指定したアスペクト比とできるかぎり合わせてください(bucketingの処理を流用しているためI2Vの初期画像とズレる場合があります)。
|
| 403 |
+
|
| 404 |
+
その他のオプションは `hv_generate_video.py` と同じです(一部のオプションはサポートされていないため、ヘルプを確認してください)。
|
| 405 |
+
|
| 406 |
+
</details>
|
| 407 |
+
|
| 408 |
+
#### CFG Skip Mode / CFGスキップモード
|
| 409 |
+
|
| 410 |
+
These options allow you to balance generation speed against prompt accuracy. More skipped steps results in faster generation with potential quality degradation.
|
| 411 |
+
|
| 412 |
+
Setting `--cfg_apply_ratio` to 0.5 speeds up the denoising loop by up to 25%.
|
| 413 |
+
|
| 414 |
+
`--cfg_skip_mode` specifies one of the following modes:
|
| 415 |
+
|
| 416 |
+
- `early`: Skips CFG in early steps for faster generation, applying guidance mainly in later refinement steps
|
| 417 |
+
- `late`: Skips CFG in later steps, applying guidance during initial structure formation
|
| 418 |
+
- `middle`: Skips CFG in middle steps, applying guidance in both early and later steps
|
| 419 |
+
- `early_late`: Skips CFG in both early and late steps, applying only in middle steps
|
| 420 |
+
- `alternate`: Applies CFG in alternate steps based on the specified ratio
|
| 421 |
+
- `none`: Applies CFG at all steps (default)
|
| 422 |
+
|
| 423 |
+
`--cfg_apply_ratio` specifies a value from 0.0 to 1.0 controlling the proportion of steps where CFG is applied. For example, setting 0.5 means CFG will be applied in only 50% of the steps.
|
| 424 |
+
|
| 425 |
+
If num_steps is 10, the following table shows the steps where CFG is applied based on the `--cfg_skip_mode` option (A means CFG is applied, S means it is skipped, `--cfg_apply_ratio` is 0.6):
|
| 426 |
+
|
| 427 |
+
| skip mode | CFG apply pattern |
|
| 428 |
+
|---|---|
|
| 429 |
+
| early | SSSSAAAAAA |
|
| 430 |
+
| late | AAAAAASSSS |
|
| 431 |
+
| middle | AAASSSSAAA |
|
| 432 |
+
| early_late | SSAAAAAASS |
|
| 433 |
+
| alternate | SASASAASAS |
|
| 434 |
+
|
| 435 |
+
The appropriate settings are unknown, but you may want to try `late` or `early_late` mode with a ratio of around 0.3 to 0.5.
|
| 436 |
+
<details>
|
| 437 |
+
<summary>日本語</summary>
|
| 438 |
+
これらのオプションは、生成速度とプロンプトの精度のバランスを取ることができます。スキップされるステップが多いほど、生成速度が速くなりますが、品質が低下する可能性があります。
|
| 439 |
+
|
| 440 |
+
ratioに0.5を指定することで、デノイジングのループが最大25%程度、高速化されます。
|
| 441 |
+
|
| 442 |
+
`--cfg_skip_mode` は次のモードのいずれかを指定します:
|
| 443 |
+
|
| 444 |
+
- `early`:初期のステップでCFGをスキップして、主に終盤の精細化のステップで適用します
|
| 445 |
+
- `late`:終盤のステップでCFGをスキップし、初期の構造が決まる段階で適用します
|
| 446 |
+
- `middle`:中間のステップでCFGをスキップし、初期と終盤のステップの両方で適用します
|
| 447 |
+
- `early_late`:初期と終盤のステップの両方でCFGをスキップし、中間のステップのみ適用します
|
| 448 |
+
- `alternate`:指定された割合に基づいて交互にCFGを適用します
|
| 449 |
+
|
| 450 |
+
`--cfg_apply_ratio` は、CFGが適用されるステップの割合を0.0から1.0の値で指定します。たとえば、0.5に設定すると、CFGはステップの50%のみで適用されます。
|
| 451 |
+
|
| 452 |
+
具体的なパターンは上のテーブルを参照してください。
|
| 453 |
+
|
| 454 |
+
適切な設定は不明ですが、モードは`late`または`early_late`、ratioは0.3~0.5程度から試してみると良いかもしれません。
|
| 455 |
+
</details>
|
| 456 |
+
|
| 457 |
+
#### Skip Layer Guidance
|
| 458 |
+
|
| 459 |
+
Skip Layer Guidance is a feature that uses the output of a model with some blocks skipped as the unconditional output of classifier free guidance. It was originally proposed in [SD 3.5](https://github.com/comfyanonymous/ComfyUI/pull/5404) and first applied in Wan2GP in [this PR](https://github.com/deepbeepmeep/Wan2GP/pull/61). It may improve the quality of generated videos.
|
| 460 |
+
|
| 461 |
+
The implementation of SD 3.5 is [here](https://github.com/Stability-AI/sd3.5/blob/main/sd3_impls.py), and the implementation of Wan2GP (the PR mentioned above) has some different specifications. This inference script allows you to choose between the two methods.
|
| 462 |
+
|
| 463 |
+
*The SD3.5 method applies slg output in addition to cond and uncond (slows down the speed). The Wan2GP method uses only cond and slg output.*
|
| 464 |
+
|
| 465 |
+
The following arguments are available:
|
| 466 |
+
|
| 467 |
+
- `--slg_mode`: Specifies the SLG mode. `original` for SD 3.5 method, `uncond` for Wan2GP method. Default is None (no SLG).
|
| 468 |
+
- `--slg_layers`: Specifies the indices of the blocks (layers) to skip in SLG, separated by commas. Example: `--slg_layers 4,5,6`. Default is empty (no skip). If this option is not specified, `--slg_mode` is ignored.
|
| 469 |
+
- `--slg_scale`: Specifies the scale of SLG when `original`. Default is 3.0.
|
| 470 |
+
- `--slg_start`: Specifies the start step of SLG application in inference steps from 0.0 to 1.0. Default is 0.0 (applied from the beginning).
|
| 471 |
+
- `--slg_end`: Specifies the end step of SLG application in inference steps from 0.0 to 1.0. Default is 0.3 (applied up to 30% from the beginning).
|
| 472 |
+
|
| 473 |
+
Appropriate settings are unknown, but you may want to try `original` mode with a scale of around 3.0 and a start ratio of 0.0 and an end ratio of 0.5, with layers 4, 5, and 6 skipped.
|
| 474 |
+
|
| 475 |
+
<details>
|
| 476 |
+
<summary>日本語</summary>
|
| 477 |
+
Skip Layer Guidanceは、一部のblockをスキップしたモデル出力をclassifier free guidanceのunconditional出力に使用する機能です。元々は[SD 3.5](https://github.com/comfyanonymous/ComfyUI/pull/5404)で提案されたもので、Wan2.1には[Wan2GPのこちらのPR](https://github.com/deepbeepmeep/Wan2GP/pull/61)で初めて適用されました。生成動画の品質が向上する可能性があります。
|
| 478 |
+
|
| 479 |
+
SD 3.5の実装は[こちら](https://github.com/Stability-AI/sd3.5/blob/main/sd3_impls.py)で、Wan2GPの実装(前述のPR)は一部仕様が異なります。この推論スクリプトでは両者の方式を選択できるようになっています。
|
| 480 |
+
|
| 481 |
+
※SD3.5方式はcondとuncondに加えてslg outputを適用します(速度が低下します)。Wan2GP方式はcondとslg outputのみを使用します。
|
| 482 |
+
|
| 483 |
+
以下の引数があります。
|
| 484 |
+
|
| 485 |
+
- `--slg_mode`:SLGのモードを指定します。`original`でSD 3.5の方式、`uncond`でWan2GPの方式です。デフォルトはNoneで、SLGを使用しません。
|
| 486 |
+
- `--slg_layers`:SLGでスキップするblock (layer)のインデクスをカンマ区切りで指定します。例:`--slg_layers 4,5,6`。デフォルトは空(スキップしない)です。このオプションを指定しないと`--slg_mode`は無視されます。
|
| 487 |
+
- `--slg_scale`:`original`のときのSLGのスケールを指定します。デフォルトは3.0です。
|
| 488 |
+
- `--slg_start`:推論ステップのSLG適用開始ステップを0.0から1.0の割合で指定します。デフォルトは0.0です(最初から適用)。
|
| 489 |
+
- `--slg_end`:推論ステップのSLG適用終了ステップを0.0から1.0の割合で指定します。デフォルトは0.3です(最初から30%まで適用)。
|
| 490 |
+
|
| 491 |
+
適切な設定は不明ですが、`original`モードでスケールを3.0程度、開始割合を0.0、終了割合を0.5程度に設定し、4, 5, 6のlayerをスキップする設定から始めると良いかもしれません。
|
| 492 |
+
</details>
|
| 493 |
+
|
| 494 |
+
### I2V Inference / I2V推論
|
| 495 |
+
|
| 496 |
+
The following is an example of I2V inference (input as a single line):
|
| 497 |
+
|
| 498 |
+
```bash
|
| 499 |
+
python src/musubi_tuner/wan_generate_video.py --fp8 --task i2v-14B --video_size 832 480 --video_length 81 --infer_steps 20 \
|
| 500 |
+
--prompt "prompt for the video" --save_path path/to/save.mp4 --output_type both \
|
| 501 |
+
--dit path/to/wan2.1_i2v_480p_14B_bf16_etc.safetensors --vae path/to/wan_2.1_vae.safetensors \
|
| 502 |
+
--t5 path/to/models_t5_umt5-xxl-enc-bf16.pth --clip path/to/models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth \
|
| 503 |
+
--attn_mode torch --image_path path/to/image.jpg
|
| 504 |
+
```
|
| 505 |
+
|
| 506 |
+
For Wan2.1, add `--clip` to specify the CLIP model. For Wan2.2, CLIP model is not required. `--image_path` is the path to the image to be used as the initial frame.
|
| 507 |
+
|
| 508 |
+
`--end_image_path` can be used to specify the end image. This option is experimental. When this option is specified, the saved video will be slightly longer than the specified number of frames and will have noise, so it is recommended to specify `--trim_tail_frames 3` to trim the tail frames.
|
| 509 |
+
|
| 510 |
+
You can also use the Fun Control model for I2V inference. Specify the control video with `--control_path`.
|
| 511 |
+
|
| 512 |
+
Other options are same as T2V inference.
|
| 513 |
+
|
| 514 |
+
<details>
|
| 515 |
+
<summary>日本語</summary>
|
| 516 |
+
Wan2.1の場合は`--clip` を追加してCLIPモデルを指定します。Wan2.2ではCLIPモデルは不要です。`--image_path` は初期フレームとして使用する画像のパスです。
|
| 517 |
+
|
| 518 |
+
`--end_image_path` で終了画像を指定できます。このオプションは実験的なものです。このオプションを指定すると、保存される動画が指定フレーム数よりもやや多くなり、かつノイズが乗るため、`--trim_tail_frames 3` などを指定して末尾のフレームをトリミングすることをお勧めします。
|
| 519 |
+
|
| 520 |
+
I2V推論でもFun Controlモデルが使用できます。`--control_path` で制御用の映像を指定します。
|
| 521 |
+
|
| 522 |
+
その他のオプションはT2V推論と同じです。
|
| 523 |
+
</details>
|
| 524 |
+
|
| 525 |
+
### New Batch and Interactive Modes / 新しいバッチモードとインタラクティブモード
|
| 526 |
+
|
| 527 |
+
In addition to single video generation, Wan 2.1/2.2 now supports batch generation from file and interactive prompt input:
|
| 528 |
+
|
| 529 |
+
#### Batch Mode from File / ファイルからのバッチモード
|
| 530 |
+
|
| 531 |
+
Generate multiple videos from prompts stored in a text file:
|
| 532 |
+
|
| 533 |
+
```bash
|
| 534 |
+
python src/musubi_tuner/wan_generate_video.py --from_file prompts.txt --task t2v-14B \
|
| 535 |
+
--dit path/to/model.safetensors --vae path/to/vae.safetensors \
|
| 536 |
+
--t5 path/to/t5_model.pth --save_path output_directory
|
| 537 |
+
```
|
| 538 |
+
|
| 539 |
+
The prompts file format:
|
| 540 |
+
- One prompt per line
|
| 541 |
+
- Empty lines and lines starting with # are ignored (comments)
|
| 542 |
+
- Each line can include prompt-specific parameters using command-line style format:
|
| 543 |
+
|
| 544 |
+
```
|
| 545 |
+
A beautiful sunset over mountains --w 832 --h 480 --f 81 --d 42 --s 20
|
| 546 |
+
A busy city street at night --w 480 --h 832 --g 7.5 --n low quality, blurry
|
| 547 |
+
```
|
| 548 |
+
|
| 549 |
+
Supported inline parameters (if omitted, default values from the command line are used):
|
| 550 |
+
- `--w`: Width
|
| 551 |
+
- `--h`: Height
|
| 552 |
+
- `--f`: Frame count
|
| 553 |
+
- `--d`: Seed
|
| 554 |
+
- `--s`: Inference steps
|
| 555 |
+
- `--g` or `--l`: Guidance scale
|
| 556 |
+
- `--fs`: Flow shift
|
| 557 |
+
- `--i`: Image path (for I2V)
|
| 558 |
+
- `--cn`: Control path (for Fun Control)
|
| 559 |
+
- `--n`: Negative prompt
|
| 560 |
+
|
| 561 |
+
In batch mode, models are loaded once and reused for all prompts, significantly improving overall generation time compared to multiple single runs.
|
| 562 |
+
|
| 563 |
+
#### Interactive Mode / インタラクティブモード
|
| 564 |
+
|
| 565 |
+
Interactive command-line interface for entering prompts:
|
| 566 |
+
|
| 567 |
+
```bash
|
| 568 |
+
python src/musubi_tuner/wan_generate_video.py --interactive --task t2v-14B \
|
| 569 |
+
--dit path/to/model.safetensors --vae path/to/vae.safetensors \
|
| 570 |
+
--t5 path/to/t5_model.pth --save_path output_directory
|
| 571 |
+
```
|
| 572 |
+
|
| 573 |
+
In interactive mode:
|
| 574 |
+
- Enter prompts directly at the command line
|
| 575 |
+
- Use the same inline parameter format as batch mode
|
| 576 |
+
- Use Ctrl+D (or Ctrl+Z on Windows) to exit
|
| 577 |
+
- Models remain loaded between generations for efficiency
|
| 578 |
+
|
| 579 |
+
<details>
|
| 580 |
+
<summary>日本語</summary>
|
| 581 |
+
単一動画の生成に加えて、Wan 2.1/2.2は現在、ファイルからのバッチ生成とインタラクティブなプロンプト入力をサポートしています。
|
| 582 |
+
|
| 583 |
+
#### ファイルからのバッチモード
|
| 584 |
+
|
| 585 |
+
テキストファイルに保存されたプロンプトから複数の動画を生成します:
|
| 586 |
+
|
| 587 |
+
```bash
|
| 588 |
+
python src/musubi_tuner/wan_generate_video.py --from_file prompts.txt --task t2v-14B \
|
| 589 |
+
--dit path/to/model.safetensors --vae path/to/vae.safetensors \
|
| 590 |
+
--t5 path/to/t5_model.pth --save_path output_directory
|
| 591 |
+
```
|
| 592 |
+
|
| 593 |
+
プロンプトファイルの形式:
|
| 594 |
+
- 1行に1つのプロンプト
|
| 595 |
+
- 空行や#で始まる行は無視されます(コメント)
|
| 596 |
+
- 各行にはコマンドライン形式でプロンプト固有のパラメータを含めることができます:
|
| 597 |
+
|
| 598 |
+
サポートされているインラインパラメータ(省略した場合、コマンドラインのデフォルト値が使用されます)
|
| 599 |
+
- `--w`: 幅
|
| 600 |
+
- `--h`: 高さ
|
| 601 |
+
- `--f`: フレーム数
|
| 602 |
+
- `--d`: シード
|
| 603 |
+
- `--s`: 推論ステップ
|
| 604 |
+
- `--g` または `--l`: ガイダンススケール
|
| 605 |
+
- `--fs`: フローシフト
|
| 606 |
+
- `--i`: 画像パス(I2V用)
|
| 607 |
+
- `--cn`: コントロールパス(Fun Control用)
|
| 608 |
+
- `--n`: ネガティブプロンプト
|
| 609 |
+
|
| 610 |
+
バッチモードでは、モデルは一度だけロードされ、すべてのプロンプトで再利用されるため、複数回の単一実行と比較して全体的な生成時間が大幅に改善されます。
|
| 611 |
+
|
| 612 |
+
#### インタラクティブモード
|
| 613 |
+
|
| 614 |
+
プロンプトを入力するためのインタラクティブなコマンドラインインターフェース:
|
| 615 |
+
|
| 616 |
+
```bash
|
| 617 |
+
python src/musubi_tuner/wan_generate_video.py --interactive --task t2v-14B \
|
| 618 |
+
--dit path/to/model.safetensors --vae path/to/vae.safetensors \
|
| 619 |
+
--t5 path/to/t5_model.pth --save_path output_directory
|
| 620 |
+
```
|
| 621 |
+
|
| 622 |
+
インタラクティブモードでは:
|
| 623 |
+
- コマンドラインで直接プロンプトを入力
|
| 624 |
+
- バッチモードと同じインラインパラメータ形式を使用
|
| 625 |
+
- 終了するには Ctrl+D (Windowsでは Ctrl+Z) を使用
|
| 626 |
+
- 効率のため、モデルは生成間で読み込まれたままになります
|
| 627 |
+
|
| 628 |
+
</details>
|
docs/wan_1f.md
ADDED
|
@@ -0,0 +1,175 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Wan2.1 One Frame (Single Frame) Inference and Training / Wan2.1 1フレーム推論と学習
|
| 2 |
+
|
| 3 |
+
## Overview / 概要
|
| 4 |
+
|
| 5 |
+
This document describes the application of "One Frame Inference" found in the FramePack model to Wan2.1.
|
| 6 |
+
|
| 7 |
+
1. **Basic One Frame Inference**:
|
| 8 |
+
* Input the starting image and prompt, limiting the number of frames to generate to 1 frame. Use the Wan2.1 I2V model.
|
| 9 |
+
* Intentionally set a large value for the RoPE timestamp assigned to the generated single frame. This aims to obtain a single static image that has changed temporally and semantically according to the prompt from the starting image.
|
| 10 |
+
* However, unlike FramePack, using Wan2.1's model as is for inference results in images that are almost identical to the starting image, with noise mixed in. This seems to be due to the characteristics of Wan2.1.
|
| 11 |
+
* By additionally training a LoRA, it is possible to reflect changes according to the prompt in the generated image while also reducing noise.
|
| 12 |
+
|
| 13 |
+
2. **Intermediate Frame One Frame Inference**:
|
| 14 |
+
* Similar to the kisekaeichi method, use the FLF2V (First and Last Frame to Video) method to generate intermediate frames. Use the FLF2V model.
|
| 15 |
+
* Set the RoPE timestamp of the generated image to an intermediate value between the timestamps of the starting image and the ending image.
|
| 16 |
+
* (This is a theoretical proposal, implemented but not yet tested.)
|
| 17 |
+
|
| 18 |
+
<details>
|
| 19 |
+
<summary>日本語</summary>
|
| 20 |
+
|
| 21 |
+
このドキュメントでは、FramePackモデルで見いだされた「1フレーム推論」の、Wan2.1への適用について説明します。
|
| 22 |
+
|
| 23 |
+
1. **基本的な1フレーム推論**:
|
| 24 |
+
* 開始画像とプロンプトを入力とし、生成するフレーム数を1フレームに限定します。Wan2.1の I2V モデルを使用します。
|
| 25 |
+
* この際、生成する1フレームに割り当てるRoPEのタイムスタンプを意図的に大きな値に設定します。これは開始画像からプロンプトに従って時間的・意味的に変化した単一の静止画を得ることを目的としています。
|
| 26 |
+
* しかしながらFramePackと異なり、Wan2.1のモデルをそのまま利用した推論では、このように設定しても生成される画像は開始画像とほぼ同じものになり、またノイズも混ざります。これはWan2.1の特性によるもの思われます。
|
| 27 |
+
* 追加でLoRAを学習することで、プロンプトに従った変化を生成画像に反映させることが可能で、かつノイズも抑えられることがわかりました。
|
| 28 |
+
|
| 29 |
+
2. **中間フレームの1フレーム推論**:
|
| 30 |
+
* kisekaeichi方式と似た、FLF2V (First and Last Frame to Video) 方式を利用し、中間のフレームを生成します。FLF2Vモデルを使用します。
|
| 31 |
+
* 生成する画像のRoPEタイムスタンプを、開始画像のタイムスタンプと終端画像のタイムスタンプの中間的な値に設定します。
|
| 32 |
+
|
| 33 |
+
</details>
|
| 34 |
+
|
| 35 |
+
## One (single) Frame Inference / 1フレーム推論
|
| 36 |
+
|
| 37 |
+
**This feature is highly experimental** and is not officially supported. It is an independent implementation, not an official feature of Wan2.1.
|
| 38 |
+
|
| 39 |
+
To perform one-frame inference, specify the `--one_frame_inference` option with `target_index` and `control_index`. In Wan2.1, it is necessary to combine this with LoRA, so please set it up similarly to LoRA training settings. The model used should also be the same.
|
| 40 |
+
|
| 41 |
+
An example description is as follows:
|
| 42 |
+
|
| 43 |
+
```bash
|
| 44 |
+
--output_type latent_images --image_path start_image.png --control_image_path start_image.png \
|
| 45 |
+
--one_frame_inference control_index=0,target_index=1
|
| 46 |
+
```
|
| 47 |
+
|
| 48 |
+
To perform one-frame inference for intermediate frames, specify multiple indices for `control_index` separated by semicolons. The description is as follows:
|
| 49 |
+
|
| 50 |
+
```bash
|
| 51 |
+
--output_type latent_images --image_path start_image.png --control_image_path start_image.png end_image.png \
|
| 52 |
+
--one_frame_inference control_index=0;2,target_index=1
|
| 53 |
+
```
|
| 54 |
+
|
| 55 |
+
When specifying `--output_type` as `latent_images`, both latent and image will be saved.
|
| 56 |
+
|
| 57 |
+
The `--image_path` is used to obtain CLIP features for one-frame inference. Usually, the starting image should be specified. The `--end_image_path` is used to obtain CLIP features for the ending image. Usually, the ending image should be specified.
|
| 58 |
+
|
| 59 |
+
The `--control_image_path` is a newly added argument to specify the control image. Usually, the starting image (and both starting and ending images for intermediate frame inference) should be specified.
|
| 60 |
+
|
| 61 |
+
The options for `--one_frame_inference` are specified as comma-separated values. Here, the index represents the RoPE timestamp.
|
| 62 |
+
|
| 63 |
+
- `target_index=<integer>`: Specifies the index of the generated image.
|
| 64 |
+
- `control_index=<integer or semicolon-separated integers>`: Specifies the index of the control image. Please specify the same number of indices as the number of control images specified in `--control_image_path`.
|
| 65 |
+
|
| 66 |
+
The optimal values for `target_index` and `control_index` are unknown. Please specify `target_index` as 1 or greater. For one-frame inference, specify `control_index=0`. For intermediate frame one-frame inference, specify `control_index=0;2`, where 0 and a value greater than `target_index` are specified.
|
| 67 |
+
|
| 68 |
+
<details>
|
| 69 |
+
<summary>日本語</summary>
|
| 70 |
+
|
| 71 |
+
**この機能は非常に実験的であり**、公式にはサポートされていません。Wan2.1公式の機能ではなく、独自の実装です。
|
| 72 |
+
|
| 73 |
+
1フレーム推論を行うには`--one_frame_inference`オプションに `target_index` と `control_index` を指定してください。Wan2.1ではLoRAとの組み合わせが必要になりますので、LoRAの学習設定と同様の設定を行ってください。使用するモデルについても同様です。
|
| 74 |
+
|
| 75 |
+
記述例は以下の通りです。
|
| 76 |
+
|
| 77 |
+
```bash
|
| 78 |
+
--output_type latent_images --image_path start_image.png --control_image_path start_image.png \
|
| 79 |
+
--one_frame_inference control_index=0,target_index=1
|
| 80 |
+
```
|
| 81 |
+
|
| 82 |
+
中間フレームの1フレーム推論を行うには、`control_index`にセミコロン区切りで複数のインデックスを指定します。以下のように記述します。
|
| 83 |
+
|
| 84 |
+
```bash
|
| 85 |
+
--output_type latent_images --image_path start_image.png --end_image_path end_image.png \
|
| 86 |
+
--control_image_path start_image.png end_image.png --one_frame_inference control_index=0;2,target_index=1
|
| 87 |
+
```
|
| 88 |
+
|
| 89 |
+
`--output_type`に`latent_images`を指定するとlatentと画像の両方が保存されます。
|
| 90 |
+
|
| 91 |
+
`--image_path`は、1フレーム推論ではCLIPの特徴量を取得するために用いられます。通常は開始画像を指定してください。`--end_image_path`は、終了画像のCLIP特徴量を取得するために用いられます。通常は終了画像を指定してください。
|
| 92 |
+
|
| 93 |
+
`--control_image_path`は新しく追加された引数で、制御用画像を指定するために用いられます。通常は開始画像(中間フレーム推論の場合は開始画像と終了画像の両方)を指定してください。
|
| 94 |
+
|
| 95 |
+
`--one_frame_inference`のオプションには、カンマ区切りで以下のオプションを指定します。ここでindexはRoPEのタイムスタンプを表します。
|
| 96 |
+
|
| 97 |
+
- `target_index=<整数>`: 生成する画像のindexを指定します。
|
| 98 |
+
- `control_index=<整数またはセミコロン区切りの整数>`: 制御用画像のindexを指定します。`--control_image_path`で指定した制御用画像の数と同じ数のインデックスを指定してください。
|
| 99 |
+
|
| 100 |
+
`target_index`、`control_index`の最適値は不明です。`target_index`は1以上を指定してください。`control_index`は、1フレーム推論では`control_index=0`を指定します。中間フレームの1フレーム推論では、`control_index=0;2`のように、0と`target_index`より大きい値を指定します。
|
| 101 |
+
|
| 102 |
+
</details>
|
| 103 |
+
|
| 104 |
+
## One Frame (Single Frame) Training / 1フレーム学習
|
| 105 |
+
|
| 106 |
+
**This feature is experimental.** It performs training in a manner similar to one-frame inference.
|
| 107 |
+
|
| 108 |
+
This currently reuses the dataset settings of the FramePack model. Please refer to the [FramePack documentation](./framepack_1f.md#one-frame-single-frame-training--1フレーム学習) and the [FramePack dataset settings](./dataset_config.md#framepack-one-frame-training).
|
| 109 |
+
|
| 110 |
+
`fp_1f_clean_indices` corresponds to the `control_index` described below.
|
| 111 |
+
|
| 112 |
+
However, `fp_1f_no_post` is ignored in Wan2.1, and alpha masks are not yet supported.
|
| 113 |
+
|
| 114 |
+
When performing one-frame training, please create the cache by specifying `--one_frame` in `wan_cache_latents.py`. Also, specify `--one_frame` in `wan_train_network.py` to change the inference method for sample image generation.
|
| 115 |
+
|
| 116 |
+
In one-frame training, the I2V 14B model is used. Specify `--task i2v-14B` and the corresponding weights. For intermediate frame one-frame training, the FLF2V model is used. Specify `--task flf2v-14B` and the corresponding weights.
|
| 117 |
+
|
| 118 |
+
In simple experiments for intermediate frame one-frame training, using `control_index=0;2`, `target_index=1` (in dataset settings, `fp_1f_clean_indices = [0, 2]`, `fp_1f_target_index = 1`), yielded better results than `0;10` and `5`.
|
| 119 |
+
|
| 120 |
+
The optimal training settings are currently unknown. Feedback is welcome.
|
| 121 |
+
|
| 122 |
+
### Example of prompt file description for sample generation
|
| 123 |
+
|
| 124 |
+
The description is almost the same as for FramePack. The command line option `--one_frame_inference` corresponds to `--of`, and `--control_image_path` corresponds to `--ci`. `--ei` is used to specify the ending image.
|
| 125 |
+
|
| 126 |
+
Note that while `--ci` can be specified multiple times, it should be specified as `--ci img1.png --ci img2.png`, unlike `--control_image_path` which is specified as `--control_image_path img1.png img2.png`.
|
| 127 |
+
|
| 128 |
+
For normal one-frame training:
|
| 129 |
+
```
|
| 130 |
+
The girl wears a school uniform. --i path/to/start.png --ci path/to/start.png --of target_index=1,control_index=0 --f 1 --s 10 --fs 7 --d 1234 --w 384 --h 576
|
| 131 |
+
```
|
| 132 |
+
|
| 133 |
+
For intermediate frame one-frame training
|
| 134 |
+
```
|
| 135 |
+
The girl wears a school uniform. --i path/to/start.png --ei path/to/end.png --ci path/to/start.png --ci path/to/end.png --of target_index=1,control_index=0;2 --f 1 --s 10 --fs 7 --d 1234 --w 384 --h 576
|
| 136 |
+
```
|
| 137 |
+
|
| 138 |
+
<details>
|
| 139 |
+
<summary>日本語</summary>
|
| 140 |
+
|
| 141 |
+
**この機能は実験的なものです。** 1フレーム推論と同様の方法で学習を行います。
|
| 142 |
+
|
| 143 |
+
現在は、FramePackモデルのデータセット設定を流用しています。[FramePackのドキュメント](./framepack_1f.md#one-frame-single-frame-training--1フレーム学習)および
|
| 144 |
+
[FramePackのデータセット設定](./dataset_config.md#framepack-one-frame-training)を参照してください。
|
| 145 |
+
|
| 146 |
+
`fp_1f_clean_indices` が後述の `control_index` に相当します。
|
| 147 |
+
|
| 148 |
+
ただし、`fp_1f_no_post`はWan2.1では無視されます。またアルファ値によるマスクも未対応です。
|
| 149 |
+
|
| 150 |
+
1フレーム学習時は、`wan_cache_latents.py`に`--one_frame`を指定してキャッシュを作成してください。また、`wan_train_network.py`に`--one_frame`を指定してサンプル画像生成時の推論方法を変更してください。
|
| 151 |
+
|
| 152 |
+
1フレーム学習ではI2Vの14Bモデルを使用します。`--task i2v-14B`を指定し、該当する重みを指定してください。中間フレームの1フレーム学習では、FLF2Vモデルを使用します。`--task flf2v-14B`を指定し、該当する重みを指定してください。
|
| 153 |
+
|
| 154 |
+
中間フレーム学習の簡単な実験では、`control_index=0;2`、`target_index=1`が(データセット設定では `fp_1f_clean_indices = [0, 2]`、`fp_1f_target_index = 1`)、`0;10`および`5`よりも良い結果を得られました。
|
| 155 |
+
|
| 156 |
+
最適な学習設定は今のところ不明です。フィードバックを歓迎します。
|
| 157 |
+
|
| 158 |
+
**サンプル生成のプロンプトファイル記述例**
|
| 159 |
+
|
| 160 |
+
FramePackとほぼ同様です。コマンドラインオプション`--one_frame_inference`に相当する `--of`と、`--control_image_path`に相当する`--ci`が用意されています。`--ei`は終端画像を指定します。
|
| 161 |
+
|
| 162 |
+
※ `--control_image_path`は`--control_image_path img1.png img2.png`のようにスペースで区切るのに対して、`--ci`は`--ci img1.png --ci img2.png`のように指定するので注意してください。
|
| 163 |
+
|
| 164 |
+
通常の1フレーム学習:
|
| 165 |
+
```
|
| 166 |
+
The girl wears a school uniform. --i path/to/start.png --ci path/to/start.png --of target_index=1,control_index=0 --f 1 --s 10 --fs 7 --d 1234 --w 384 --h 576
|
| 167 |
+
```
|
| 168 |
+
|
| 169 |
+
中間フレームの1フレーム学習(開始画像と終端画像の両方を指定):
|
| 170 |
+
```
|
| 171 |
+
The girl wears a school uniform. --i path/to/start.png --ei path/to/end.png --ci path/to/start.png --ci path/to/end.png --of target_index=1,control_index=0;2 --f 1 --s 10 --fs 7 --d 1234 --w 384 --h 576
|
| 172 |
+
```
|
| 173 |
+
|
| 174 |
+
</details>
|
| 175 |
+
|
docs/zimage.md
ADDED
|
@@ -0,0 +1,404 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Z-Image
|
| 2 |
+
|
| 3 |
+
## Overview / 概要
|
| 4 |
+
|
| 5 |
+
This document describes the usage of Z-Image architecture within the Musubi Tuner framework. Z-Image is a model architecture that supports text-to-image generation.
|
| 6 |
+
|
| 7 |
+
Pre-caching, training, and inference options can be found via `--help`. Many options are shared with HunyuanVideo, so refer to the [HunyuanVideo documentation](./hunyuan_video.md) as needed.
|
| 8 |
+
|
| 9 |
+
This feature is experimental.
|
| 10 |
+
|
| 11 |
+
<details>
|
| 12 |
+
<summary>日本語</summary>
|
| 13 |
+
|
| 14 |
+
このドキュメントは、Musubi Tunerフレームワーク内でのZ-Imageアーキテクチャの使用法について説明しています。Z-Imageはテキストから画像を生成することができるモデルアーキテクチャです。Z-Imageは現在蒸留モデルであるTurbo版しかリリースされていないため、学習は不安定です。モデルのダウンロードの項も参照してください。
|
| 15 |
+
|
| 16 |
+
事前キャッシング、学習、推論のオプションは`--help`で確認してください。HunyuanVideoと共通のオプションが多くありますので、必要に応じて[HunyuanVideoのドキュメント](./hunyuan_video.md)も参照してください。
|
| 17 |
+
|
| 18 |
+
この機能は実験的なものです。
|
| 19 |
+
|
| 20 |
+
</details>
|
| 21 |
+
|
| 22 |
+
## Download the model / モデルのダウンロード
|
| 23 |
+
|
| 24 |
+
You need to download the DiT, VAE, and Text Encoder (Qwen3) models.
|
| 25 |
+
|
| 26 |
+
*As of January 2026, the base model has been released. VAE and Text Encoder are the same as the Turbo model, so if you have already downloaded them, there is no need to download them again.*
|
| 27 |
+
|
| 28 |
+
The base version DiT, VAE, and Text Encoder can be obtained from Tongyi-MAI's official repository or ComfyUI weights. You can use either of the following:
|
| 29 |
+
|
| 30 |
+
- **Official Repository**: [Tongyi-MAI/Z-Image](https://huggingface.co/Tongyi-MAI/Z-Image/)
|
| 31 |
+
- For DiT and Text Encoder, download all the split files and specify the first file (e.g., `00001-of-00002.safetensors`) in the arguments.
|
| 32 |
+
- You do not need to download files other than `*.safetensors`.
|
| 33 |
+
- **ComfyUI Weights**: [Comfy-Org/z_image](https://huggingface.co/Comfy-Org/z_image)
|
| 34 |
+
|
| 35 |
+
You need to prepare the following models:
|
| 36 |
+
|
| 37 |
+
- **DiT**: The transformer model.
|
| 38 |
+
- **VAE**: The autoencoder model.
|
| 39 |
+
- **Text Encoder**: Qwen3 model.
|
| 40 |
+
|
| 41 |
+
*The following information is prior to the base model release. Since the base model has been released, it is recommended to use the base model for training.*
|
| 42 |
+
|
| 43 |
+
> If you train the Turbo model, it is recommended to use AI Toolkit/ostris's De-Turbo model. Download `z_image_de_turbo_v1_bf16.safetensors` from [ostris/Z-Image-De-Turbo](https://huggingface.co/ostris/Z-Image-De-Turbo) and use it as the DiT model.
|
| 44 |
+
>
|
| 45 |
+
> As another option, you can also use ostris's [ostris/zimage_turbo_training_adapter](https://huggingface.co/ostris/zimage_turbo_training_adapter) to train by combining the Turbo version with an adapter. In this case, download `zimage_turbo_training_adapter_v2.safetensors`, etc., and specify this LoRA weight in the `--base_weights` option during training.
|
| 46 |
+
>
|
| 47 |
+
> We would like to express our deep gratitude to ostris for providing the De-Turbo model and Training Adapter.
|
| 48 |
+
|
| 49 |
+
<details>
|
| 50 |
+
<summary>日本語</summary>
|
| 51 |
+
|
| 52 |
+
DiT, VAE, Text Encoder (Qwen3) のモデルをダウンロードする必要があります。
|
| 53 |
+
|
| 54 |
+
*※2026/1:Baseモデルがリリースされました。VAEとText EncoderはTurboモデルと同じですので、ダウンロード済みの場合は再ダウンロードの必要はありません。*
|
| 55 |
+
|
| 56 |
+
Base版のDiT、VAEとText EncoderはTongyi-MAIの公式リポジトリまたはComfyUI用重みから取得できます。以下のいずれかを使用してください:
|
| 57 |
+
|
| 58 |
+
- **公式リポジトリ**: [Tongyi-MAI/Z-Image](https://huggingface.co/Tongyi-MAI/Z-Image/)
|
| 59 |
+
- DiT、Text Encoderは、分割された複数のファイルをすべてダウンロードし、引数には `00001-of-00002.safetensors` のような最初のファイルを指定してください。
|
| 60 |
+
- `*.safetensors` ファイル以外はダウンロードする必要はありません。
|
| 61 |
+
- **ComfyUI用重み**: [Comfy-Org/z_image](https://huggingface.co/Comfy-Org/z_image)
|
| 62 |
+
|
| 63 |
+
以下のモデルを準備してください:
|
| 64 |
+
|
| 65 |
+
- **DiT**: Transformerモデル。
|
| 66 |
+
- **VAE**: Autoencoderモデル。
|
| 67 |
+
- **Text Encoder**: Qwen3モデル。
|
| 68 |
+
|
| 69 |
+
*以下はBaseモデルリリース前の情報です。Baseモデルがリリースされたため、学習にはBaseモデルの使用をお勧めします。*
|
| 70 |
+
|
| 71 |
+
> Turboモデルの学習を行う場合は、AI Toolkit/ostris氏のDe-Turboモデルを使用することをお勧めします。[ostris/Z-Image-De-Turbo](https://huggingface.co/ostris/Z-Image-De-Turbo) から `z_image_de_turbo_v1_bf16.safetensors` をダウンロードし、DiTモデルとして使用してください。
|
| 72 |
+
>
|
| 73 |
+
> 別のオプションとして、ostris氏の [ostris/zimage_turbo_training_adapter](https://huggingface.co/ostris/zimage_turbo_training_adapter) を使用して、Turbo版とAdapterを組み合わせて学習することもできます。この場合は、`zimage_turbo_training_adapter_v2.safetensors` 等をダウンロードし、学習時に `--base_weights` オプションにこのLoRA重みを指定してください。
|
| 74 |
+
>
|
| 75 |
+
> De-TurboモデルおよびTraining Adapterを提供してくださった ostris 氏に深く感謝します。
|
| 76 |
+
|
| 77 |
+
</details>
|
| 78 |
+
|
| 79 |
+
## Pre-caching / 事前キャッシング
|
| 80 |
+
|
| 81 |
+
### Latent Pre-caching / latentの事前キャッシング
|
| 82 |
+
|
| 83 |
+
Latent pre-caching uses a dedicated script for Z-Image.
|
| 84 |
+
|
| 85 |
+
```bash
|
| 86 |
+
python src/musubi_tuner/zimage_cache_latents.py \
|
| 87 |
+
--dataset_config path/to/toml \
|
| 88 |
+
--vae path/to/vae_model
|
| 89 |
+
```
|
| 90 |
+
|
| 91 |
+
- Uses `zimage_cache_latents.py`.
|
| 92 |
+
- The dataset should be an image dataset.
|
| 93 |
+
- Z-Image does not support control images, so only target image latents are cached.
|
| 94 |
+
|
| 95 |
+
<details>
|
| 96 |
+
<summary>日本語</summary>
|
| 97 |
+
|
| 98 |
+
latentの事前キャッシングはZ-Image専用のスクリプトを使用します。
|
| 99 |
+
|
| 100 |
+
- `zimage_cache_latents.py`を使用します。
|
| 101 |
+
- データセットは画像データセットである必要があります。
|
| 102 |
+
- Z-Imageはコントロール画像をサポートしていないため、ターゲット画像のlatentのみがキャッシュされます。
|
| 103 |
+
|
| 104 |
+
</details>
|
| 105 |
+
|
| 106 |
+
### Text Encoder Output Pre-caching / テキストエンコーダー出力の事前キャッシング
|
| 107 |
+
|
| 108 |
+
Text encoder output pre-caching also uses a dedicated script.
|
| 109 |
+
|
| 110 |
+
```bash
|
| 111 |
+
python src/musubi_tuner/zimage_cache_text_encoder_outputs.py \
|
| 112 |
+
--dataset_config path/to/toml \
|
| 113 |
+
--text_encoder path/to/text_encoder \
|
| 114 |
+
--batch_size 16
|
| 115 |
+
```
|
| 116 |
+
|
| 117 |
+
- Uses `zimage_cache_text_encoder_outputs.py`.
|
| 118 |
+
- Requires `--text_encoder` (Qwen3).
|
| 119 |
+
- Use `--fp8_llm` option to run the Text Encoder in fp8 mode for VRAM savings.
|
| 120 |
+
- Larger batch sizes require more VRAM. Adjust `--batch_size` according to your VRAM capacity.
|
| 121 |
+
|
| 122 |
+
<details>
|
| 123 |
+
<summary>日本語</summary>
|
| 124 |
+
|
| 125 |
+
テキストエンコーダー出力の事前キャッシングも専用のスクリプトを使用します。
|
| 126 |
+
|
| 127 |
+
- `zimage_cache_text_encoder_outputs.py`を使用します。
|
| 128 |
+
- `--text_encoder`(Qwen3)が必要です。
|
| 129 |
+
- テキストエンコーダーをfp8モードで実行するための`--fp8_llm`オプションを使用することでVRAMを節約できます。
|
| 130 |
+
- バッチサイズが大きいほど、より多くのVRAMが必要です。VRAM容量に応じて`--batch_size`を調整してください。
|
| 131 |
+
|
| 132 |
+
</details>
|
| 133 |
+
|
| 134 |
+
## Training / 学習
|
| 135 |
+
|
| 136 |
+
Training uses a dedicated script `zimage_train_network.py`.
|
| 137 |
+
|
| 138 |
+
```bash
|
| 139 |
+
accelerate launch --num_cpu_threads_per_process 1 --mixed_precision bf16 src/musubi_tuner/zimage_train_network.py \
|
| 140 |
+
--dit path/to/dit_model \
|
| 141 |
+
--vae path/to/vae_model \
|
| 142 |
+
--text_encoder path/to/text_encoder \
|
| 143 |
+
--dataset_config path/to/toml \
|
| 144 |
+
--sdpa --mixed_precision bf16 \
|
| 145 |
+
--timestep_sampling shift --weighting_scheme none --discrete_flow_shift 2.0 \
|
| 146 |
+
--optimizer_type adamw8bit --learning_rate 1e-4 --gradient_checkpointing \
|
| 147 |
+
--max_data_loader_n_workers 2 --persistent_data_loader_workers \
|
| 148 |
+
--network_module networks.lora_zimage --network_dim 32 \
|
| 149 |
+
--max_train_epochs 16 --save_every_n_epochs 1 --seed 42 \
|
| 150 |
+
--output_dir path/to/output_dir --output_name name-of-lora
|
| 151 |
+
```
|
| 152 |
+
|
| 153 |
+
- Uses `zimage_train_network.py`.
|
| 154 |
+
- **Requires** specifying `--vae` and `--text_encoder`.
|
| 155 |
+
- **Requires** specifying `--network_module networks.lora_zimage`.
|
| 156 |
+
- It is not yet clear whether `--mixed_precision bf16` or `fp16` is better for Z-Image training.
|
| 157 |
+
- The timestep sampling settings for Z-Image training are unclear, but it may be good to base them on `--timestep_sampling shift --weighting_scheme none --discrete_flow_shift 2.0` and adjust as needed.
|
| 158 |
+
- Memory saving options like `--fp8_base` and `--fp8_scaled` (for DiT) and `--fp8_llm` (for Text Encoder) are available.
|
| 159 |
+
- `--gradient_checkpointing` is available for memory savings. See [HunyuanVideo documentation](./hunyuan_video.md#memory-optimization) for details.
|
| 160 |
+
|
| 161 |
+
<details>
|
| 162 |
+
<summary>日本語</summary>
|
| 163 |
+
|
| 164 |
+
Z-Imageの学習は専用のスクリプト`zimage_train_network.py`を使用します。
|
| 165 |
+
|
| 166 |
+
コマンド例は英語版を参照してください。
|
| 167 |
+
|
| 168 |
+
- `zimage_train_network.py`を使用します。
|
| 169 |
+
- `--vae`、`--text_encoder`を指定する必要があります。
|
| 170 |
+
- `--network_module networks.lora_zimage`を指定する必要があります。
|
| 171 |
+
- Z-Imageの学習に`--mixed_precision bf16`と`fp16`のどちらが良いかはまだ不明です。
|
| 172 |
+
- Z-Imageのタイムステップサンプリング設定は不明ですが、`--timestep_sampling shift --weighting_scheme none --discrete_flow_shift 2.0`をベースに調整すると良いかもしれません。
|
| 173 |
+
- `--fp8_base`、`--fp8_scaled`(DiT用)や`--fp8_llm`(テキストエンコーダー用)などのメモリ節約オプションが利用可能です。
|
| 174 |
+
- メモリ節約のために`--gradient_checkpointing`が利用可能です。詳細は[HunyuanVideoドキュメント](./hunyuan_video.md#memory-optimization)を参照してください。
|
| 175 |
+
|
| 176 |
+
</details>
|
| 177 |
+
|
| 178 |
+
### Converting LoRA weights to Diffusers format for ComfyUI / LoRA重みをComfyUIで使用可能なDiffusers形式に変換する
|
| 179 |
+
|
| 180 |
+
A script is provided to convert Z-Image LoRA weights to Diffusers format for ComfyUI. LoHa and LoKr formats are supported.
|
| 181 |
+
|
| 182 |
+
```bash
|
| 183 |
+
python src/musubi_tuner/networks/convert_lora.py \
|
| 184 |
+
--input path/to/zimage_lora.safetensors \
|
| 185 |
+
--output path/to/output_diffusers_lora.safetensors \
|
| 186 |
+
--target other
|
| 187 |
+
```
|
| 188 |
+
|
| 189 |
+
- The script is `convert_lora.py`.
|
| 190 |
+
- `--input` argument is the input Z-Image LoRA weights file.
|
| 191 |
+
- `--output` argument is the output Diffusers format LoRA weights file.
|
| 192 |
+
- `--target other` means Diffusers format can be used in ComfyUI.
|
| 193 |
+
|
| 194 |
+
`networks/convert_z_image_lora_to_comfy.py` can also be used for this purpose, but the converted weights may not work correctly with nunchaku.
|
| 195 |
+
|
| 196 |
+
<details>
|
| 197 |
+
<summary>日本語</summary>
|
| 198 |
+
|
| 199 |
+
Z-ImageのLoRA重みをComfyUIで使用できるDiffusers形式に変換するスクリプトが提供されています。
|
| 200 |
+
|
| 201 |
+
- スクリプトは`convert_lora.py`です。
|
| 202 |
+
- `--input`引数は入力のZ-Image LoRA重みファイルです。
|
| 203 |
+
- `--output`引数は出力のDiffusers形式のLoRA重みファイルです。
|
| 204 |
+
- `--target other`はComfyUIで使用できるDiffusers形式を意味します。
|
| 205 |
+
|
| 206 |
+
`networks/convert_z_image_lora_to_comfy.py`もこの目的で使用できますが、変換された重みがnunchakuで正しく動作しない可能性があります。
|
| 207 |
+
|
| 208 |
+
</details>
|
| 209 |
+
|
| 210 |
+
### Memory Optimization
|
| 211 |
+
|
| 212 |
+
- `--fp8_base` and `--fp8_scaled` options are available to reduce memory usage of DiT (specify both together). Quality may degrade slightly.
|
| 213 |
+
- `--fp8_llm` option is available to reduce memory usage of Text Encoder (Qwen3).
|
| 214 |
+
- `--gradient_checkpointing` and `--gradient_checkpointing_cpu_offload` are available for memory savings. See [HunyuanVideo documentation](./hunyuan_video.md#memory-optimization) for details.
|
| 215 |
+
- `--blocks_to_swap` option is available to offload some blocks to CPU. The maximum number of blocks that can be offloaded is 28.
|
| 216 |
+
|
| 217 |
+
<details>
|
| 218 |
+
<summary>日本語</summary>
|
| 219 |
+
|
| 220 |
+
- DiTのメモリ使用量を削減するために、`--fp8_base`と`--fp8_scaled`オプションを指定可能です(同時に指定してください)。品質はやや低下する可能性があります。
|
| 221 |
+
- Text Encoder (Qwen3)のメモリ使用量を削減するために、`--fp8_llm`オプションを指定可能です。
|
| 222 |
+
- メモリ節約のために`--gradient_checkpointing`と`--gradient_checkpointing_cpu_offload`が利用可能です。詳細は[HunyuanVideoドキュメント](./hunyuan_video.md#memory-optimization)を参照してください。
|
| 223 |
+
- `--blocks_to_swap`オプションで、一部のブロックをCPUにオフロードできます。オフロード可能な最大ブロック数は28です。
|
| 224 |
+
|
| 225 |
+
</details>
|
| 226 |
+
|
| 227 |
+
### Attention
|
| 228 |
+
|
| 229 |
+
- `--sdpa` for PyTorch's scaled dot product attention (does not require additional dependencies).
|
| 230 |
+
- `--flash_attn` for [FlashAttention](https://github.com/Dao-AILab/flash-attention).
|
| 231 |
+
- `--xformers` for xformers (requires `--split_attn`).
|
| 232 |
+
- `--sage_attn` for SageAttention (not yet supported for training).
|
| 233 |
+
- `--split_attn` processes attention in chunks, reducing VRAM usage slightly.
|
| 234 |
+
|
| 235 |
+
<details>
|
| 236 |
+
<summary>日本語</summary>
|
| 237 |
+
|
| 238 |
+
- `--sdpa`でPyTorchのscaled dot product attentionを使用(追加の依存ライブラリを必要としません)。
|
| 239 |
+
- `--flash_attn`で[FlashAttention](https://github.com/Dao-AILab/flash-attention)を使用。
|
| 240 |
+
- `--xformers`でxformersの利用も可能(`--split_attn`が必要)。
|
| 241 |
+
- `--sage_attn`でSageAttentionを使用(現時点では学習に未対応)。
|
| 242 |
+
- `--split_attn`を指定すると、attentionを分割して処理し、VRAM使用量をわずかに減らします。
|
| 243 |
+
|
| 244 |
+
</details>
|
| 245 |
+
|
| 246 |
+
### Sample images during training with De-Turbo model or Training Adapter / De-TurboモデルまたはTraining Adapterで学習中にサンプル画像を生成する
|
| 247 |
+
|
| 248 |
+
When training with the De-Turbo model or Training Adapter, add negative prompt and CFG scale to the sampling options to generate sample images with CFG. It is also recommended to increase the number of steps. `--l` specifies the CFG scale (default 4).
|
| 249 |
+
|
| 250 |
+
```text
|
| 251 |
+
A beautiful landscape painting of mountains during sunset. --n bad quality --w 1280 --h 720 --fs 3 --s 20 --d 1234 --l 4
|
| 252 |
+
```
|
| 253 |
+
|
| 254 |
+
<details>
|
| 255 |
+
<summary>日本語</summary>
|
| 256 |
+
|
| 257 |
+
De-TurboモデルまたはTraining Adapterで学習する場合、サンプリングオプションにネガティブプロンプトとCFGスケールを追加して、CFGありでサンプル画像を生成してください。またステップ数も増やすことをお勧めします。`--l`でCFGスケールを指定します(デフォルトは4です)。
|
| 258 |
+
|
| 259 |
+
```text
|
| 260 |
+
A beautiful landscape painting of mountains during sunset. --n bad quality --w 1280 --h 720 --fs 3 --s 20 --d 1234 --l 4
|
| 261 |
+
```
|
| 262 |
+
|
| 263 |
+
</details>
|
| 264 |
+
|
| 265 |
+
## Finetuning
|
| 266 |
+
|
| 267 |
+
Finetuning uses a dedicated script `zimage_train.py`. This script performs full finetuning of the model, not LoRA. Sample usage is as follows:
|
| 268 |
+
|
| 269 |
+
```bash
|
| 270 |
+
accelerate launch --num_cpu_threads_per_process 1 src/musubi_tuner/zimage_train.py \
|
| 271 |
+
--dit path/to/dit_model \
|
| 272 |
+
--vae path/to/vae_model \
|
| 273 |
+
--text_encoder path/to/text_encoder \
|
| 274 |
+
--dataset_config path/to/toml \
|
| 275 |
+
--sdpa --mixed_precision bf16 --gradient_checkpointing \
|
| 276 |
+
--optimizer_type adafactor --learning_rate 1e-6 --fused_backward_pass \
|
| 277 |
+
--optimizer_args "relative_step=False" "scale_parameter=False" "warmup_init=False" \
|
| 278 |
+
--max_grad_norm 0 --lr_scheduler constant_with_warmup --lr_warmup_steps 10 \
|
| 279 |
+
--max_data_loader_n_workers 2 --persistent_data_loader_workers \
|
| 280 |
+
--max_train_epochs 16 --save_every_n_epochs 1 --seed 42 \
|
| 281 |
+
--output_dir path/to/output_dir --output_name name-of-model
|
| 282 |
+
```
|
| 283 |
+
|
| 284 |
+
- Uses `zimage_train.py`.
|
| 285 |
+
- Finetuning requires a large amount of VRAM. The use of memory saving options is strongly recommended.
|
| 286 |
+
- `--full_bf16`: Loads the model weights in bfloat16 format to significantly reduce VRAM usage.
|
| 287 |
+
- `--optimizer_type adafactor`: Using Adafactor is recommended for finetuning.
|
| 288 |
+
- `--fused_backward_pass`: Reduces VRAM usage during the backward pass when using Adafactor.
|
| 289 |
+
- `--mem_eff_save`: Reduces main memory (RAM) usage when saving checkpoints.
|
| 290 |
+
- `--blocks_to_swap`: Swaps model blocks between VRAM and main memory to reduce VRAM usage. This is effective when VRAM is limited.
|
| 291 |
+
- `--disable_numpy_memmap`: Disables numpy memory mapping for model loading, loading with standard file read. Increases RAM usage but may speed up model loading in some cases.
|
| 292 |
+
- `--block_swap_optimizer_patch_params` option is available to patch optimizer parameters for block swapping.
|
| 293 |
+
|
| 294 |
+
`--full_bf16` reduces VRAM usage by about 30GB but may impact model accuracy as the weights are kept in bfloat16. Note that the optimizer state is still kept in float32. In addition, it is recommended to use this with an optimizer that supports stochastic rounding. In this repository, Adafactor optimizer with `--fused_backward_pass` option supports stochastic rounding.
|
| 295 |
+
|
| 296 |
+
`--block_swap_optimizer_patch_params` option moves the gradients to the same device as the parameters during the optimizer step, which makes it work with block swapping. This workaround currently works with AdamW and Adafactor etc. AdamW8bit and other optimizers do not work with this patch due to their specific implementation.
|
| 297 |
+
|
| 298 |
+
When using `--mem_eff_save`, please note that traditional saving methods are still used when saving the optimizer state in `--save_state`, requiring about 20GB of main memory.
|
| 299 |
+
|
| 300 |
+
### Recommended Settings
|
| 301 |
+
|
| 302 |
+
We are still exploring the optimal settings. The configurations above are just examples, so please adjust them as needed. We welcome your feedback.
|
| 303 |
+
|
| 304 |
+
If you have ample VRAM, you can use any optimizer of your choice. `--full_bf16` is not recommended.
|
| 305 |
+
|
| 306 |
+
For limited VRAM environments (e.g., 48GB or less), you can use one of the following options:
|
| 307 |
+
|
| 308 |
+
1. Use `--blocks_to_swap` + `--block_swap_optimizer_patch_params` + compatible optimizer.
|
| 309 |
+
2. Use `--blocks_to_swap` + Adafactor + `--fused_backward_pass`.
|
| 310 |
+
3. Use `--full_bf16` + Adafactor optimizer + `--fused_backward_pass`.
|
| 311 |
+
4. Use `--blocks_to_swap` + `--full_bf16` + Adafactor optimizer + `--fused_backward_pass`.
|
| 312 |
+
|
| 313 |
+
VRAM usage decreases in the order of 1. to 4. (4. being the least). The time taken for training increases in the order of 2. = 3. < 4. < 1. (1. being the slowest). The expected accuracy is in the order of 1. > 2. > 3. = 4. (1. being the highest).
|
| 314 |
+
|
| 315 |
+
The sample configuration is a recommended setting when using option 3. If VRAM is further constrained, you can also use option 4. Adjust `--lr_warmup_steps` to a value between about 10 and 100.
|
| 316 |
+
|
| 317 |
+
`--fused_backward_pass` currently does not support gradient accumulation. Also, since max grad norm may not work as expected, it is recommended to specify `--max_grad_norm 0`.
|
| 318 |
+
|
| 319 |
+
Experience with other models suggests that the learning rate may need to be reduced significantly; something in the range of 1e-6 to 1e-5 might be a good place to start.
|
| 320 |
+
|
| 321 |
+
<details>
|
| 322 |
+
<summary>日本語</summary>
|
| 323 |
+
|
| 324 |
+
Finetuningは専用のスクリプト`zimage_train.py`を使用します。このスクリプトはLoRAではなく、モデル全体のfinetuningを行います。
|
| 325 |
+
|
| 326 |
+
- `zimage_train.py`を使用します。
|
| 327 |
+
- Finetuningは大量のVRAMを必要とします。メモリ節約オプションの使用を強く推奨します。
|
| 328 |
+
- `--full_bf16`: モデルの重みをbfloat16形式で読み込み、VRAM使用量を大幅に削減します。
|
| 329 |
+
- `--optimizer_type adafactor`: FinetuningではAdafactorの使用が推奨されます。
|
| 330 |
+
- `--fused_backward_pass`: Adafactor使用時に、backward pass中のVRAM使用量を削減します。
|
| 331 |
+
- `--mem_eff_save`: チェックポイント保存時のメインメモリ(RAM)使用量を削減します。
|
| 332 |
+
- `--blocks_to_swap`: モデルのブロックをVRAMとメインメモリ間でスワップし、VRAM使用量を削減します。VRAMが少ない場合に有効です。
|
| 333 |
+
- `--disable_numpy_memmap`: モデル読み込み時のnumpyメモリマッピングを無効化し、標準のファイル読み込みで読み込みを行います。RAM使用量は増加しますが、場合によってはモデルの読み込みが高速化されます。
|
| 334 |
+
- `--block_swap_optimizer_patch_params`: ブロックスワッピングのためのオプティマイザパラメータをパッチするためのオプションです。
|
| 335 |
+
|
| 336 |
+
`--full_bf16`はVRAM使用量を約30GB削減しますが、重みがbfloat16で保持されるため、モデルの精度に影響を与える可能性があります。オプティマイザの状態はfloat32で保持されます。また、効率的な学習のために、stochastic roundingをサポートするオプティマイザとの併用が推奨されます。このリポジトリでは、`adafactor`オプティマイザに`--fused_backward_pass`オプションの組み合わせでstochastic roundingをサポートしています。
|
| 337 |
+
|
| 338 |
+
`--block_swap_optimizer_patch_params`オプションにより、オプティマイザステップ中に勾配がパラメータと同じデバイスに移動され、ブロックスワッピングで動作するようになります。この回避策は現在AdamWやAdafactorなどで動作します。オプティマイザの実装に依存するため、AdamW8bitやその他のオプティマイザはこのパッチでは動作しません。
|
| 339 |
+
|
| 340 |
+
`--mem_eff_save`を使用する場合でも、`--save_state`においてはオプティマイザの状態を保存する際に従来の保存方法が依然として使用されるため、約20GBのメインメモリが必要であることに注意してください。
|
| 341 |
+
|
| 342 |
+
### 推奨設定
|
| 343 |
+
|
| 344 |
+
最適な設定はまだ調査中です。上記の構成はあくまで一例ですので、必要に応じて調整してください。フィードバックをお待ちしております。
|
| 345 |
+
|
| 346 |
+
十分なVRAMがある場合は、お好みのオプティマイザを使用できます。`--full_bf16`は推奨されません。
|
| 347 |
+
|
| 348 |
+
VRAMが限られている環境(例:48GB以下)の場合は、次のいずれかのオプションを利用できます。
|
| 349 |
+
|
| 350 |
+
1. `--blocks_to_swap`+`--block_swap_optimizer_patch_params`+互換性のあるオプティマイザを使用する。
|
| 351 |
+
2. `--blocks_to_swap`+Adafactor+`--fused_backward_pass`を使用する。
|
| 352 |
+
3. `--full_bf16`+Adafactorオプティマイザ+`--fused_backward_pass`を使用する。
|
| 353 |
+
4. `--blocks_to_swap`+`--full_bf16`+Adafactorオプティマイザ+`--fused_backward_pass`を使用する。
|
| 354 |
+
|
| 355 |
+
VRAM使用量は1.から4.の順で減少します(4.が最も少ない)。学習にかかる時間は2.=3. < 4. < 1.の順で長くなります(1.が最も遅い)。期待される精度は、1. > 2. > 3. = 4.の順になります(1.が最も高い)。
|
| 356 |
+
|
| 357 |
+
サンプルの設定は、3.のオプションを使用する場合の推奨設定です。VRAMがさらに制約されている場合は、4.のオプションを使用することもできます。`--lr_warmup_steps`は約10から100の間の値に調整してください。
|
| 358 |
+
|
| 359 |
+
現時点では`--fused_backward_pass`はgradient accumulationに対応していません。またmax grad normも想定通りに動作しない可能性があるため、`--max_grad_norm 0`を指定することを推奨します。
|
| 360 |
+
|
| 361 |
+
他のモデルでの経験則では、学習率は大幅に減らす必要があるかもしれません。1e-6から1e-5の範囲で試してみると良いでしょう。
|
| 362 |
+
|
| 363 |
+
</details>
|
| 364 |
+
|
| 365 |
+
## Inference / 推論
|
| 366 |
+
|
| 367 |
+
Inference uses a dedicated script `zimage_generate_image.py`.
|
| 368 |
+
|
| 369 |
+
```bash
|
| 370 |
+
python src/musubi_tuner/zimage_generate_image.py \
|
| 371 |
+
--dit path/to/dit_model \
|
| 372 |
+
--vae path/to/vae_model \
|
| 373 |
+
--text_encoder path/to/text_encoder \
|
| 374 |
+
--prompt "A cat" \
|
| 375 |
+
--image_size 1024 1024 --infer_steps 25 \
|
| 376 |
+
--flow_shift 3.0 --guidance_scale 0.0 \
|
| 377 |
+
--attn_mode torch \
|
| 378 |
+
--save_path path/to/save/dir \
|
| 379 |
+
--seed 1234 --lora_multiplier 1.0 --lora_weight path/to/lora.safetensors
|
| 380 |
+
```
|
| 381 |
+
|
| 382 |
+
- Uses `zimage_generate_image.py`.
|
| 383 |
+
- `--flow_shift` defaults to 3.0.
|
| 384 |
+
- `--guidance_scale` defaults to 0.0 (no classifier-free guidance, for Turbo model). Specify a positive value to enable CFG (4.0 is the official default for Base model).
|
| 385 |
+
- `--fp8` and `--fp8_scaled` options are available for DiT.
|
| 386 |
+
- `--fp8_llm` option is available for Text Encoder.
|
| 387 |
+
|
| 388 |
+
<details>
|
| 389 |
+
<summary>日本語</summary>
|
| 390 |
+
|
| 391 |
+
推論は専用のスクリプト`zimage_generate_image.py`を使用します。
|
| 392 |
+
|
| 393 |
+
コマンド例は英語版を参照してください。
|
| 394 |
+
|
| 395 |
+
- `zimage_generate_image.py`を使用します。
|
| 396 |
+
- `--flow_shift`のデフォルトは3.0です。
|
| 397 |
+
- `--guidance_scale`のデフォルトは0.0(Classifier-Free Guidanceなし、Turboモデル用)です。正の値を指定するとCFGが有効になります(Baseモデルの公式デフォルトは4.0です)。
|
| 398 |
+
- `--fp8`および`--fp8_scaled`オプションがDiTで利用可能です。
|
| 399 |
+
- `--fp8_llm`オプションがテキストエンコーダーで利用可能です。
|
| 400 |
+
- `--blocks_to_swap`オプションで、一部のブロックをCPUにオフロードできます。オフロード可能な最大ブロック数は28です。
|
| 401 |
+
- LoRAの読み込みオプション(`--lora_weight`、`--lora_multiplier`、`--include_patterns`、`--exclude_patterns`)が利用可能です。LyCORISもサポートされています。
|
| 402 |
+
- `--save_merged_model`オプションは、LoRAの重みをマージした後にDiTモデルを保存するためのオプションです。これを指定すると推論はスキップされます。
|
| 403 |
+
|
| 404 |
+
</details>
|
logs/ltx2_cache_gpu2.log
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
logs/ltx2_cache_gpu7.log
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
ltx2_train.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
from musubi_tuner.ltx2_train import main
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
if __name__ == "__main__":
|
| 6 |
+
main()
|
merge_lora.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.merge_lora import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
qwen_image_train.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.qwen_image_train import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
qwen_image_train_network.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.qwen_image_train_network import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
run_cache.sh
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Launch LTX-2 latent caching on all 8 GPUs in parallel, then (only if every
# latent job succeeded) launch text-encoder-output caching the same way.
# Expects LTX2_CKPT and GEMMA_ROOT to be set in the environment.
cd /workspace/musubi-tuner || echo "Failed to open directory"

LATENT_BATCH_SIZE=12
LATENT_NUM_WORKERS=6

TEXT_BATCH_SIZE=1
TEXT_NUM_WORKERS=1

mkdir -p logs

latent_pids=()

# Phase 1: one latent-caching process per GPU, each with its own log file.
for i in 0 1 2 3 4 5 6 7; do
    CUDA_VISIBLE_DEVICES=$i python -m musubi_tuner.ltx2_cache_latents \
        --dataset_config "train/ltxxx_gpu${i}.toml" \
        --ltx2_checkpoint "$LTX2_CKPT" \
        --device cuda \
        --vae_dtype bf16 \
        --ltx2_mode av \
        --ltx2_audio_source video \
        --batch_size "$LATENT_BATCH_SIZE" \
        --num_workers "$LATENT_NUM_WORKERS" \
        --skip_existing \
        > "logs/ltx2_cache_latents_gpu${i}.log" 2>&1 &
    latent_pids+=($!)
done

# Wait for every latent job and remember whether any of them failed.
latent_failed=0
for pid in "${latent_pids[@]}"; do
    if ! wait "$pid"; then
        latent_failed=1
    fi
done

# FIX: the original read `if[ ... ]` (no space after `if`), which bash parses
# as an unknown command `if[` and aborts with "command not found".
if [ "$latent_failed" -ne 0 ]; then
    echo "Latent caching failed on at least one GPU. Check logs/ltx2_cache_latents_gpu*.log"
else
    echo "Latent caching finished successfully on all 8 GPUs. Starting text cache..."

    text_pids=()

    # Phase 2: one text-encoder-output caching process per GPU.
    for i in 0 1 2 3 4 5 6 7; do
        CUDA_VISIBLE_DEVICES=$i python -m musubi_tuner.ltx2_cache_text_encoder_outputs \
            --dataset_config "train/ltxxx_gpu${i}.toml" \
            --ltx2_checkpoint "$LTX2_CKPT" \
            --gemma_root "$GEMMA_ROOT" \
            --gemma_load_in_8bit \
            --device cuda \
            --mixed_precision bf16 \
            --ltx2_mode av \
            --batch_size "$TEXT_BATCH_SIZE" \
            --num_workers "$TEXT_NUM_WORKERS" \
            --skip_existing \
            > "logs/ltx2_cache_text_gpu${i}.log" 2>&1 &
        text_pids+=($!)
    done

    text_failed=0
    for pid in "${text_pids[@]}"; do
        if ! wait "$pid"; then
            text_failed=1
        fi
    done

    # FIX: same `if[` -> `if [` syntax error as above.
    if [ "$text_failed" -ne 0 ]; then
        echo "Text caching failed on at least one GPU. Check logs/ltx2_cache_text_gpu*.log"
    else
        echo "Latent cache and text cache both completed successfully."
    fi
fi
|
scripts/merge_dit_to_comfy.py
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""Merge a finetuned DIT checkpoint with the original LTX-2 checkpoint for ComfyUI.
|
| 3 |
+
|
| 4 |
+
Takes a DIT-only checkpoint (model.X keys) saved by ltx2_train.py and:
|
| 5 |
+
1. Renames keys: model.X -> model.diffusion_model.X
|
| 6 |
+
2. Optionally merges with the original checkpoint to produce a complete file
|
| 7 |
+
(VAE, audio VAE, vocoder, text_embedding_projection, etc.)
|
| 8 |
+
|
| 9 |
+
Usage:
|
| 10 |
+
python scripts/merge_dit_to_comfy.py ^
|
| 11 |
+
--dit_checkpoint output/ltx2_finetune-step00000100.safetensors ^
|
| 12 |
+
--original_checkpoint E:/ComfyUI_windows_portable/ComfyUI/models/checkpoints/ltx-2-19b-dev.safetensors ^
|
| 13 |
+
--output merged_comfy.safetensors
|
| 14 |
+
|
| 15 |
+
# Rename keys only (no merge, smaller file):
|
| 16 |
+
python scripts/merge_dit_to_comfy.py ^
|
| 17 |
+
--dit_checkpoint output/ltx2_finetune-step00000100.safetensors ^
|
| 18 |
+
--output comfy_dit_only.safetensors
|
| 19 |
+
"""
|
| 20 |
+
from __future__ import annotations
|
| 21 |
+
|
| 22 |
+
import argparse
|
| 23 |
+
import sys
|
| 24 |
+
import os
|
| 25 |
+
|
| 26 |
+
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "src"))
|
| 27 |
+
|
| 28 |
+
from tqdm import tqdm
|
| 29 |
+
from safetensors.torch import load_file, save_file
|
| 30 |
+
from musubi_tuner.utils.safetensors_utils import MemoryEfficientSafeOpen
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def main():
    """Convert a finetuned DIT-only LTX-2 checkpoint to ComfyUI layout.

    Renames every ``model.X`` key to ``model.diffusion_model.X`` and, when
    ``--original_checkpoint`` is given, merges in all non-DIT tensors (VAE,
    vocoder, etc.) from the original file and restores original dtypes for
    overlapping keys. Writes the result with ``safetensors.save_file``.
    """
    parser = argparse.ArgumentParser(description="Merge finetuned DIT with original LTX-2 checkpoint for ComfyUI")
    parser.add_argument("--dit_checkpoint", type=str, required=True, help="Path to finetuned DIT checkpoint")
    parser.add_argument("--original_checkpoint", type=str, default=None, help="Path to original LTX-2 checkpoint (for merging)")
    parser.add_argument("--output", type=str, required=True, help="Output path for the merged/converted checkpoint")
    args = parser.parse_args()

    # Load finetuned DIT fully into CPU memory (tensors are re-keyed below).
    print(f"Loading finetuned DIT: {args.dit_checkpoint}")
    dit_sd = load_file(args.dit_checkpoint, device="cpu")
    print(f" {len(dit_sd)} keys loaded")

    # Rename keys: model.X -> model.diffusion_model.X (ComfyUI checkpoint layout).
    # Keys without the "model." prefix are carried over unchanged.
    print("Renaming keys to ComfyUI format...")
    renamed = {}
    renamed_count = 0
    for key, value in dit_sd.items():
        if key.startswith("model."):
            renamed["model.diffusion_model." + key[len("model."):]] = value
            renamed_count += 1
        else:
            renamed[key] = value
    del dit_sd  # drop the old dict early; tensors stay alive via `renamed`
    print(f" Renamed {renamed_count} keys (model.X -> model.diffusion_model.X)")

    extra_metadata = {}

    # Merge with original checkpoint if provided.
    if args.original_checkpoint:
        print(f"Merging with original checkpoint: {args.original_checkpoint}")
        with MemoryEfficientSafeOpen(args.original_checkpoint) as f:
            all_keys = f.keys()
            # Keys present only in the original (VAE, audio VAE, vocoder, ...).
            missing_keys = [k for k in all_keys if k not in renamed]
            print(f" {len(missing_keys)} non-overlapping keys to copy from original")

            # Restore original dtypes for overlapping keys (e.g. scale_shift_table F32 -> BF16 from --full_bf16)
            dtype_fixed = 0
            for key in all_keys:
                if key in renamed:
                    # NOTE(review): assumes f.header[key]["dtype"] yields the
                    # safetensors dtype string ("F32"/"F16"/"BF16") — verify
                    # against MemoryEfficientSafeOpen's header schema.
                    orig_dtype = f.header[key]["dtype"]
                    cur_dtype = renamed[key].dtype
                    # Map safetensors dtype string to torch dtype for comparison
                    st_to_torch = {"F32": "torch.float32", "F16": "torch.float16", "BF16": "torch.bfloat16"}
                    if st_to_torch.get(orig_dtype) and str(cur_dtype) != st_to_torch[orig_dtype]:
                        # Read one original tensor just to obtain its torch dtype.
                        orig_tensor = f.get_tensor(key)
                        renamed[key] = renamed[key].to(orig_tensor.dtype)
                        dtype_fixed += 1
            if dtype_fixed:
                print(f" Restored original dtype for {dtype_fixed} keys")

            # Copy all non-DIT tensors one by one to keep peak memory low.
            for key in tqdm(missing_keys, desc=" Copying"):
                renamed[key] = f.get_tensor(key)
            # Preserve the original "config" metadata entry if present.
            orig_meta = f.metadata()
            if orig_meta and "config" in orig_meta:
                extra_metadata["config"] = orig_meta["config"]
            print(f" Merged checkpoint has {len(renamed)} keys")
    else:
        print("No --original_checkpoint provided, saving DIT-only with ComfyUI key format")

    # Copy training metadata from the DIT checkpoint
    with MemoryEfficientSafeOpen(args.dit_checkpoint) as f:
        dit_meta = f.metadata()
        if dit_meta:
            extra_metadata.update(dit_meta)

    # Save (metadata must be a str->str dict or None for safetensors).
    print(f"Saving to: {args.output}")
    os.makedirs(os.path.dirname(args.output) or ".", exist_ok=True)
    save_file(renamed, args.output, extra_metadata if extra_metadata else None)
    size_gb = os.path.getsize(args.output) / (1024**3)
    print(f"Done! Output size: {size_gb:.2f} GB")
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
if __name__ == "__main__":
|
| 107 |
+
main()
|
src/musubi_tuner/audio_io_utils.py
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from typing import Optional
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def coerce_decoded_audio_to_channels_first(audio: np.ndarray, channels: Optional[int] = None) -> np.ndarray:
    """Return decoded audio as a 2D ``[channels, samples]`` array.

    Accepts the layouts various decoders produce:
    - 1D interleaved data ``[L0, R0, L1, R1, ...]``
    - 2D sample-major ``[samples, channels]``
    - 2D channel-major ``[channels, samples]`` (returned as-is)

    ``channels`` is an optional hint; non-positive hints are ignored.
    Raises ValueError for arrays that are neither 1D nor 2D.
    """
    data = np.asarray(audio)

    ch = None
    if channels is not None:
        ch = int(channels)
        if ch <= 0:
            ch = None  # treat a non-positive hint as "unknown"

    if data.ndim == 1:
        # De-interleave when a usable multi-channel hint is available.
        if ch is not None and ch > 1 and data.size % ch == 0:
            return data.reshape(-1, ch).T
        return data.reshape(1, -1)

    if data.ndim != 2:
        raise ValueError(f"Unexpected audio ndarray shape: {data.shape}")

    rows, cols = data.shape
    if ch is not None:
        if rows == ch:
            return data
        if cols == ch:
            return data.T
        # Packed interleaved in 2D: PyAV returns (1, samples*channels) for
        # packed formats — rows is the plane count, not the channel count.
        if rows < cols and rows != ch:
            return data.reshape(-1, ch).T

    # Channel count unknown: assume the longer axis holds the samples.
    if rows > cols:
        return data.T
    return data
|
| 46 |
+
|
src/musubi_tuner/audio_loss_balance.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def update_audio_presence_ema(audio_presence_ema: float, balance_beta: float, has_audio_loss: bool) -> float:
    """Blend the running audio-presence EMA toward this batch's presence flag.

    ``balance_beta`` is the smoothing factor; the result is clamped to [0, 1].
    """
    presence = 1.0 if has_audio_loss else 0.0
    updated = (1.0 - balance_beta) * float(audio_presence_ema) + balance_beta * presence
    if updated < 0.0:
        return 0.0
    if updated > 1.0:
        return 1.0
    return updated
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def compute_inverse_frequency_audio_weight(
    base_audio_weight: float,
    audio_presence_ema: float,
    balance_eps: float,
    balance_min: float,
    balance_max: float,
) -> float:
    """Boost the audio loss weight in inverse proportion to audio-batch frequency.

    The presence EMA is floored at ``balance_eps`` so rare-audio runs cannot
    divide by ~0; the scaled weight is clamped to [balance_min, balance_max].
    """
    frequency_floor = max(float(audio_presence_ema), float(balance_eps))
    scaled = float(base_audio_weight) / frequency_floor
    return min(max(scaled, float(balance_min)), float(balance_max))
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def update_loss_ema(loss_ema: float, loss_value: float, ema_decay: float) -> float:
    """Fold a new scalar loss into its running EMA.

    The result is floored at 1e-12 so later ratio computations never divide by zero.
    """
    d = float(ema_decay)
    updated = d * float(loss_ema) + (1.0 - d) * float(loss_value)
    return updated if updated > 1e-12 else 1e-12
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def compute_ema_magnitude_audio_weight(
    base_audio_weight: float,
    audio_loss_ema: float,
    video_loss_ema: float,
    target_audio_ratio: float,
    balance_min: float,
    balance_max: float,
) -> float:
    """Rescale the audio weight so audio loss tracks a target fraction of video loss.

    Both EMAs are floored at 1e-12 to keep the ratio finite; the resulting
    weight is clamped to [balance_min, balance_max].
    """
    video_floor = max(float(video_loss_ema), 1e-12)
    audio_floor = max(float(audio_loss_ema), 1e-12)
    desired_audio_loss = float(target_audio_ratio) * video_floor
    scaled = float(base_audio_weight) * (desired_audio_loss / audio_floor)
    return min(max(scaled, float(balance_min)), float(balance_max))
|
| 45 |
+
|
src/musubi_tuner/audio_supervision.py
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from dataclasses import dataclass
|
| 4 |
+
from typing import Any, Optional
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@dataclass
|
| 8 |
+
class AudioSupervisionState:
|
| 9 |
+
expected_batches: int = 0
|
| 10 |
+
supervised_batches: int = 0
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
@dataclass
|
| 14 |
+
class AudioSupervisionAlert:
|
| 15 |
+
ratio: float
|
| 16 |
+
min_ratio: float
|
| 17 |
+
expected_batches: int
|
| 18 |
+
supervised_batches: int
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def normalize_audio_supervision_mode(raw_mode: Any) -> str:
    """Map a user-supplied supervision-mode value onto 'off' | 'warn' | 'error'.

    Booleans map to 'warn'/'off'; strings are matched case-insensitively
    against common aliases; None/empty maps to 'off'. Raises ValueError for
    anything unrecognized.
    """
    if isinstance(raw_mode, bool):
        return "warn" if raw_mode else "off"
    text = str(raw_mode or "off").strip().lower()
    aliases = {
        "off": "off",
        "none": "off",
        "false": "off",
        "0": "off",
        "disable": "off",
        "disabled": "off",
        "warn": "warn",
        "warning": "warn",
        "error": "error",
        "fail": "error",
        "raise": "error",
    }
    normalized = aliases.get(text)
    if normalized is None:
        raise ValueError(f"audio_supervision_mode must be one of ['off', 'warn', 'error']. Got: {raw_mode}")
    return normalized
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def reset_audio_supervision_state(state: AudioSupervisionState) -> None:
    """Zero the per-interval expected/supervised batch counters in-place."""
    state.expected_batches, state.supervised_batches = 0, 0
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def update_and_check_audio_supervision(
    state: AudioSupervisionState,
    *,
    mode: str,
    warmup_steps: int,
    check_interval: int,
    min_ratio: float,
    audio_expected_for_batch: bool,
    audio_supervised_for_batch: bool,
) -> Optional[AudioSupervisionAlert]:
    """Track how often batches that should carry audio supervision actually do.

    Counters in *state* are bumped on every call (unless ``mode`` is "off").
    An alert is produced only at check-interval boundaries, after warmup, when
    the supervised/expected ratio drops below ``min_ratio``; otherwise None.
    """
    if mode == "off":
        return None

    # Tally this batch. The two flags are counted independently.
    if audio_expected_for_batch:
        state.expected_batches += 1
    if audio_supervised_for_batch:
        state.supervised_batches += 1

    expected = state.expected_batches
    interval = int(check_interval)
    # Skip checking until there is data, warmup has passed, and we sit exactly
    # on a check-interval boundary (non-positive interval disables checks).
    if expected <= 0 or expected < int(warmup_steps) or interval <= 0 or expected % interval != 0:
        return None

    supervised = state.supervised_batches
    observed_ratio = supervised / float(expected)
    if observed_ratio >= float(min_ratio):
        return None

    return AudioSupervisionAlert(
        ratio=observed_ratio,
        min_ratio=float(min_ratio),
        expected_batches=expected,
        supervised_batches=supervised,
    )
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def format_audio_supervision_alert(alert: AudioSupervisionAlert) -> str:
    """Render an audio-supervision alert as a human-readable warning message."""
    parts = [
        "Audio supervision monitor: low supervised-audio ratio ",
        f"({alert.ratio:.3f} < {alert.min_ratio:.3f}, ",
        f"supervised={alert.supervised_batches}, expected={alert.expected_batches}). ",
        "Audio prediction is likely under-trained because AV batches are missing audio latents.",
    ]
    return "".join(parts)
|
src/musubi_tuner/audio_utils.py
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
import torch.nn.functional as F
|
| 5 |
+
import torchaudio
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def time_stretch_preserve_pitch(waveform: torch.Tensor, sample_rate: int, target_samples: int) -> torch.Tensor:
    """Stretch/compress *waveform* to ``target_samples`` without shifting pitch.

    Implemented as an STFT -> torchaudio ``TimeStretch`` (phase vocoder) ->
    ISTFT round trip, then trimmed or zero-padded to exactly ``target_samples``.

    waveform: [C, L] (or [L], promoted to [1, L]) float tensor, CPU or GPU.
    returns:  [C, target_samples] float32 tensor.
    """
    wav = waveform.unsqueeze(0) if waveform.dim() == 1 else waveform
    wav = wav.to(torch.float32)

    source_len = wav.shape[-1]
    if source_len == 0 or target_samples <= 0:
        # Degenerate request: hand back an empty [C, 0] tensor.
        return wav[..., :0]
    if source_len == target_samples:
        return wav

    # TimeStretch semantics: rate > 1 shortens the signal, rate < 1 lengthens it.
    stretch_rate = float(source_len) / float(target_samples)

    # Derive STFT parameters from the sample rate (~46 ms window, ~11.5 ms hop).
    win_seconds = 0.046
    hop_seconds = 0.0115
    # Window length: power of two, at least 256, close to the target duration.
    n_fft = 1 << max(8, int(math.floor(math.log2(max(256, int(sample_rate * win_seconds))))))
    win_length = n_fft
    hop_length = min(max(64, int(sample_rate * hop_seconds)), win_length // 2)

    window = torch.hann_window(win_length, device=wav.device, dtype=wav.dtype)

    spec = torch.stft(
        wav,
        n_fft=n_fft,
        hop_length=hop_length,
        win_length=win_length,
        window=window,
        center=True,
        return_complex=True,
    )  # complex [C, F, T]

    # n_freq must match the STFT's frequency-bin count (n_fft // 2 + 1).
    vocoder = torchaudio.transforms.TimeStretch(
        n_freq=spec.shape[-2],
        hop_length=hop_length,
        fixed_rate=stretch_rate,
    ).to(wav.device)

    out = torch.istft(
        vocoder(spec),
        n_fft=n_fft,
        hop_length=hop_length,
        win_length=win_length,
        window=window,
        center=True,
        length=target_samples,
    )

    # istft(length=...) should already give the exact length; guard anyway.
    produced = out.shape[-1]
    if produced > target_samples:
        return out[..., :target_samples]
    if produced < target_samples:
        return F.pad(out, (0, target_samples - produced))
    return out
|
src/musubi_tuner/cache_latents.py
ADDED
|
@@ -0,0 +1,440 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import os
|
| 3 |
+
from typing import Optional, Union
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
import torch
|
| 7 |
+
from tqdm import tqdm
|
| 8 |
+
|
| 9 |
+
from musubi_tuner.dataset import config_utils
|
| 10 |
+
from musubi_tuner.dataset.config_utils import BlueprintGenerator, ConfigSanitizer
|
| 11 |
+
from PIL import Image
|
| 12 |
+
|
| 13 |
+
import logging
|
| 14 |
+
|
| 15 |
+
from musubi_tuner.dataset.image_video_dataset import BaseDataset, ItemInfo, save_latent_cache, ARCHITECTURE_HUNYUAN_VIDEO
|
| 16 |
+
from musubi_tuner.hunyuan_model.vae import load_vae
|
| 17 |
+
from musubi_tuner.hunyuan_model.autoencoder_kl_causal_3d import AutoencoderKLCausal3D
|
| 18 |
+
from musubi_tuner.utils.model_utils import str_to_dtype
|
| 19 |
+
|
| 20 |
+
logger = logging.getLogger(__name__)
|
| 21 |
+
logging.basicConfig(level=logging.INFO)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def show_image(
    image: Union[list[Union[Image.Image, np.ndarray]], Image.Image, np.ndarray],
    control_image: Optional[Union[np.ndarray, list[np.ndarray]]] = None,
) -> int:
    """Display image(s), plus optional control images, in an OpenCV window.

    A single image (PIL image or HWC ndarray) is shown as-is; a list of frames
    is reduced to its first and last frame. Blocks on each image until a key
    is pressed; pressing 'q' or 'd' aborts early. Returns the last key code so
    callers can dispatch on it.

    FIXES vs. original: the list type annotation was malformed
    (``list[Union[...], Union[...]]`` passes two type arguments to ``list``),
    and the final ``if k in ('q','d'): return k`` branch was redundant with
    the unconditional ``return k`` that followed it.
    """
    import cv2

    # Wrap a single image; for sequences keep only the first and last frames.
    if (isinstance(image, np.ndarray) and len(image.shape) == 3) or isinstance(image, Image.Image):
        imgs = [image]
    else:
        imgs = [image[0], image[-1]]
    if len(imgs) > 1:
        print(f"Number of images: {len(image)}")

    if control_image is not None:
        if isinstance(control_image, np.ndarray):
            control_image = [control_image]
        imgs.extend(control_image)
        print(f"Number of control images: {len(control_image)}")

    k = 0
    for i, img in enumerate(imgs):
        # NOTE(review): `img.shape` assumes an ndarray here — a PIL image has
        # no `.shape`; confirm callers convert before passing PIL for display.
        if len(imgs) > 1:
            print(f"Image {i + 1} of {len(imgs)}: {img.shape}")
        else:
            print(f"Image: {img.shape}")
        cv2_img = np.array(img) if isinstance(img, Image.Image) else img
        cv2_img = cv2.cvtColor(cv2_img, cv2.COLOR_RGB2BGR)
        cv2.imshow("image", cv2_img)
        k = cv2.waitKey(0)
        cv2.destroyAllWindows()
        # 'q' (quit) / 'd' (delete) abort the preview loop immediately.
        if k == ord("q") or k == ord("d"):
            return k
    return k
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def show_console(
    image: Union[list[Union[Image.Image, np.ndarray], Union[Image.Image, np.ndarray]]],
    width: int,
    back: str,
    interactive: bool = False,
    control_image: Optional[Union[np.ndarray, list[np.ndarray]]] = None,
) -> int:
    """Render image(s) and optional control images as ASCII art in the terminal.

    For a sequence, only the first and last frames are shown. In interactive
    mode the user is prompted after each image; returns the key code of the
    answer ("q" to quit, "d" for next dataset), otherwise ord(" ").
    """
    from ascii_magic import from_pillow_image, Back

    # fix: `back` was unconditionally reset to None before this check,
    # which made the --console_back option a silent no-op
    if back is not None:
        back = getattr(Back, back.upper())

    k = None
    imgs = (
        [image]
        if (isinstance(image, np.ndarray) and len(image.shape) == 3) or isinstance(image, Image.Image)
        else [image[0], image[-1]]
    )
    if len(imgs) > 1:
        print(f"Number of images: {len(image)}")

    if control_image is not None:
        if isinstance(control_image, np.ndarray):
            control_image = [control_image]
        for ci in control_image:
            imgs.append(ci)
        print(f"Number of control images: {len(control_image)}")

    for i, img in enumerate(imgs):
        if len(imgs) > 1:
            print(f"Image {i + 1} of {len(imgs)}: {img.shape}")
        else:
            print(f"Image: {img.shape}")
        pil_img = img if isinstance(img, Image.Image) else Image.fromarray(img)
        ascii_img = from_pillow_image(pil_img)
        ascii_img.to_terminal(columns=width, back=back)

        if interactive:
            k = input("Press q to quit, d to next dataset, other key to next: ")
            if k == "q" or k == "d":
                return ord(k)

    if not interactive:
        return ord(" ")
    return ord(k) if k else ord(" ")
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
def save_video(image: Union[list[Union[Image.Image, np.ndarray], Union[Image.Image, np.ndarray]]], cache_path: str, fps: int = 24):
    """Save a preview of `image` next to the latent cache path.

    A single image (PIL or HWC ndarray) is saved as a .jpg; a frame sequence
    is encoded to an H.264 .mp4 at `fps`. The output name is derived from
    `cache_path` by replacing the .safetensors suffix.
    """
    import av

    directory = os.path.dirname(cache_path)
    # fix: os.makedirs("") raises when cache_path has no directory part,
    # and exist_ok avoids a race between the exists() check and makedirs()
    if directory:
        os.makedirs(directory, exist_ok=True)

    if (isinstance(image, np.ndarray) and len(image.shape) == 3) or isinstance(image, Image.Image):
        # save single image as JPEG
        image_path = cache_path.replace(".safetensors", ".jpg")
        img = image if isinstance(image, Image.Image) else Image.fromarray(image)
        img.save(image_path)
        print(f"Saved image: {image_path}")
    else:
        imgs = image
        print(f"Number of images: {len(imgs)}")
        # save video
        video_path = cache_path.replace(".safetensors", ".mp4")
        height, width = imgs[0].shape[0:2]

        # create output container and H.264 stream
        container = av.open(video_path, mode="w")
        codec = "libx264"
        pixel_format = "yuv420p"
        stream = container.add_stream(codec, rate=fps)
        stream.width = width
        stream.height = height
        stream.pix_fmt = pixel_format
        stream.bit_rate = 1000000  # 1Mbit/s for preview quality

        for frame_img in imgs:
            if isinstance(frame_img, Image.Image):
                frame = av.VideoFrame.from_image(frame_img)
            else:
                frame = av.VideoFrame.from_ndarray(frame_img, format="rgb24")
            packets = stream.encode(frame)
            for packet in packets:
                container.mux(packet)

        # flush any buffered frames from the encoder
        for packet in stream.encode():
            container.mux(packet)

        container.close()

        print(f"Saved video: {video_path}")
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
def show_datasets(
    datasets: list[BaseDataset],
    debug_mode: str,
    console_width: int,
    console_back: str,
    console_num_images: Optional[int],
    fps: int = 24,
):
    """Interactively preview dataset contents for debugging.

    debug_mode selects the viewer: "image" (OpenCV window), "console"
    (ASCII art in the terminal), or "video" (writes preview files derived
    from each item's latent cache path). In interactive modes, key "d"
    skips to the next dataset and "q" quits entirely.
    """
    if debug_mode != "video":
        print("d: next dataset, q: quit")

    num_workers = max(1, os.cpu_count() - 1)
    for i, dataset in enumerate(datasets):
        print(f"Dataset [{i}]")
        batch_index = 0
        num_images_to_show = console_num_images
        k = None  # last key code returned by a viewer
        for key, batch in dataset.retrieve_latent_cache_batches(num_workers):
            print(f"bucket resolution: {key}, count: {len(batch)}")
            for j, item_info in enumerate(batch):
                item_info: ItemInfo
                print(f"{batch_index}-{j}: {item_info}")
                if debug_mode == "image":
                    k = show_image(item_info.content, item_info.control_content)
                elif debug_mode == "console":
                    # interactive only when no image-count limit was given
                    k = show_console(
                        item_info.content, console_width, console_back, console_num_images is None, item_info.control_content
                    )
                    if num_images_to_show is not None:
                        num_images_to_show -= 1
                        if num_images_to_show == 0:
                            k = ord("d")  # next dataset
                elif debug_mode == "video":
                    save_video(item_info.content, item_info.latent_cache_path, fps)
                    k = None  # save next video

                if k == ord("q"):
                    return
                elif k == ord("d"):
                    break
            if k == ord("d"):
                break
            batch_index += 1
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
def preprocess_contents(batch: "list[ItemInfo]") -> "tuple[int, int, torch.Tensor, list[list[Optional[torch.Tensor]]]]":
    """Stack a batch of items into a normalized (B, C, F, H, W) tensor.

    Each item contributes its control images followed by its target image
    (item.content), so F = number of control images + 1; all items in a batch
    must have the same number of control images and the same size. RGBA
    inputs have the alpha channel stripped and converted to a float mask in
    (1, 1, 1, H/8, W/8) layout; RGB inputs get a None mask entry.

    Returns (height, width, contents, content_masks) where `contents` is
    normalized to [-1, 1] and (height, width) is the pixel size.

    Raises ValueError if the spatial size is below 8x8.
    """
    # fix: the return annotation previously declared a 3-tuple while four
    # values are returned; annotations are quoted so the function does not
    # depend on import order for evaluation.
    contents = []
    content_masks: list[list[Optional[torch.Tensor]]] = []
    for item in batch:
        item_contents = item.control_content + [item.content]

        item_masks = []
        for i, c in enumerate(item_contents):
            if c.shape[-1] == 4:  # RGBA
                item_contents[i] = c[..., :3]  # remove alpha channel from content

                # build a latent-resolution (1/8) mask from the alpha channel
                alpha = c[..., 3]  # (H, W)
                mask_image = Image.fromarray(alpha, mode="L")
                width, height = mask_image.size
                mask_image = mask_image.resize((width // 8, height // 8), Image.LANCZOS)
                mask = torch.from_numpy(np.array(mask_image)).float() / 255.0  # (H/8, W/8), range 0..1
                # fix: the previous squeeze(-1) was a no-op for W > 1 and
                # corrupted the layout for W == 1; the array is already 2-D
                content_mask = mask.reshape(1, 1, 1, *mask.shape)  # (1, 1, 1, H/8, W/8) = BCFHW
            else:
                content_mask = None

            item_masks.append(content_mask)

        item_contents = [torch.from_numpy(c) for c in item_contents]
        contents.append(torch.stack(item_contents, dim=0))  # list of (F, H, W, C)
        content_masks.append(item_masks)

    contents = torch.stack(contents, dim=0)  # (B, F, H, W, C); F is control frames + target frame

    contents = contents.permute(0, 4, 1, 2, 3).contiguous()  # (B, C, F, H, W)
    contents = contents / 127.5 - 1.0  # normalize to [-1, 1]

    height, width = contents.shape[-2], contents.shape[-1]
    if height < 8 or width < 8:
        item = batch[0]  # other items should have the same size
        raise ValueError(f"Image or video size too small: {item.item_key} and {len(batch) - 1} more, size: {item.original_size}")

    return height, width, contents, content_masks
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
def encode_and_save_batch(vae: AutoencoderKLCausal3D, batch: list[ItemInfo]):
    """Encode a batch of images/videos with the VAE and save per-item latent caches.

    Item contents are stacked into a (B, C, F, H, W) tensor normalized to
    [-1, 1]; single images become one-frame videos. The latent scaling
    factor is intentionally NOT applied here.

    Raises ValueError when the spatial size is below 8x8.
    """
    pixel_values = torch.stack([torch.from_numpy(item.content) for item in batch])
    if len(pixel_values.shape) == 4:
        pixel_values = pixel_values.unsqueeze(1)  # (B, H, W, C) -> (B, F, H, W, C)

    pixel_values = pixel_values.permute(0, 4, 1, 2, 3).contiguous()  # (B, C, F, H, W)
    pixel_values = pixel_values.to(vae.device, dtype=vae.dtype)
    pixel_values = pixel_values / 127.5 - 1.0  # normalize to [-1, 1]

    h, w = pixel_values.shape[3], pixel_values.shape[4]
    if h < 8 or w < 8:
        item = batch[0]  # other items should have the same size
        raise ValueError(f"Image or video size too small: {item.item_key} and {len(batch) - 1} more, size: {item.original_size}")

    with torch.no_grad():
        latent = vae.encode(pixel_values).latent_dist.sample()

    for item, item_latent in zip(batch, latent):
        save_latent_cache(item, item_latent)
|
| 285 |
+
|
| 286 |
+
|
| 287 |
+
def encode_datasets(datasets: list[BaseDataset], encode: callable, args: argparse.Namespace, supports_alpha: bool = False):
    """Common function to encode datasets. This function is called from multiple architecture scripts.

    For each dataset, batches are retrieved, alpha channels are stripped
    unless `supports_alpha`, items with existing caches are skipped when
    --skip_existing is set, and chunks of at most --batch_size items are
    passed to `encode`. Afterwards, latent cache files no longer belonging
    to the dataset are removed unless --keep_cache is set.
    """
    num_workers = args.num_workers if args.num_workers is not None else max(1, os.cpu_count() - 1)
    for dataset_index, dataset in enumerate(datasets):
        logger.info(f"Encoding dataset [{dataset_index}]")
        all_latent_cache_paths = []
        for _, batch in tqdm(dataset.retrieve_latent_cache_batches(num_workers)):
            batch: list[ItemInfo] = batch
            if not supports_alpha:
                # make sure content has 3 channels
                for item in batch:
                    if item.content is None:
                        # audio-only datasets have no visual content
                        continue
                    if isinstance(item.content, np.ndarray):
                        if item.content.shape[-1] == 4:
                            item.content = item.content[..., :3]
                    else:
                        item.content = [img[..., :3] if img.shape[-1] == 4 else img for img in item.content]

            all_latent_cache_paths.extend([item.latent_cache_path for item in batch])

            if args.skip_existing:
                filtered_batch = [item for item in batch if not os.path.exists(item.latent_cache_path)]
                if len(filtered_batch) == 0:
                    continue
                batch = filtered_batch

            bs = args.batch_size if args.batch_size is not None else len(batch)
            # fix: the chunk index previously shadowed the dataset-loop variable `i`
            for start in range(0, len(batch), bs):
                encode(batch[start : start + bs])

        # normalize paths for reliable set-membership checks
        all_latent_cache_paths = {os.path.normpath(p) for p in all_latent_cache_paths}

        # remove old cache files not in the dataset
        all_cache_files = dataset.get_all_latent_cache_files()
        for cache_file in all_cache_files:
            if os.path.normpath(cache_file) not in all_latent_cache_paths:
                if args.keep_cache:
                    logger.info(f"Keep cache file not in the dataset: {cache_file}")
                else:
                    os.remove(cache_file)
                    logger.info(f"Removed old cache file: {cache_file}")
|
| 332 |
+
|
| 333 |
+
|
| 334 |
+
def main():
    """Entry point: cache HunyuanVideo VAE latents for all configured datasets."""
    parser = setup_parser_common()
    parser = hv_setup_parser(parser)

    args = parser.parse_args()

    # prefer CUDA when available unless --device overrides it
    device = args.device if args.device is not None else "cuda" if torch.cuda.is_available() else "cpu"
    device = torch.device(device)

    # Load dataset config
    blueprint_generator = BlueprintGenerator(ConfigSanitizer())
    logger.info(f"Load dataset config from {args.dataset_config}")
    user_config = config_utils.load_user_config(args.dataset_config)
    blueprint = blueprint_generator.generate(user_config, args, architecture=ARCHITECTURE_HUNYUAN_VIDEO)
    train_dataset_group = config_utils.generate_dataset_group_by_blueprint(blueprint.dataset_group)
    if args.save_dataset_manifest:
        # optional JSON manifest enabling training without the original sources
        manifest = config_utils.create_cache_only_dataset_manifest(
            user_config,
            args,
            architecture=ARCHITECTURE_HUNYUAN_VIDEO,
            source_dataset_config=args.dataset_config,
        )
        manifest_path = config_utils.save_dataset_manifest(manifest, args.save_dataset_manifest)
        logger.info(f"Saved cache-only dataset manifest: {manifest_path}")

    datasets = train_dataset_group.datasets

    if args.debug_mode is not None:
        # preview mode: show datasets instead of encoding, then exit
        show_datasets(datasets, args.debug_mode, args.console_width, args.console_back, args.console_num_images)
        return

    assert args.vae is not None, "vae checkpoint is required"

    # Load VAE model: HunyuanVideo VAE model is float16
    vae_dtype = torch.float16 if args.vae_dtype is None else str_to_dtype(args.vae_dtype)
    vae, _, s_ratio, t_ratio = load_vae(vae_dtype=vae_dtype, device=device, vae_path=args.vae)
    vae.eval()
    logger.info(f"Loaded VAE: {vae.config}, dtype: {vae.dtype}")

    # optional memory-saving settings for the causal 3D VAE
    if args.vae_chunk_size is not None:
        vae.set_chunk_size_for_causal_conv_3d(args.vae_chunk_size)
        logger.info(f"Set chunk_size to {args.vae_chunk_size} for CausalConv3d in VAE")
    if args.vae_spatial_tile_sample_min_size is not None:
        vae.enable_spatial_tiling(True)
        vae.tile_sample_min_size = args.vae_spatial_tile_sample_min_size
        vae.tile_latent_min_size = args.vae_spatial_tile_sample_min_size // 8
    elif args.vae_tiling:
        vae.enable_spatial_tiling(True)

    # Encode images
    def encode(one_batch: list[ItemInfo]):
        encode_and_save_batch(vae, one_batch)

    encode_datasets(datasets, encode, args)
|
| 388 |
+
|
| 389 |
+
|
| 390 |
+
def setup_parser_common() -> argparse.ArgumentParser:
    """Build the argument parser shared by all latent-caching scripts."""
    parser = argparse.ArgumentParser()

    # dataset / output options
    parser.add_argument("--dataset_config", type=str, required=True, help="path to dataset config .toml file")
    parser.add_argument(
        "--save_dataset_manifest",
        type=str,
        default=None,
        help="optional path to write a cache-only dataset manifest JSON for source-free training",
    )

    # model / device options
    parser.add_argument("--vae", type=str, required=False, default=None, help="path to vae checkpoint")
    parser.add_argument("--vae_dtype", type=str, default=None, help="data type for VAE, default depends on model, e.g., float16")
    parser.add_argument("--device", type=str, default=None, help="device to use, default is cuda if available")

    # batching / caching options
    parser.add_argument(
        "--batch_size", type=int, default=None, help="batch size, override dataset config if dataset batch size > this"
    )
    parser.add_argument("--num_workers", type=int, default=None, help="number of workers for dataset. default is cpu count-1")
    parser.add_argument("--skip_existing", action="store_true", help="skip existing cache files")
    parser.add_argument("--keep_cache", action="store_true", help="keep cache files not in dataset")

    # debug preview options
    parser.add_argument("--debug_mode", type=str, default=None, choices=["image", "console", "video"], help="debug mode")
    parser.add_argument("--console_width", type=int, default=80, help="debug mode: console width")
    parser.add_argument(
        "--console_back", type=str, default=None, help="debug mode: console background color, one of ascii_magic.Back"
    )
    parser.add_argument(
        "--console_num_images",
        type=int,
        default=None,
        help="debug mode: not interactive, number of images to show for each dataset",
    )
    parser.add_argument(
        "--disable_cudnn_backend", action="store_true", help="Disable CUDNN PyTorch backend. May be useful for AMD GPUs."
    )
    return parser
|
| 424 |
+
|
| 425 |
+
|
| 426 |
+
def hv_setup_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
    """Add HunyuanVideo-specific VAE options to an existing parser."""
    parser.add_argument(
        "--vae_tiling",
        action="store_true",
        help="enable spatial tiling for VAE, default is False. If vae_spatial_tile_sample_min_size is set, this is automatically enabled",
    )
    parser.add_argument("--vae_chunk_size", type=int, default=None, help="chunk size for CausalConv3d in VAE")
    parser.add_argument(
        "--vae_spatial_tile_sample_min_size", type=int, default=None, help="spatial tile sample min size for VAE, default 256"
    )
    return parser
|
| 437 |
+
|
| 438 |
+
|
| 439 |
+
# CLI entry point
if __name__ == "__main__":
    main()
|
src/musubi_tuner/cache_text_encoder_outputs.py
ADDED
|
@@ -0,0 +1,234 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import os
|
| 3 |
+
from typing import Optional, Union
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
from tqdm import tqdm
|
| 7 |
+
|
| 8 |
+
from musubi_tuner.dataset import config_utils
|
| 9 |
+
from musubi_tuner.dataset.config_utils import BlueprintGenerator, ConfigSanitizer
|
| 10 |
+
import accelerate
|
| 11 |
+
|
| 12 |
+
from musubi_tuner.dataset.image_video_dataset import (
|
| 13 |
+
ARCHITECTURE_HUNYUAN_VIDEO,
|
| 14 |
+
BaseDataset,
|
| 15 |
+
ItemInfo,
|
| 16 |
+
save_text_encoder_output_cache,
|
| 17 |
+
)
|
| 18 |
+
from musubi_tuner.hunyuan_model import text_encoder as text_encoder_module
|
| 19 |
+
from musubi_tuner.hunyuan_model.text_encoder import TextEncoder
|
| 20 |
+
|
| 21 |
+
import logging
|
| 22 |
+
|
| 23 |
+
from musubi_tuner.utils.model_utils import str_to_dtype
|
| 24 |
+
|
| 25 |
+
logger = logging.getLogger(__name__)
|
| 26 |
+
logging.basicConfig(level=logging.INFO)
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def encode_prompt(text_encoder: TextEncoder, prompt: Union[str, list[str]]):
    """Tokenize and encode prompt(s) with the given text encoder.

    Only the "video" data type is supported. Returns the tuple
    (hidden_state, attention_mask) from the encoder output.
    """
    data_type = "video"  # video only, image is not supported
    tokens = text_encoder.text2tokens(prompt, data_type=data_type)

    with torch.no_grad():
        outputs = text_encoder.encode(tokens, data_type=data_type)

    return outputs.hidden_state, outputs.attention_mask
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def encode_and_save_batch(
    text_encoder: TextEncoder, batch: list[ItemInfo], is_llm: bool, accelerator: Optional[accelerate.Accelerator]
):
    """Encode the captions of a batch and write per-item text-encoder caches.

    When an accelerator is supplied (fp8 LLM inference), encoding runs under
    its autocast context; otherwise at the encoder's native dtype. `is_llm`
    tags the cache entries as Text Encoder 1 (LLM) vs Text Encoder 2 output.
    """
    captions = [item.caption for item in batch]

    if accelerator is None:
        prompt_embeds, prompt_mask = encode_prompt(text_encoder, captions)
    else:
        with accelerator.autocast():
            prompt_embeds, prompt_mask = encode_prompt(text_encoder, captions)

    # save one cache entry per item
    for item, embed, mask in zip(batch, prompt_embeds, prompt_mask):
        save_text_encoder_output_cache(item, embed, mask, is_llm)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def prepare_cache_files_and_paths(datasets: list[BaseDataset]):
    """Collect existing text-encoder cache files for each dataset.

    Returns two parallel lists: per-dataset sets of existing cache file
    paths (normalized), and per-dataset empty sets to be filled with the
    paths actually produced during encoding.
    """
    existing_files_per_dataset = []
    produced_paths_per_dataset = []
    for dataset in datasets:
        normalized = {os.path.normpath(f) for f in dataset.get_all_text_encoder_output_cache_files()}
        existing_files_per_dataset.append(normalized)
        produced_paths_per_dataset.append(set())
    return existing_files_per_dataset, produced_paths_per_dataset
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def process_text_encoder_batches(
    num_workers: Optional[int],
    skip_existing: bool,
    batch_size: int,
    datasets: list[BaseDataset],
    all_cache_files_for_dataset: list[set],
    all_cache_paths_for_dataset: list[set],
    encode: callable,
    requires_content: Optional[bool] = False,
):
    """
    Architecture independent processing of text encoder batches.

    Retrieves caption batches from each dataset (with images/videos attached
    when `requires_content` is set), records every expected cache path in
    `all_cache_paths_for_dataset`, skips already-cached items when
    `skip_existing` is set, and feeds chunks of at most `batch_size` items
    to `encode`.
    """
    num_workers = num_workers if num_workers is not None else max(1, os.cpu_count() - 1)
    for i, dataset in enumerate(datasets):
        logger.info(f"Encoding dataset [{i}]")
        all_cache_files = all_cache_files_for_dataset[i]
        all_cache_paths = all_cache_paths_for_dataset[i]

        if not requires_content:
            batches = dataset.retrieve_text_encoder_output_cache_batches(num_workers)  # return captions only
        else:
            batches = dataset.retrieve_latent_cache_batches(num_workers)  # return captions and images/videos

        for batch in tqdm(batches):
            # update cache files (it's ok if we update it multiple times)
            if requires_content:
                batch = batch[1]  # batch is (key, items), so use items
            all_cache_paths.update([os.path.normpath(item.text_encoder_output_cache_path) for item in batch])

            # skip existing cache files
            if skip_existing:
                filtered_batch = [
                    item for item in batch if os.path.normpath(item.text_encoder_output_cache_path) not in all_cache_files
                ]
                if len(filtered_batch) == 0:
                    continue
                batch = filtered_batch

            bs = batch_size if batch_size is not None else len(batch)
            # fix: the chunk index previously shadowed the dataset-loop variable `i`
            for start in range(0, len(batch), bs):
                encode(batch[start : start + bs])
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
def post_process_cache_files(
    datasets: list[BaseDataset], all_cache_files_for_dataset: list[set], all_cache_paths_for_dataset: list[set], keep_cache: bool
):
    """Delete text-encoder cache files that are no longer referenced.

    Compares the pre-existing cache files against the paths collected during
    encoding; stale files are removed unless keep_cache is set.
    """
    for idx, _dataset in enumerate(datasets):
        existing_files = all_cache_files_for_dataset[idx]
        current_paths = all_cache_paths_for_dataset[idx]
        for cache_file in existing_files:
            if cache_file in current_paths:
                continue
            if keep_cache:
                logger.info(f"Keep cache file not in the dataset: {cache_file}")
            else:
                os.remove(cache_file)
                logger.info(f"Removed old cache file: {cache_file}")
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
def main():
    """Entry point: cache HunyuanVideo text-encoder outputs for all configured datasets."""
    parser = setup_parser_common()
    parser = hv_setup_parser(parser)

    args = parser.parse_args()

    # prefer CUDA when available unless --device overrides it
    device = args.device if args.device is not None else "cuda" if torch.cuda.is_available() else "cpu"
    device = torch.device(device)

    # Load dataset config
    blueprint_generator = BlueprintGenerator(ConfigSanitizer())
    logger.info(f"Load dataset config from {args.dataset_config}")
    user_config = config_utils.load_user_config(args.dataset_config)
    blueprint = blueprint_generator.generate(user_config, args, architecture=ARCHITECTURE_HUNYUAN_VIDEO)
    train_dataset_group = config_utils.generate_dataset_group_by_blueprint(blueprint.dataset_group)

    datasets = train_dataset_group.datasets

    # define accelerator for fp8 inference
    accelerator = None
    if args.fp8_llm:
        accelerator = accelerate.Accelerator(mixed_precision="fp16")

    # prepare cache files and paths: all_cache_files_for_dataset = existing cache files, all_cache_paths_for_dataset = all cache paths in the dataset
    all_cache_files_for_dataset, all_cache_paths_for_dataset = prepare_cache_files_and_paths(datasets)

    # Load Text Encoder 1
    text_encoder_dtype = torch.float16 if args.text_encoder_dtype is None else str_to_dtype(args.text_encoder_dtype)
    logger.info(f"loading text encoder 1: {args.text_encoder1}")
    text_encoder_1 = text_encoder_module.load_text_encoder_1(args.text_encoder1, device, args.fp8_llm, text_encoder_dtype)
    text_encoder_1.to(device=device)

    # Encode with Text Encoder 1 (LLM)
    logger.info("Encoding with Text Encoder 1")

    def encode_for_text_encoder_1(batch: list[ItemInfo]):
        nonlocal text_encoder_1
        encode_and_save_batch(text_encoder_1, batch, is_llm=True, accelerator=accelerator)

    process_text_encoder_batches(
        args.num_workers,
        args.skip_existing,
        args.batch_size,
        datasets,
        all_cache_files_for_dataset,
        all_cache_paths_for_dataset,
        encode_for_text_encoder_1,
    )
    # free Text Encoder 1 before loading Text Encoder 2
    del text_encoder_1

    # Load Text Encoder 2
    logger.info(f"loading text encoder 2: {args.text_encoder2}")
    text_encoder_2 = text_encoder_module.load_text_encoder_2(args.text_encoder2, device, text_encoder_dtype)
    text_encoder_2.to(device=device)

    # Encode with Text Encoder 2
    logger.info("Encoding with Text Encoder 2")

    def encode_for_text_encoder_2(batch: list[ItemInfo]):
        nonlocal text_encoder_2
        encode_and_save_batch(text_encoder_2, batch, is_llm=False, accelerator=None)

    process_text_encoder_batches(
        args.num_workers,
        args.skip_existing,
        args.batch_size,
        datasets,
        all_cache_files_for_dataset,
        all_cache_paths_for_dataset,
        encode_for_text_encoder_2,
    )
    del text_encoder_2

    # remove cache files not in dataset
    post_process_cache_files(datasets, all_cache_files_for_dataset, all_cache_paths_for_dataset, args.keep_cache)
|
| 209 |
+
|
| 210 |
+
|
| 211 |
+
def setup_parser_common():
    """Build the argument parser shared by all text-encoder caching scripts."""
    parser = argparse.ArgumentParser()

    # dataset / device options
    parser.add_argument("--dataset_config", type=str, required=True, help="path to dataset config .toml file")
    parser.add_argument("--device", type=str, default=None, help="device to use, default is cuda if available")

    # batching / caching options
    parser.add_argument(
        "--batch_size", type=int, default=None, help="batch size, override dataset config if dataset batch size > this"
    )
    parser.add_argument("--num_workers", type=int, default=None, help="number of workers for dataset. default is cpu count-1")
    parser.add_argument("--skip_existing", action="store_true", help="skip existing cache files")
    parser.add_argument("--keep_cache", action="store_true", help="keep cache files not in dataset")
    return parser
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
def hv_setup_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
    """Add HunyuanVideo text-encoder options to an existing parser."""
    parser.add_argument("--text_encoder1", type=str, required=True, help="Text Encoder 1 directory")
    parser.add_argument("--text_encoder2", type=str, required=True, help="Text Encoder 2 directory")
    parser.add_argument("--text_encoder_dtype", type=str, default=None, help="data type for Text Encoder, default is float16")
    parser.add_argument("--fp8_llm", action="store_true", help="use fp8 for Text Encoder 1 (LLM)")
    return parser
|
| 231 |
+
|
| 232 |
+
|
| 233 |
+
# CLI entry point
if __name__ == "__main__":
    main()
|
src/musubi_tuner/caption_images_by_qwen_vl.py
ADDED
|
@@ -0,0 +1,256 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
import argparse
|
| 3 |
+
import json
|
| 4 |
+
import math
|
| 5 |
+
from pathlib import Path
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
from PIL import Image
|
| 9 |
+
from tqdm import tqdm
|
| 10 |
+
from transformers import AutoProcessor
|
| 11 |
+
|
| 12 |
+
from musubi_tuner.dataset import image_video_dataset
|
| 13 |
+
from musubi_tuner.qwen_image.qwen_image_utils import load_qwen2_5_vl
|
| 14 |
+
|
| 15 |
+
import logging
|
| 16 |
+
|
| 17 |
+
logger = logging.getLogger(__name__)
|
| 18 |
+
logging.basicConfig(level=logging.INFO)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
IMAGE_FACTOR = 28  # The image size must be divisible by this factor
# Default cap on image size; images are resized so total area fits within
# (max_size x max_size) — see resize_image below.
DEFAULT_MAX_SIZE = 1280

# Instruction prompt sent to Qwen2.5-VL when the user supplies no --prompt.
DEFAULT_PROMPT = """# Image Annotator
You are a professional image annotator. Please complete the following task based on the input image.
## Create Image Caption
1. Write the caption using natural, descriptive text without structured formats or rich text.
2. Enrich caption details by including: object attributes, vision relations between objects, and environmental details.
3. Identify the text visible in the image, without translation or explanation, and highlight it in the caption with quotation marks.
4. Maintain authenticity and accuracy, avoid generalizations."""
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def parse_args():
    """Parse command line arguments for the captioning script."""
    parser = argparse.ArgumentParser(description="Generate captions for images using Qwen2.5-VL")

    # (flag, keyword arguments) pairs; registration order preserved for --help.
    arg_specs = [
        ("--image_dir", dict(type=str, required=True, help="Path to directory containing images")),
        ("--model_path", dict(type=str, required=True, help="Path to Qwen2.5-VL model")),
        ("--output_file", dict(type=str, required=False, help="Output JSONL file path (required for 'jsonl' format)")),
        ("--max_new_tokens", dict(type=int, default=1024, help="Maximum number of new tokens to generate (default: 1024)")),
        ("--prompt", dict(type=str, default=DEFAULT_PROMPT, help="Custom prompt for caption generation (supports \\n for newlines)")),
        (
            "--max_size",
            dict(
                type=int,
                default=DEFAULT_MAX_SIZE,
                help=f"Maximum image size (default: {DEFAULT_MAX_SIZE}). The images are resized to fit the total pixel area within (max_size x max_size)",
            ),
        ),
        ("--fp8_vl", dict(action="store_true", help="Load Qwen2.5-VL model in fp8 precision")),
        (
            "--output_format",
            dict(
                type=str,
                choices=["jsonl", "text"],
                default="jsonl",
                help="Output format: 'jsonl' for JSONL file or 'text' for individual text files (default: jsonl)",
            ),
        ),
    ]
    for flag, kwargs in arg_specs:
        parser.add_argument(flag, **kwargs)

    return parser.parse_args()
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def load_model_and_processor(model_path: str, device: torch.device, max_size: int = DEFAULT_MAX_SIZE, fp8_vl: bool = False):
    """Load the Qwen2.5-VL captioning model and its processor.

    Args:
        model_path: local path to the Qwen2.5-VL weights.
        device: device the model is loaded onto.
        max_size: upper bound used to derive the processor's pixel budget.
        fp8_vl: when True, model weights are loaded as float8_e4m3fn.

    Returns:
        (processor, model) tuple; the model is already in eval mode.
    """
    logger.info(f"Loading model from: {model_path}")

    # Pixel budgets handed to the processor, scaled by IMAGE_FACTOR^2
    # (the vision tower consumes IMAGE_FACTOR-sized patches).
    lower_pixel_budget = 256 * IMAGE_FACTOR * IMAGE_FACTOR
    upper_pixel_budget = max_size * IMAGE_FACTOR * IMAGE_FACTOR

    # No processor configs ship with model_path, so the defaults are pulled
    # from the Hugging Face hub entry.
    processor = AutoProcessor.from_pretrained(
        "Qwen/Qwen2.5-VL-7B-Instruct", min_pixels=lower_pixel_budget, max_pixels=upper_pixel_budget
    )

    # Use the shared loader from qwen_image_utils.
    load_dtype = torch.float8_e4m3fn if fp8_vl else torch.bfloat16
    _, model = load_qwen2_5_vl(model_path, dtype=load_dtype, device=device, disable_mmap=True)
    model.eval()

    logger.info(f"Model loaded successfully on device: {model.device}")
    return processor, model
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def resize_image(image: Image.Image, max_size: int = DEFAULT_MAX_SIZE) -> Image.Image:
    """Resize image to a suitable resolution.

    The image is snapped to a "bucket" resolution whose sides are multiples of
    IMAGE_FACTOR and whose area lies within [256*256, max_size*max_size]; among
    the candidate buckets, the one whose aspect ratio is closest to the
    original is chosen.
    """
    min_area = 256 * 256
    max_area = max_size * max_size
    width, height = image.size
    # Round each side to the nearest multiple of IMAGE_FACTOR.
    width_rounded = int((width / IMAGE_FACTOR) + 0.5) * IMAGE_FACTOR
    height_rounded = int((height / IMAGE_FACTOR) + 0.5) * IMAGE_FACTOR

    bucket_resos = []
    if width_rounded * height_rounded < min_area:
        # Scale up to min area
        scale_factor = math.sqrt(min_area / (width_rounded * height_rounded))
        new_width = math.ceil(width * scale_factor / IMAGE_FACTOR) * IMAGE_FACTOR
        new_height = math.ceil(height * scale_factor / IMAGE_FACTOR) * IMAGE_FACTOR

        # Add to bucket resolutions: default and slight variations for keeping aspect ratio
        bucket_resos.append((new_width, new_height))
        bucket_resos.append((new_width + IMAGE_FACTOR, new_height))
        bucket_resos.append((new_width, new_height + IMAGE_FACTOR))
    elif width_rounded * height_rounded > max_area:
        # Scale down to max area
        scale_factor = math.sqrt(max_area / (width_rounded * height_rounded))
        new_width = math.floor(width * scale_factor / IMAGE_FACTOR) * IMAGE_FACTOR
        new_height = math.floor(height * scale_factor / IMAGE_FACTOR) * IMAGE_FACTOR

        # Add to bucket resolutions: default and slight variations for keeping aspect ratio
        bucket_resos.append((new_width, new_height))
        bucket_resos.append((new_width - IMAGE_FACTOR, new_height))
        bucket_resos.append((new_width, new_height - IMAGE_FACTOR))
    else:
        # Keep original resolution, but add slight variations for keeping aspect ratio
        bucket_resos.append((width_rounded, height_rounded))
        bucket_resos.append((width_rounded - IMAGE_FACTOR, height_rounded))
        bucket_resos.append((width_rounded, height_rounded - IMAGE_FACTOR))
        bucket_resos.append((width_rounded + IMAGE_FACTOR, height_rounded))
        bucket_resos.append((width_rounded, height_rounded + IMAGE_FACTOR))

    # Min/max area filtering
    bucket_resos = [(w, h) for w, h in bucket_resos if w * h >= min_area and w * h <= max_area]

    # Select bucket which has a nearest aspect ratio
    aspect_ratio = width / height
    bucket_resos.sort(key=lambda x: abs((x[0] / x[1]) - aspect_ratio))
    # NOTE(review): if max_size < 256 every candidate is filtered out and this
    # indexing raises IndexError — confirm callers always pass max_size >= 256.
    bucket_reso = bucket_resos[0]

    # Resize to bucket
    image_np = image_video_dataset.resize_image_to_bucket(image, bucket_reso)

    # Convert back to PIL
    image = Image.fromarray(image_np)
    return image
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
def generate_caption(
    processor,
    model,
    image_path: str,
    device: torch.device,
    max_new_tokens: int,
    prompt: str = DEFAULT_PROMPT,
    max_size: int = DEFAULT_MAX_SIZE,
    fp8_vl: bool = False,
) -> str:
    """Generate a caption for a single image.

    Args:
        processor: Qwen2.5-VL processor (chat template + tokenizer).
        model: Qwen2.5-VL model, already on `device` and in eval mode.
        image_path: path of the image to caption.
        device: device inputs are moved to before generation.
        max_new_tokens: generation length cap.
        prompt: instruction prompt for the annotator.
        max_size: resize cap forwarded to resize_image.
        fp8_vl: wrap generation in a bf16 autocast region (fp8 weights).

    Returns:
        The decoded caption string ("" if decoding yields nothing).
    """
    pil_image = Image.open(image_path).convert("RGB")

    conversation = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": pil_image},
                {"type": "text", "text": prompt},
            ],
        }
    ]

    # Build the model inputs: chat-templated text plus the resized image.
    chat_text = processor.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)
    resized_image = resize_image(pil_image, max_size=max_size)
    inputs = processor(text=[chat_text], images=resized_image, padding=True, return_tensors="pt").to(device)

    def _run_generate():
        # Shared generation call so the fp8/non-fp8 branches stay identical.
        return model.generate(**inputs, max_new_tokens=max_new_tokens, pad_token_id=processor.tokenizer.eos_token_id)

    if fp8_vl:
        # fp8 weights need a bf16 autocast region around generation.
        with torch.no_grad(), torch.autocast(device_type=device.type, dtype=torch.bfloat16):
            generated_ids = _run_generate()
    else:
        with torch.no_grad():
            generated_ids = _run_generate()

    # Strip the prompt tokens, keeping only the newly generated ones.
    trimmed_ids = [full_ids[len(fed_ids) :] for fed_ids, full_ids in zip(inputs.input_ids, generated_ids)]
    decoded = processor.batch_decode(trimmed_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)

    # Return a plain string instead of a list.
    return decoded[0] if decoded else ""
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
def process_images(args):
    """Main processing function: caption every image under args.image_dir.

    Depending on args.output_format, captions are written either to a single
    JSONL file (args.output_file) or to a .txt file next to each image.

    Raises:
        ValueError: when output_format is 'jsonl' but no --output_file given.
    """
    # Validate arguments
    if args.output_format == "jsonl" and not args.output_file:
        raise ValueError("--output_file is required when --output_format is 'jsonl'")

    # Process custom prompt - replace \n with actual newlines
    if args.prompt:
        args.prompt = args.prompt.replace("\\n", "\n")
        prompt = args.prompt
    else:
        prompt = DEFAULT_PROMPT

    # Set device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logger.info(f"Using device: {device}")
    logger.info(f"Output format: {args.output_format}")
    if args.fp8_vl:
        logger.info("Using fp8 precision for model")

    # Get image files
    image_files = image_video_dataset.glob_images(args.image_dir)
    logger.info(f"Found {len(image_files)} image files")

    # Load model and processor
    processor, model = load_model_and_processor(args.model_path, device, args.max_size, args.fp8_vl)

    # Create output directory if needed for JSONL format
    if args.output_format == "jsonl":
        output_path = Path(args.output_file)
        output_path.parent.mkdir(parents=True, exist_ok=True)

    # Process images and write results
    if args.output_format == "jsonl":
        # JSONL output format: one {"image_path", "caption"} object per line
        with open(args.output_file, "w", encoding="utf-8") as f:
            for image_path in tqdm(image_files, desc="Generating captions"):
                caption = generate_caption(
                    processor, model, image_path, device, args.max_new_tokens, prompt, args.max_size, args.fp8_vl
                )

                # Create JSONL entry
                entry = {"image_path": image_path, "caption": caption}

                # Write to file
                f.write(json.dumps(entry, ensure_ascii=False) + "\n")
                f.flush()  # Ensure data is written immediately

        logger.info(f"Caption generation completed. Results saved to: {args.output_file}")

    else:
        # Text file output format: one .txt alongside each image
        for image_path in tqdm(image_files, desc="Generating captions"):
            caption = generate_caption(
                processor, model, image_path, device, args.max_new_tokens, prompt, args.max_size, args.fp8_vl
            )

            # Generate text file path: same directory as image, with .txt extension
            image_path_obj = Path(image_path)
            text_file_path = image_path_obj.with_suffix(".txt")

            # Write caption to text file
            with open(text_file_path, "w", encoding="utf-8") as f:
                f.write(caption)

        logger.info("Caption generation completed. Text files saved alongside each image.")
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
def main():
    """Script entry point: parse CLI args and run caption generation."""
    args = parse_args()
    process_images(args)
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
if __name__ == "__main__":
    # Script entry point: caption images with Qwen2.5-VL.
    main()
|
src/musubi_tuner/convert_lora.py
ADDED
|
@@ -0,0 +1,224 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
from safetensors.torch import load_file, save_file
|
| 5 |
+
from safetensors import safe_open
|
| 6 |
+
from musubi_tuner.utils import model_utils
|
| 7 |
+
|
| 8 |
+
import logging
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
logger = logging.getLogger(__name__)
|
| 12 |
+
logging.basicConfig(level=logging.INFO)
|
| 13 |
+
|
| 14 |
+
# keys of Qwen-Image state dict
# "*" is a wildcard expanded to the transformer block index (0..99) when
# building the LoRA-name → module-name map in convert_to_diffusers.
QWEN_IMAGE_KEYS = [
    "time_text_embed.timestep_embedder.linear_1",
    "time_text_embed.timestep_embedder.linear_2",
    "txt_norm",
    "img_in",
    "txt_in",
    "transformer_blocks.*.img_mod.1",
    "transformer_blocks.*.attn.norm_q",
    "transformer_blocks.*.attn.norm_k",
    "transformer_blocks.*.attn.to_q",
    "transformer_blocks.*.attn.to_k",
    "transformer_blocks.*.attn.to_v",
    "transformer_blocks.*.attn.add_k_proj",
    "transformer_blocks.*.attn.add_v_proj",
    "transformer_blocks.*.attn.add_q_proj",
    "transformer_blocks.*.attn.to_out.0",
    "transformer_blocks.*.attn.to_add_out",
    "transformer_blocks.*.attn.norm_added_q",
    "transformer_blocks.*.attn.norm_added_k",
    "transformer_blocks.*.img_mlp.net.0.proj",
    "transformer_blocks.*.img_mlp.net.2",
    "transformer_blocks.*.txt_mod.1",
    "transformer_blocks.*.txt_mlp.net.0.proj",
    "transformer_blocks.*.txt_mlp.net.2",
    "norm_out.linear",
    "proj_out",
]
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def convert_from_diffusers(prefix, weights_sd):
    """Convert a Diffusers-style LoRA/LoHa/LoKr state dict to the default format.

    Diffusers format: {"diffusion_model.module.name.lora_A.weight": w,
                       "diffusion_model.module.name.lora_B.weight": w, ...}
    default format:   {"<prefix>module_name.lora_down.weight": w,
                       "<prefix>module_name.lora_up.weight": w, ...}

    Args:
        prefix: prefix for converted keys, e.g. "lora_unet_".
        weights_sd: source state dict (Diffusers-style keys).

    Returns:
        New state dict in the default format. Diffusers has no alpha, so a
        per-module alpha equal to the rank is synthesized for LoRA modules.
    """
    new_weights_sd = {}
    lora_dims = {}
    for key, weight in weights_sd.items():
        diffusers_prefix, key_body = key.split(".", 1)
        if diffusers_prefix != "diffusion_model" and diffusers_prefix != "transformer":
            logger.warning(f"unexpected key: {key} in diffusers format")
            continue

        new_key = f"{prefix}{key_body}".replace(".", "_")
        if "_lora_" in new_key:  # LoRA
            new_key = new_key.replace("_lora_A_", ".lora_down.").replace("_lora_B_", ".lora_up.")

            # support unknown format: do not replace dots but uses lora_down/lora_up/alpha
            new_key = new_key.replace("_lora_down_", ".lora_down.").replace("_lora_up_", ".lora_up.")
        else:  # LoHa or LoKr
            new_key = new_key.replace("_hada_", ".hada_").replace("_lokr_", ".lokr_")

            if new_key.endswith("_alpha"):
                # Fix: replace only the suffix. str.replace would rewrite the
                # FIRST occurrence of "_alpha", corrupting keys whose module
                # name itself contains "_alpha".
                new_key = new_key[: -len("_alpha")] + ".alpha"

        new_weights_sd[new_key] = weight

        lora_name = new_key.split(".")[0]  # before first dot
        if lora_name not in lora_dims and "lora_down" in new_key:
            lora_dims[lora_name] = weight.shape[0]  # rank of the down projection

    # add alpha with rank for LoRA modules that have no alpha yet
    for lora_name, dim in lora_dims.items():
        alpha_key = f"{lora_name}.alpha"
        if alpha_key not in new_weights_sd:
            new_weights_sd[alpha_key] = torch.tensor(dim)

    return new_weights_sd
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def convert_to_diffusers(prefix, diffusers_prefix, weights_sd):
    """Convert a default-format LoRA/LoHa/LoKr state dict to Diffusers keys.

    LoRA weights are rescaled by sqrt(alpha/dim) on both down and up so the
    Diffusers output (which carries no alpha) behaves identically; LoHa/LoKr
    weights are copied with their alpha preserved.

    Args:
        prefix: prefix of default-format keys, e.g. "lora_unet_".
        diffusers_prefix: prefix for the output keys; None → "diffusion_model".
        weights_sd: source state dict.

    Returns:
        New state dict with Diffusers-style keys.
    """
    # convert from default LoRA to diffusers
    if diffusers_prefix is None:
        diffusers_prefix = "diffusion_model"

    # make reverse map from LoRA name to base model module name (Qwen-Image)
    lora_name_to_module_name = {}
    for key in QWEN_IMAGE_KEYS:
        if "*" not in key:
            lora_name = prefix + key.replace(".", "_")
            lora_name_to_module_name[lora_name] = key
        else:
            lora_name = prefix + key.replace(".", "_")
            for i in range(100):  # assume at most 100 transformer blocks
                lora_name_to_module_name[lora_name.replace("*", str(i))] = key.replace("*", str(i))

    # get alphas (one per LoRA module, keyed by the part before the first dot)
    lora_alphas = {}
    for key, weight in weights_sd.items():
        if key.startswith(prefix):
            lora_name = key.split(".", 1)[0]  # before first dot
            if lora_name not in lora_alphas and "alpha" in key:
                lora_alphas[lora_name] = weight

    new_weights_sd = {}
    estimated_type = None  # "LoRA" | "LoHa" | "LoKr", for the final log line
    for key, weight in weights_sd.items():
        if key.startswith(prefix):
            if "alpha" in key:
                continue

            lora_name, weight_name = key.split(".", 1)

            if lora_name in lora_name_to_module_name:
                module_name = lora_name_to_module_name[lora_name]
            else:
                # Fall back to heuristic underscore→dot restoration, then undo
                # it for known compound names of each supported architecture.
                module_name = lora_name[len(prefix) :]  # remove "lora_unet_"
                module_name = module_name.replace("_", ".")  # replace "_" with "."
                if ".cross.attn." in module_name or ".self.attn." in module_name:
                    # Wan2.1 lora name to module name: ugly but works
                    module_name = module_name.replace("cross.attn", "cross_attn")  # fix cross attn
                    module_name = module_name.replace("self.attn", "self_attn")  # fix self attn
                    module_name = module_name.replace("k.img", "k_img")  # fix k img
                    module_name = module_name.replace("v.img", "v_img")  # fix v img
                elif ".attention.to." in module_name or ".feed.forward." in module_name:
                    # Z-Image lora name to module name: ugly but works
                    module_name = module_name.replace("to.q", "to_q")  # fix to q
                    module_name = module_name.replace("to.k", "to_k")  # fix to k
                    module_name = module_name.replace("to.v", "to_v")  # fix to v
                    module_name = module_name.replace("to.out", "to_out")  # fix to out
                    module_name = module_name.replace("feed.forward", "feed_forward")  # fix feed forward
                elif "double.blocks." in module_name or "single.blocks." in module_name:
                    # HunyuanVideo and FLUX lora name to module name: ugly but works
                    module_name = module_name.replace("double.blocks.", "double_blocks.")  # fix double blocks
                    module_name = module_name.replace("single.blocks.", "single_blocks.")  # fix single blocks
                    module_name = module_name.replace("img.", "img_")  # fix img
                    module_name = module_name.replace("txt.", "txt_")  # fix txt
                    module_name = module_name.replace("attn.", "attn_")  # fix attn

            dim = None  # None means LoHa or LoKr, otherwise it's LoRA with alpha and dim is used for scaling
            if "lora_down" in key:
                new_key = f"{diffusers_prefix}.{module_name}.lora_A.weight"
                dim = weight.shape[0]
            elif "lora_up" in key:
                new_key = f"{diffusers_prefix}.{module_name}.lora_B.weight"
                dim = weight.shape[1]
            elif "hada" in key or "lokr" in key:  # LoHa or LoKr
                new_key = f"{diffusers_prefix}.{module_name}.{weight_name}"
                if "hada" in key:
                    estimated_type = "LoHa"
                elif "lokr" in key:
                    estimated_type = "LoKr"
            else:
                logger.warning(f"unexpected key: {key} in default LoRA format")
                continue
            if dim is not None:
                estimated_type = "LoRA"

            # scale weight by alpha for LoRA with alpha (e.g., LyCORIS), to match Diffusers format which has no alpha (alpha is effectively 1)
            if lora_name in lora_alphas and dim is not None:
                # we scale both down and up, so scale is sqrt
                scale = lora_alphas[lora_name] / dim
                scale = scale.sqrt()
                weight = weight * scale
            else:
                if dim is not None:
                    logger.warning(f"missing alpha for {lora_name}")
                else:
                    # for LoHa or LoKr, we copy alpha if exists
                    if lora_name in lora_alphas:
                        new_weights_sd[f"{diffusers_prefix}.{module_name}.alpha"] = lora_alphas[lora_name]

            new_weights_sd[new_key] = weight

    logger.info(f"estimated type: {estimated_type}")
    return new_weights_sd
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
def convert(input_file, output_file, target_format, diffusers_prefix):
    """Convert a LoRA/LoHa/LoKr safetensors file between key layouts.

    Args:
        input_file: source .safetensors path.
        output_file: destination .safetensors path.
        target_format: "default" (convert FROM Diffusers) or "other"
            (convert TO Diffusers).
        diffusers_prefix: output key prefix for the Diffusers side; None
            means "diffusion_model".

    Raises:
        ValueError: if target_format is neither "default" nor "other".
    """
    logger.info(f"loading {input_file}")
    state_dict = load_file(input_file)
    # load_file does not expose the header metadata, so fetch it separately.
    with safe_open(input_file, framework="pt") as handle:
        metadata = handle.metadata()

    logger.info(f"converting to {target_format}")
    prefix = "lora_unet_"
    if target_format == "default":
        converted = convert_from_diffusers(prefix, state_dict)
        # default format carries precomputed model hashes in the metadata
        metadata = metadata or {}
        model_utils.precalculate_safetensors_hashes(converted, metadata)
    elif target_format == "other":
        converted = convert_to_diffusers(prefix, diffusers_prefix, state_dict)
    else:
        raise ValueError(f"unknown target format: {target_format}")

    logger.info(f"saving to {output_file}")
    save_file(converted, output_file, metadata=metadata)

    logger.info("done")
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
def parse_args():
    """Parse CLI options for the LoRA conversion script."""
    parser = argparse.ArgumentParser(description="Convert LoRA/LoHa/LoKr weights between default and other formats")
    # Required I/O and direction.
    parser.add_argument("--input", type=str, required=True, help="input model file")
    parser.add_argument("--output", type=str, required=True, help="output model file")
    parser.add_argument("--target", type=str, required=True, choices=["other", "default"], help="target format")
    # Optional Diffusers-side key prefix.
    parser.add_argument(
        "--diffusers_prefix", type=str, default=None, help="prefix for Diffusers weights, default is None (use `diffusion_model`)"
    )
    return parser.parse_args()
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
def main():
    """Script entry point: parse CLI options and run the conversion."""
    args = parse_args()
    convert(args.input, args.output, args.target, args.diffusers_prefix)
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
if __name__ == "__main__":
    # Script entry point: run the LoRA format conversion.
    main()
|
src/musubi_tuner/crepa.py
ADDED
|
@@ -0,0 +1,415 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Cross-frame representation alignment regularization for video DiT fine-tuning.
|
| 2 |
+
|
| 3 |
+
Training-time regularization that aligns DiT hidden states across video frames
|
| 4 |
+
by encouraging temporal consistency in a learned feature space.
|
| 5 |
+
|
| 6 |
+
Two modes:
|
| 7 |
+
- **backbone**: teacher signal from a deeper transformer block within the same model.
|
| 8 |
+
Inspired by SimpleTuner's LayerSync (https://github.com/bghira/SimpleTuner).
|
| 9 |
+
- **dino**: teacher signal from pre-cached DINOv2 per-frame patch tokens (zero VRAM
|
| 10 |
+
at training time — features are loaded from disk).
|
| 11 |
+
Based on CREPA – Cross-frame Representation Alignment (arxiv 2506.09229).
|
| 12 |
+
|
| 13 |
+
Only the small projector MLP is trained; all other modules stay frozen.
|
| 14 |
+
"""
|
| 15 |
+
|
| 16 |
+
from __future__ import annotations
|
| 17 |
+
|
| 18 |
+
import logging
|
| 19 |
+
import math
|
| 20 |
+
from dataclasses import dataclass
|
| 21 |
+
from typing import Any, Dict, List, Optional
|
| 22 |
+
|
| 23 |
+
import torch
|
| 24 |
+
import torch.nn as nn
|
| 25 |
+
import torch.nn.functional as F
|
| 26 |
+
|
| 27 |
+
logger = logging.getLogger(__name__)
|
| 28 |
+
|
| 29 |
+
# DINOv2 model name → token dimension
# Presumably used to size the projector output in "dino" mode — confirm once
# the dino branch of setup() is implemented.
DINO_DIMS = {
    "dinov2_vits14": 384,
    "dinov2_vitb14": 768,
    "dinov2_vitl14": 1024,
    "dinov2_vitg14": 1536,
}
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
# ---------------------------------------------------------------------------
|
| 39 |
+
# Config
|
| 40 |
+
# ---------------------------------------------------------------------------
|
| 41 |
+
|
| 42 |
+
@dataclass
class CREPAConfig:
    """Hyper-parameters for the CREPA regularizer (see module docstring)."""

    mode: str = "backbone"  # "backbone" | "dino"
    student_block_idx: int = 16  # block whose hidden states are aligned
    teacher_block_idx: int = 32  # backbone teacher block (backbone mode)
    dino_model: str = "dinov2_vitb14"  # DINOv2 model name (dino mode, future)
    lambda_crepa: float = 0.1  # loss weight
    tau: float = 1.0  # temporal neighbor decay factor
    num_neighbors: int = 2  # K frames on each side
    schedule: str = "constant"  # "constant" | "linear" | "cosine"
    warmup_steps: int = 0  # steps before the loss weight ramps in
    max_steps: int = 0  # needed for cosine/linear schedules
    normalize: bool = True  # L2-normalize features before similarity
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
# ---------------------------------------------------------------------------
|
| 58 |
+
# Projector MLP
|
| 59 |
+
# ---------------------------------------------------------------------------
|
| 60 |
+
|
| 61 |
+
class CREPAProjector(nn.Module):
    """Small two-layer MLP projecting student features (in_dim) into the
    teacher feature space (out_dim)."""

    def __init__(self, in_dim: int, out_dim: int):
        super().__init__()
        # Linear → GELU → Linear. Kept as an nn.Sequential named "net" so
        # checkpoint keys ("net.0.*", "net.2.*") remain stable.
        layers = [
            nn.Linear(in_dim, in_dim),
            nn.GELU(),
            nn.Linear(in_dim, out_dim),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        projected = self.net(x)
        return projected
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
# ---------------------------------------------------------------------------
# Main module
# ---------------------------------------------------------------------------


class CREPAModule:
    """Orchestrates hook installation, feature capture, and loss computation.

    Lifecycle:
      1. ``setup()`` once after the transformer is on-device: builds the
         projector and registers forward hooks on the chosen blocks.
      2. Each training step: run the transformer forward (hooks capture
         features), call ``on_step()`` to update the lambda schedule, then
         ``compute_loss()`` and add the result to the main loss, and finally
         ``cleanup_step()`` to release the captured tensors.
      3. ``remove_hooks()`` when training ends.
    """

    def __init__(self, config: CREPAConfig, transformer: nn.Module):
        self.config = config
        self.transformer = transformer

        self.projector: Optional[CREPAProjector] = None
        # Filled in by the forward hooks during the transformer's forward pass.
        self._student_features: Optional[torch.Tensor] = None
        self._teacher_features: Optional[torch.Tensor] = None
        self._hooks: list = []
        # Effective loss weight for the current step (updated by on_step()).
        self._current_lambda: float = config.lambda_crepa
        # Track the number of temporal tokens per frame for reshape
        self._num_temporal_frames: Optional[int] = None

    # ----- setup ----------------------------------------------------------

    def setup(self, device: torch.device, dtype: torch.dtype) -> None:
        """Create projector, install hooks. Call once after model is ready."""
        cfg = self.config

        # Determine dimensions from transformer blocks
        blocks = self.transformer.transformer_blocks
        num_blocks = len(blocks)

        if cfg.student_block_idx >= num_blocks:
            raise ValueError(
                f"student_block_idx={cfg.student_block_idx} out of range (model has {num_blocks} blocks)"
            )
        if cfg.mode == "backbone" and cfg.teacher_block_idx >= num_blocks:
            raise ValueError(
                f"teacher_block_idx={cfg.teacher_block_idx} out of range (model has {num_blocks} blocks)"
            )

        # inner_dim is the hidden dimension of video hidden states (TransformerArgs.x)
        # For LTX-2: inner_dim = num_attention_heads * attention_head_dim = 32 * 128 = 4096
        inner_dim = self.transformer.inner_dim

        if cfg.mode == "backbone":
            # Both student and teacher are from the same model → same dim
            self.projector = CREPAProjector(inner_dim, inner_dim).to(device=device, dtype=dtype)
            logger.info(
                "CREPA backbone mode: student_block=%d, teacher_block=%d, dim=%d, projector params=%s",
                cfg.student_block_idx,
                cfg.teacher_block_idx,
                inner_dim,
                f"{sum(p.numel() for p in self.projector.parameters()):,}",
            )
        elif cfg.mode == "dino":
            dino_dim = DINO_DIMS.get(cfg.dino_model)
            if dino_dim is None:
                raise ValueError(
                    f"Unknown DINOv2 model '{cfg.dino_model}'. "
                    f"Supported: {', '.join(DINO_DIMS.keys())}"
                )
            # Project student DiT features → DINOv2 feature space
            self.projector = CREPAProjector(inner_dim, dino_dim).to(device=device, dtype=dtype)
            logger.info(
                "CREPA dino mode: student_block=%d, dino_model=%s (dim=%d), projector params=%s",
                cfg.student_block_idx,
                cfg.dino_model,
                dino_dim,
                f"{sum(p.numel() for p in self.projector.parameters()):,}",
            )
        else:
            raise NotImplementedError(f"CREPA mode '{cfg.mode}' not implemented")

        self._install_hooks()

    # ----- hooks ----------------------------------------------------------

    def _install_hooks(self) -> None:
        """Register forward hooks that capture per-block video hidden states."""
        blocks = self.transformer.transformer_blocks
        cfg = self.config

        def _make_student_hook():
            def hook(_module, _input, output):
                # output is (video: TransformerArgs|None, audio: TransformerArgs|None)
                video_out = output[0]
                if video_out is not None:
                    # .x has shape [B, T*H*W, D]
                    # NOT detached: gradients must flow into the student block.
                    self._student_features = video_out.x

            return hook

        def _make_teacher_hook():
            def hook(_module, _input, output):
                video_out = output[0]
                if video_out is not None:
                    # Detach: the teacher is a fixed target, no gradient.
                    self._teacher_features = video_out.x.detach()

            return hook

        h1 = blocks[cfg.student_block_idx].register_forward_hook(_make_student_hook())
        self._hooks.append(h1)

        if cfg.mode == "backbone":
            h2 = blocks[cfg.teacher_block_idx].register_forward_hook(_make_teacher_hook())
            self._hooks.append(h2)

        logger.info("CREPA: installed %d forward hooks", len(self._hooks))

    # ----- trainable params -----------------------------------------------

    def get_trainable_params(self) -> List[torch.nn.Parameter]:
        """Return the projector's parameters (the only ones CREPA adds)."""
        if self.projector is None:
            return []
        return list(self.projector.parameters())

    # ----- schedule -------------------------------------------------------

    def on_step(self, global_step: int) -> None:
        """Update ``_current_lambda`` for this step according to the schedule.

        Order of precedence: constant → warmup ramp → (no max_steps: constant)
        → linear/cosine decay over [warmup_steps, max_steps].
        """
        cfg = self.config
        if cfg.schedule == "constant":
            self._current_lambda = cfg.lambda_crepa
            return

        if cfg.warmup_steps > 0 and global_step < cfg.warmup_steps:
            # Linear warmup from 0 to lambda_crepa.
            self._current_lambda = cfg.lambda_crepa * (global_step / cfg.warmup_steps)
            return

        if cfg.max_steps <= 0:
            # Decay schedules need a horizon; without one, fall back to constant.
            self._current_lambda = cfg.lambda_crepa
            return

        progress = min((global_step - cfg.warmup_steps) / max(cfg.max_steps - cfg.warmup_steps, 1), 1.0)

        if cfg.schedule == "linear":
            self._current_lambda = cfg.lambda_crepa * (1.0 - progress)
        elif cfg.schedule == "cosine":
            self._current_lambda = cfg.lambda_crepa * 0.5 * (1.0 + math.cos(math.pi * progress))
        else:
            self._current_lambda = cfg.lambda_crepa

    # ----- loss -----------------------------------------------------------

    def compute_loss(
        self,
        num_latent_frames: int,
        dino_features: Optional[torch.Tensor] = None,
    ) -> Optional[torch.Tensor]:
        """Compute CREPA loss from captured features.

        Args:
            num_latent_frames: number of temporal frames in the latent space (T).
            dino_features: pre-cached DINOv2 patch tokens ``[B, T_pixel, N_patches, D_dino]``
                (only used in dino mode).

        Returns:
            Scalar loss tensor, or None if features were not captured.
        """
        cfg = self.config

        if cfg.mode == "dino":
            return self._compute_loss_dino(num_latent_frames, dino_features)
        else:
            return self._compute_loss_backbone(num_latent_frames)

    def _compute_loss_backbone(self, num_latent_frames: int) -> Optional[torch.Tensor]:
        """Backbone mode: align student block features to a deeper block's features."""
        if self._student_features is None or self._teacher_features is None:
            return None
        if self._current_lambda == 0.0:
            # Lambda scheduled to zero: skip the (non-trivial) similarity work.
            return None

        cfg = self.config
        student_feat = self._student_features  # [B, T*H*W, D]
        teacher_feat = self._teacher_features  # [B, T*H*W, D]

        B, THW, D_s = student_feat.shape
        T = num_latent_frames
        if T <= 0 or THW % T != 0:
            # Token count not divisible by frame count — cannot recover frames.
            logger.warning("CREPA: cannot reshape features (THW=%d, T=%d), skipping", THW, T)
            return None
        HW = THW // T

        B_t, THW_t, D_t = teacher_feat.shape
        # NOTE(review): teacher token count is assumed divisible by the same T
        # (no explicit check, unlike the student) — confirm both blocks see the
        # same temporal layout.
        HW_t = THW_t // T

        # Project student features
        projected = self.projector(student_feat)  # [B, T*H*W, D_t]

        # Reshape to frame-level and average pool spatial dims → [B, T, D]
        proj_frames = projected.reshape(B, T, HW, -1).mean(dim=2)
        teach_frames = teacher_feat.reshape(B_t, T, HW_t, D_t).mean(dim=2)

        return self._similarity_loss(proj_frames, teach_frames, T)

    def _compute_loss_dino(
        self,
        num_latent_frames: int,
        dino_features: Optional[torch.Tensor],
    ) -> Optional[torch.Tensor]:
        """Dino mode: align student block features to pre-cached DINOv2 patch tokens."""
        if self._student_features is None:
            return None
        if dino_features is None:
            return None
        if self._current_lambda == 0.0:
            return None

        student_feat = self._student_features  # [B, T_latent*H*W, D_s]
        B, THW, D_s = student_feat.shape
        T = num_latent_frames
        if T <= 0 or THW % T != 0:
            logger.warning("CREPA dino: cannot reshape features (THW=%d, T=%d), skipping", THW, T)
            return None
        HW = THW // T

        # Project student → DINOv2 space: [B, T*H*W, D_dino]
        projected = self.projector(student_feat)
        # Reshape to [B, T, HW, D_dino] — keep spatial tokens (no mean-pool)
        proj_frames = projected.reshape(B, T, HW, -1)

        # dino_features: [B, T_pixel, N_patches, D_dino]
        dino_features = dino_features.to(device=proj_frames.device, dtype=proj_frames.dtype)
        T_pixel = dino_features.shape[1]

        # Temporal alignment: subsample T_pixel → T_latent
        if T_pixel != T:
            # Select evenly-spaced frame indices
            indices = torch.linspace(0, T_pixel - 1, T, device=dino_features.device).long()
            teach_frames = dino_features[:, indices]  # [B, T, N_patches, D]
        else:
            teach_frames = dino_features  # [B, T, N_patches, D]

        # Spatial alignment: interpolate token counts if HW != N_patches
        N_teach = teach_frames.shape[2]
        if HW != N_teach:
            # Interpolate student spatial tokens to match teacher count
            # [B, T, HW, D] → [B*T, D, HW] → interpolate → [B*T, D, N_teach] → [B, T, N_teach, D]
            # NOTE(review): 1-D linear interpolation over flattened spatial
            # tokens ignores the 2-D patch grid — confirm this is acceptable.
            D_dino = proj_frames.shape[-1]
            proj_flat = proj_frames.reshape(B * T, HW, D_dino).permute(0, 2, 1)  # [B*T, D, HW]
            proj_flat = F.interpolate(proj_flat, size=N_teach, mode="linear", align_corners=False)
            proj_frames = proj_flat.permute(0, 2, 1).reshape(B, T, N_teach, D_dino)  # [B, T, N_teach, D]

        # proj_frames: [B, T, N, D], teach_frames: [B, T, N, D]
        return self._similarity_loss(proj_frames, teach_frames, T)

    def _similarity_loss(
        self,
        proj_frames: torch.Tensor,
        teach_frames: torch.Tensor,
        T: int,
    ) -> Optional[torch.Tensor]:
        """Shared cosine-similarity + neighbor weighting loss.

        Supports both 3D ``[B, T, D]`` (backbone mode) and 4D ``[B, T, N, D]``
        (dino patch mode). For 4D input, computes per-patch cosine similarity
        and averages over the patch dimension.
        """
        cfg = self.config
        B = proj_frames.shape[0]
        is_4d = proj_frames.ndim == 4

        if cfg.normalize:
            # After L2-normalization the dot products below are cosine similarities.
            proj_frames = F.normalize(proj_frames, dim=-1)
            teach_frames = F.normalize(teach_frames, dim=-1)

        if is_4d:
            # [B, T, N, D] — per-patch cosine similarity, mean over patches
            # sim[b, t1, t2] = mean_over_n( sum_d(proj[b,t1,n,d] * teach[b,t2,n,d]) )
            # Use einsum: [B, T1, N, D] x [B, T2, N, D] → [B, T1, T2, N] → mean over N
            sim = torch.einsum("btnd,bsnd->btsn", proj_frames, teach_frames).mean(dim=-1)  # [B, T, T]
        else:
            # [B, T, D] — standard cosine similarity matrix
            sim = torch.bmm(proj_frames, teach_frames.transpose(1, 2))  # [B, T, T]

        K = cfg.num_neighbors
        tau = cfg.tau

        # Accumulate negative (weighted) similarities: maximizing similarity
        # minimizes the loss. Diagonal terms have weight 1, neighbors decay
        # as exp(-delta / tau).
        loss = torch.zeros(B, device=sim.device, dtype=sim.dtype)
        for f in range(T):
            loss = loss - sim[:, f, f]
            for delta in range(1, K + 1):
                weight = math.exp(-delta / tau)
                if f - delta >= 0:
                    loss = loss - weight * sim[:, f, f - delta]
                if f + delta < T:
                    loss = loss - weight * sim[:, f, f + delta]

        # Normalize by number of terms per frame
        num_terms = T
        for f in range(T):
            for delta in range(1, K + 1):
                if f - delta >= 0:
                    num_terms += 1
                if f + delta < T:
                    num_terms += 1
        # NOTE(review): loss is summed over all T frames but divided only by the
        # average number of terms PER frame (num_terms / T), so the magnitude
        # still scales with T — confirm this is the intended normalization
        # (vs dividing by num_terms).
        loss = loss.mean() / max(num_terms / T, 1.0)

        crepa_loss = loss * self._current_lambda

        if not torch.isfinite(crepa_loss):
            logger.warning("CREPA loss is non-finite (%.4g), skipping", crepa_loss.item())
            return None

        return crepa_loss

    # ----- cleanup --------------------------------------------------------

    def cleanup_step(self) -> None:
        """Clear captured features for next step."""
        self._student_features = None
        self._teacher_features = None

    def remove_hooks(self) -> None:
        """Unregister all forward hooks (call once when training finishes)."""
        for h in self._hooks:
            h.remove()
        self._hooks.clear()
        logger.info("CREPA: removed all hooks")

    # ----- checkpoint -----------------------------------------------------

    def state_dict(self) -> Dict[str, Any]:
        """Projector weights only — the transformer is checkpointed elsewhere."""
        if self.projector is None:
            return {}
        return self.projector.state_dict()

    def load_state_dict(self, sd: Dict[str, Any]) -> None:
        """Restore projector weights; a missing/empty dict is silently ignored."""
        if self.projector is not None and sd:
            self.projector.load_state_dict(sd)
            logger.info("CREPA: loaded projector weights (%d tensors)", len(sd))
|
| 399 |
+
|
| 400 |
+
|
| 401 |
+
# ---------------------------------------------------------------------------
# CLI arg parsing helper
# ---------------------------------------------------------------------------


def parse_crepa_args(raw_args: Optional[list[str]]) -> Dict[str, str]:
    """Turn a list of ``key=value`` strings into a dict.

    ``None`` or an empty list yields an empty dict. Keys and values are
    whitespace-stripped; only the first ``=`` splits, so values may contain
    ``=``. Raises ``ValueError`` for an entry with no ``=``.
    """
    if not raw_args:
        return {}
    parsed: Dict[str, str] = {}
    for entry in raw_args:
        key, sep, value = entry.partition("=")
        if not sep:
            raise ValueError(f"CREPA arg must be key=value, got: {entry!r}")
        parsed[key.strip()] = value.strip()
    return parsed
|
src/musubi_tuner/flux_2_cache_latents.py
ADDED
|
@@ -0,0 +1,126 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
from typing import List
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
import torch
|
| 6 |
+
|
| 7 |
+
from musubi_tuner.dataset import config_utils
|
| 8 |
+
from musubi_tuner.dataset.config_utils import BlueprintGenerator, ConfigSanitizer
|
| 9 |
+
from musubi_tuner.dataset.image_video_dataset import ItemInfo, save_latent_cache_flux_2
|
| 10 |
+
from musubi_tuner.flux_2 import flux2_utils
|
| 11 |
+
from musubi_tuner.flux_2 import flux2_models
|
| 12 |
+
import musubi_tuner.cache_latents as cache_latents
|
| 13 |
+
from musubi_tuner.utils.model_utils import str_to_dtype
|
| 14 |
+
|
| 15 |
+
logger = logging.getLogger(__name__)
|
| 16 |
+
logging.basicConfig(level=logging.INFO)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def preprocess_contents_flux_2(batch: List[ItemInfo]) -> tuple[torch.Tensor, List[List[np.ndarray]]]:
    """Convert a batch of ItemInfo into normalized target/control tensors.

    Returns:
        targets: float tensor ``[B, C, H, W]`` in [-1, 1] (RGB order).
        controls: per-item lists of ``[C, H, W]`` tensors in [-1, 1], or
            ``None`` when no item carries control images.
    """
    # --- target images ---
    # item.content may be a single (H, W, C) array or a list of them; in the
    # list case only the first entry is used as the target.
    frames = []
    for item in batch:
        raw = item.content[0] if isinstance(item.content, list) else item.content
        frames.append(torch.from_numpy(raw))
    targets = torch.stack(frames, dim=0).permute(0, 3, 1, 2) / 127.5 - 1.0  # B,C,H,W in [-1,1]

    # --- control images (optional, variable H/W per image) ---
    # Drop any alpha channel via [..., :3].
    # NOTE(review): items WITHOUT control content are skipped entirely, so the
    # outer list can be shorter than the batch while callers index it by batch
    # position — confirm batches are all-or-nothing w.r.t. control images.
    grouped = [
        [torch.from_numpy(img[..., :3]) for img in item.control_content]
        for item in batch
        if item.control_content is not None and len(item.control_content) > 0
    ]
    if not grouped:
        return targets, None

    controls = [[img.permute(2, 0, 1) / 127.5 - 1.0 for img in group] for group in grouped]
    return targets, controls
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def encode_and_save_batch(ae: flux2_models.AutoEncoder, batch: List[ItemInfo], arch_full: str):
    """Encode a batch of target (and optional control) images with the AE and
    write one latent cache file per item.

    Args:
        ae: FLUX.2 autoencoder; inputs are moved to its device/dtype.
        batch: items providing ``content`` (target image) and optionally
            ``control_content`` (list of control images).
        arch_full: architecture tag recorded in each cache file.
    """
    contents, controls = preprocess_contents_flux_2(batch)

    with torch.no_grad():
        latents = ae.encode(contents.to(ae.device, dtype=ae.dtype))  # B, C, H, W
        if controls is not None:
            # Control images may each have a different size, so encode one at a
            # time with a singleton batch dimension.
            control_latents = [[ae.encode(c.to(ae.device, dtype=ae.dtype).unsqueeze(0))[0] for c in cl] for cl in controls]
            # now control_latents is list of list of (C, H, W) tensors
        else:
            control_latents = None

    # save cache for each item in the batch
    for b, item in enumerate(batch):
        target_latent = latents[b]  # C, H, W. Target latents for this image (ground truth)
        # NOTE(review): control_latents only contains entries for items that
        # HAVE control content; indexing by batch position assumes batches are
        # all-or-nothing w.r.t. control images — confirm upstream guarantees this.
        control_latent = control_latents[b] if control_latents is not None else None  # list of (C, H, W) tensors or None

        # Use the module logger (was a bare print) for consistency with the
        # rest of this file's logging.
        logger.info(
            f"Saving cache for item {item.item_key} at {item.latent_cache_path}, target latents shape: {target_latent.shape}, "
            f"control latents shape: {[cl.shape for cl in control_latent] if control_latent is not None else None}"
        )

        # save cache (file path is inside item.latent_cache_path pattern)
        save_latent_cache_flux_2(
            item_info=item,
            latent=target_latent,  # Ground truth for this image
            control_latent=control_latent,  # Control latent for this image
            arch_full=arch_full,
        )
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def main():
    """CLI entry point: cache FLUX.2 VAE latents for every item in a dataset.

    Flow: parse args → build dataset group from the TOML config → (optional)
    debug visualization → load the autoencoder → encode and cache latents via
    the shared ``cache_latents`` loop.
    """
    parser = cache_latents.setup_parser_common()
    flux2_utils.add_model_version_args(parser)

    args = parser.parse_args()
    model_version_info = flux2_utils.FLUX2_MODEL_INFO[args.model_version]

    if args.disable_cudnn_backend:
        logger.info("Disabling cuDNN PyTorch backend.")
        torch.backends.cudnn.enabled = False

    # Fall back to CUDA when available if no device was given on the CLI.
    device = args.device if hasattr(args, "device") and args.device else ("cuda" if torch.cuda.is_available() else "cpu")
    device = torch.device(device)

    # Load dataset config
    blueprint_generator = BlueprintGenerator(ConfigSanitizer())
    logger.info(f"Load dataset config from {args.dataset_config}")
    user_config = config_utils.load_user_config(args.dataset_config)
    blueprint = blueprint_generator.generate(user_config, args, architecture=model_version_info.architecture)
    train_dataset_group = config_utils.generate_dataset_group_by_blueprint(blueprint.dataset_group)

    datasets = train_dataset_group.datasets

    if args.debug_mode is not None:
        # Debug: visualize dataset items on the console instead of caching.
        cache_latents.show_datasets(
            datasets, args.debug_mode, args.console_width, args.console_back, args.console_num_images, fps=16
        )
        return

    assert args.vae is not None, "ae checkpoint is required"

    logger.info(f"Loading AE model from {args.vae}")
    vae_dtype = torch.float32 if args.vae_dtype is None else str_to_dtype(args.vae_dtype)
    ae = flux2_utils.load_ae(args.vae, dtype=vae_dtype, device=device, disable_mmap=True)
    ae.to(device)

    # encoding closure
    def encode(batch: List[ItemInfo]):
        encode_and_save_batch(ae, batch, model_version_info.architecture_full)

    # reuse core loop from cache_latents with no change
    cache_latents.encode_datasets(datasets, encode, args)


if __name__ == "__main__":
    # Script entry point.
    main()
|
src/musubi_tuner/flux_2_cache_text_encoder_outputs.py
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
|
| 5 |
+
from musubi_tuner.dataset import config_utils
|
| 6 |
+
from musubi_tuner.dataset.config_utils import BlueprintGenerator, ConfigSanitizer
|
| 7 |
+
|
| 8 |
+
from musubi_tuner.dataset.image_video_dataset import ItemInfo, save_text_encoder_output_cache_flux_2
|
| 9 |
+
|
| 10 |
+
from musubi_tuner.flux_2 import flux2_utils
|
| 11 |
+
import musubi_tuner.cache_text_encoder_outputs as cache_text_encoder_outputs
|
| 12 |
+
import logging
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
logger = logging.getLogger(__name__)
|
| 16 |
+
logging.basicConfig(level=logging.INFO)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def encode_and_save_batch(text_embedder: torch.nn.Module, batch: list[ItemInfo], device: torch.device, arch_full: str):
    """Encode every caption in the batch with the text encoder and write one
    text-encoder-output cache file per item."""
    captions = [item.caption for item in batch]

    # fp8 weights (itemsize == 1) cannot be used as an autocast target dtype;
    # compute in bfloat16 for those models, otherwise use the model's own dtype.
    if text_embedder.dtype.itemsize == 1:
        autocast_dtype = torch.bfloat16
    else:
        autocast_dtype = text_embedder.dtype

    with torch.autocast(device_type=device.type, dtype=autocast_dtype), torch.no_grad():
        ctx_vec = text_embedder(captions)
        ctx_vec = ctx_vec.cpu()  # e.g. [B, 512, 15360] — presumably; confirm against the embedder

    # Iterate the batch dimension: one cache file per item.
    for item, embedding in zip(batch, ctx_vec):
        save_text_encoder_output_cache_flux_2(item, embedding, arch_full=arch_full)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def main():
    """CLI entry point: cache text-encoder outputs for every caption in a dataset.

    Flow: parse args → build dataset group → enumerate existing/expected cache
    files → load the text encoder → encode captions in batches → prune stale
    cache files.
    """
    parser = cache_text_encoder_outputs.setup_parser_common()
    parser = flux_2_setup_parser(parser)

    args = parser.parse_args()
    model_version_info = flux2_utils.FLUX2_MODEL_INFO[args.model_version]

    # Fall back to CUDA when available if no device was given on the CLI.
    device = args.device if args.device is not None else "cuda" if torch.cuda.is_available() else "cpu"
    device = torch.device(device)

    # Load dataset config
    blueprint_generator = BlueprintGenerator(ConfigSanitizer())
    logger.info(f"Load dataset config from {args.dataset_config}")
    user_config = config_utils.load_user_config(args.dataset_config)
    blueprint = blueprint_generator.generate(user_config, args, architecture=model_version_info.architecture)
    train_dataset_group = config_utils.generate_dataset_group_by_blueprint(blueprint.dataset_group)

    datasets = train_dataset_group.datasets

    # prepare cache files and paths: all_cache_files_for_dataset = existing cache files, all_cache_paths_for_dataset = all cache paths in the dataset
    all_cache_files_for_dataset, all_cache_paths_for_dataset = cache_text_encoder_outputs.prepare_cache_files_and_paths(datasets)

    # Load Mistral 3 or Qwen-3 text encoder
    # fp8 halves memory; encode_and_save_batch autocasts to bfloat16 in that case.
    m3_dtype = torch.float8_e4m3fn if args.fp8_text_encoder else torch.bfloat16
    text_embedder = flux2_utils.load_text_embedder(
        model_version_info, args.text_encoder, dtype=m3_dtype, device=device, disable_mmap=True
    )

    # Encode with Mistral 3 or Qwen-3 text encoder
    logger.info("Encoding with text encoder")

    def encode_for_text_encoder(batch: list[ItemInfo]):
        nonlocal text_embedder
        encode_and_save_batch(text_embedder, batch, device, model_version_info.architecture_full)

    cache_text_encoder_outputs.process_text_encoder_batches(
        args.num_workers,
        args.skip_existing,
        args.batch_size,
        datasets,
        all_cache_files_for_dataset,
        all_cache_paths_for_dataset,
        encode_for_text_encoder,
    )
    # Release the (large) text encoder before post-processing.
    del text_embedder

    # remove cache files not in dataset
    cache_text_encoder_outputs.post_process_cache_files(
        datasets, all_cache_files_for_dataset, all_cache_paths_for_dataset, args.keep_cache
    )
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def flux_2_setup_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
    """Add FLUX.2-specific CLI arguments (text encoder path, fp8 flag, model
    version) to the common cache parser and return it."""
    parser.add_argument("--text_encoder", type=str, default=None, required=True, help="text encoder (mistral 3) checkpoint path")
    parser.add_argument("--fp8_text_encoder", action="store_true", help="use fp8 for Text Encoder model")
    flux2_utils.add_model_version_args(parser)
    return parser


if __name__ == "__main__":
    # Script entry point.
    main()
|
src/musubi_tuner/flux_2_generate_image.py
ADDED
|
@@ -0,0 +1,1214 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import gc
|
| 3 |
+
from importlib.util import find_spec
|
| 4 |
+
import random
|
| 5 |
+
import os
|
| 6 |
+
import time
|
| 7 |
+
import copy
|
| 8 |
+
from typing import Tuple, Optional, List, Any, Dict
|
| 9 |
+
|
| 10 |
+
import torch
|
| 11 |
+
from safetensors.torch import load_file, save_file
|
| 12 |
+
from safetensors import safe_open
|
| 13 |
+
|
| 14 |
+
from musubi_tuner.flux_2 import flux2_utils
|
| 15 |
+
from musubi_tuner.flux_2 import flux2_models
|
| 16 |
+
from musubi_tuner.utils import model_utils
|
| 17 |
+
from musubi_tuner.utils.lora_utils import filter_lora_state_dict
|
| 18 |
+
|
| 19 |
+
lycoris_available = find_spec("lycoris") is not None
|
| 20 |
+
|
| 21 |
+
from musubi_tuner.networks import lora_flux_2
|
| 22 |
+
from musubi_tuner.utils.device_utils import clean_memory_on_device
|
| 23 |
+
from musubi_tuner.hv_generate_video import get_time_flag, save_images_grid, setup_parser_compile, synchronize_device
|
| 24 |
+
from musubi_tuner.wan_generate_video import merge_lora_weights
|
| 25 |
+
|
| 26 |
+
import logging
|
| 27 |
+
|
| 28 |
+
logger = logging.getLogger(__name__)
|
| 29 |
+
logging.basicConfig(level=logging.INFO)
|
| 30 |
+
|
| 31 |
+
|
class GenerationSettings:
    """Bundle of device/dtype settings shared across generation calls."""

    def __init__(self, device: torch.device, dit_weight_dtype: Optional[torch.dtype] = None):
        self.device = device
        # not used currently because model may be optimized
        self.dit_weight_dtype = dit_weight_dtype
| 36 |
+
|
| 37 |
+
|
def parse_args() -> argparse.Namespace:
    """Parse and validate command line arguments for FLUX.2 image generation.

    Returns:
        argparse.Namespace: parsed arguments

    Raises:
        ValueError: if --from_file and --interactive are both given, if no
            prompt source is specified (and no latents to decode), or if
            --lycoris is requested but the lycoris package is not installed.
    """
    parser = argparse.ArgumentParser(description="FLUX.2 inference script")

    # model paths
    parser.add_argument("--dit", type=str, default=None, help="DiT directory or path")
    parser.add_argument(
        "--disable_numpy_memmap", action="store_true", help="Disable numpy memmap when loading safetensors. Default is False."
    )
    parser.add_argument("--vae", type=str, default=None, help="AE directory or path")
    parser.add_argument("--text_encoder", type=str, required=True, help="Text Encoder Mistral 3/Qwen 3 directory or path")

    # LoRA
    parser.add_argument("--lora_weight", type=str, nargs="*", required=False, default=None, help="LoRA weight path")
    parser.add_argument("--lora_multiplier", type=float, nargs="*", default=1.0, help="LoRA multiplier")
    parser.add_argument("--include_patterns", type=str, nargs="*", default=None, help="LoRA module include patterns")
    parser.add_argument("--exclude_patterns", type=str, nargs="*", default=None, help="LoRA module exclude patterns")
    parser.add_argument(
        "--save_merged_model",
        type=str,
        default=None,
        help="Save merged model to path. If specified, no inference will be performed.",
    )

    # inference
    parser.add_argument(
        "--guidance_scale", type=float, default=4.0, help="Guidance scale for classifier free guidance. Default is 4.0."
    )
    parser.add_argument("--prompt", type=str, default=None, help="prompt for generation")
    parser.add_argument(
        "--negative_prompt",
        type=str,
        default=None,
        help="negative prompt for generation, default is None (` ` for non-distilled model)",
    )

    parser.add_argument("--image_size", type=int, nargs=2, default=[1024, 1024], help="image size, height and width")
    parser.add_argument(
        "--control_image_path",
        nargs="*",
        type=str,
        default=None,
        help="path to control (reference) image(s) for Flux 2 image edit",
    )
    parser.add_argument(
        "--no_resize_control", action="store_true", help="Do not resize control image (default is to resize if too large)"
    )
    # fix: help text previously claimed "default is 25" while the actual default is 50
    parser.add_argument("--infer_steps", type=int, default=50, help="number of inference steps, default is 50")
    # fix: this script generates images, not video
    parser.add_argument("--save_path", type=str, required=True, help="path to save generated image")
    parser.add_argument("--seed", type=int, default=None, help="Seed for evaluation.")
    parser.add_argument(
        "--embedded_cfg_scale",
        type=float,
        default=4.0,
        help="Embedded CFG scale (distilled CFG Scale), default is 4.0. All klein models ignore this.",
    )

    # Flow Matching
    parser.add_argument(
        "--flow_shift",
        type=float,
        default=None,
        help="Shift factor for flow matching schedulers. Default is None (FLUX.2 default).",
    )

    parser.add_argument("--fp8", action="store_true", help="use fp8 for DiT model")
    parser.add_argument("--fp8_scaled", action="store_true", help="use scaled fp8 for DiT, only for fp8")

    parser.add_argument("--fp8_text_encoder", action="store_true", help="use fp8 for Text Encoder (Mistral 3)")
    parser.add_argument(
        "--device", type=str, default=None, help="device to use for inference. If None, use CUDA if available, otherwise use CPU"
    )
    parser.add_argument(
        "--attn_mode",
        type=str,
        default="torch",
        choices=["flash", "torch", "sageattn", "xformers", "sdpa"],  # "flash2", "flash3",
        help="attention mode",
    )
    parser.add_argument("--blocks_to_swap", type=int, default=0, help="number of blocks to swap in the model")
    parser.add_argument(
        "--use_pinned_memory_for_block_swap",
        action="store_true",
        help="use pinned memory for block swapping, which may speed up data transfer between CPU and GPU but uses more shared GPU memory on Windows",
    )
    parser.add_argument(
        "--output_type",
        type=str,
        default="images",
        choices=["images", "latent", "latent_images"],
        help="output type",
    )
    parser.add_argument("--no_metadata", action="store_true", help="do not save metadata")
    parser.add_argument("--latent_path", type=str, nargs="*", default=None, help="path to latent for decode. no inference")
    parser.add_argument(
        "--lycoris", action="store_true", help=f"use lycoris for inference{'' if lycoris_available else ' (not available)'}"
    )
    setup_parser_compile(parser)

    # batch and interactive modes
    parser.add_argument("--from_file", type=str, default=None, help="Read prompts from a file")
    parser.add_argument("--interactive", action="store_true", help="Interactive mode: read prompts from console")

    flux2_utils.add_model_version_args(parser)

    args = parser.parse_args()

    # Validate arguments
    if args.from_file and args.interactive:
        raise ValueError("Cannot use both --from_file and --interactive at the same time")

    # A prompt source is only required when actually generating (not when just decoding latents)
    if args.latent_path is None or len(args.latent_path) == 0:
        if args.prompt is None and not args.from_file and not args.interactive:
            raise ValueError("Either --prompt, --from_file or --interactive must be specified")

    if args.lycoris and not lycoris_available:
        raise ValueError("install lycoris: https://github.com/KohakuBlueleaf/LyCORIS")

    return args
| 172 |
+
|
| 173 |
+
|
def parse_prompt_line(line: str) -> Dict[str, Any]:
    """Parse a single prompt line (prompt text plus ``--opt value`` pairs)
    into a dictionary of argument overrides.

    Args:
        line: prompt line, e.g. ``"a cat --w 512 --h 512 --d 42"``

    Returns:
        Dict[str, Any]: override values keyed by argument name
    """
    # TODO common function with hv_train_network.line_to_prompt_dict
    stripped = line.strip()
    if stripped.startswith("--"):  # option-only line: no prompt text
        prompt = None
        segments = (" " + stripped).split(" --")
    else:
        segments = line.split(" --")
        prompt = segments[0].strip()
        segments = segments[1:]

    overrides: Dict[str, Any] = {} if prompt is None else {"prompt": prompt}
    overrides["control_image_path"] = []

    # short option name -> (override key, value converter)
    scalar_options = {
        "w": ("image_size_width", int),
        "h": ("image_size_height", int),
        "d": ("seed", int),
        "s": ("infer_steps", int),
        "g": ("guidance_scale", float),
        "l": ("guidance_scale", float),
        "fs": ("flow_shift", float),
        "i": ("image_path", str),
        "n": ("negative_prompt", str),
    }

    for segment in segments:
        if not segment.strip():
            continue
        name, _, raw_value = segment.partition(" ")
        name = name.strip()
        raw_value = raw_value.strip()
        if name == "ci":  # control image paths accumulate
            overrides["control_image_path"].append(raw_value)
        elif name in scalar_options:
            key, convert = scalar_options[name]
            overrides[key] = convert(raw_value)

    # If no control_image_path was provided, remove the empty list
    if not overrides["control_image_path"]:
        del overrides["control_image_path"]

    return overrides
| 232 |
+
|
| 233 |
+
|
def apply_overrides(args: argparse.Namespace, overrides: Dict[str, Any]) -> argparse.Namespace:
    """Return a deep copy of *args* with the given overrides applied.

    Args:
        args: original arguments (left unmodified)
        overrides: dictionary of overrides from :func:`parse_prompt_line`

    Returns:
        argparse.Namespace: new arguments with overrides applied
    """
    patched = copy.deepcopy(args)

    for key, value in overrides.items():
        # width/height overrides map into the image_size list, stored as [height, width]
        if key == "image_size_width":
            patched.image_size[1] = value
        elif key == "image_size_height":
            patched.image_size[0] = value
        else:
            setattr(patched, key, value)

    return patched
| 255 |
+
|
| 256 |
+
|
def check_inputs(args: argparse.Namespace) -> Tuple[int, int]:
    """Validate the requested output image size.

    Args:
        args: command line arguments; ``image_size`` is read as [height, width]

    Returns:
        Tuple[int, int]: (height, width)

    Raises:
        ValueError: if either dimension is not divisible by 16
    """
    height = args.image_size[0]
    width = args.image_size[1]

    # FLUX.2 latents are packed on a 16-pixel grid, so both dims must align
    if height % 16 or width % 16:
        raise ValueError(f"`height` and `width` have to be divisible by 16 but are {height} and {width}.")

    return height, width
| 273 |
+
|
| 274 |
+
|
| 275 |
+
# region DiT model
|
| 276 |
+
|
| 277 |
+
|
def load_dit_model(
    args: argparse.Namespace, device: torch.device, dit_weight_dtype: Optional[torch.dtype] = None
) -> flux2_models.Flux2:
    """Load the FLUX.2 DiT model, optionally merging LoRA weights and applying fp8 optimization.

    Args:
        args: command line arguments (reads dit, lycoris, lora_*, fp8_scaled,
            blocks_to_swap, compile and related options)
        device: target compute device
        dit_weight_dtype: data type for the model weights. None keeps weights as-is.

    Returns:
        flux2_models.Flux2: DiT model, in eval mode with grads disabled
            (unless returning early for --save_merged_model)
    """
    # If LyCORIS is enabled, we will load the model to CPU and then merge LoRA weights (static method)
    loading_device = "cpu"
    if args.blocks_to_swap == 0 and not args.lycoris:
        loading_device = device

    # load LoRA weights up front so load_flow_model can merge them during loading
    if not args.lycoris and args.lora_weight is not None and len(args.lora_weight) > 0:
        lora_weights_list = []
        for lora_weight in args.lora_weight:
            logger.info(f"Loading LoRA weight from: {lora_weight}")
            lora_sd = load_file(lora_weight)  # load on CPU, dtype is as is
            lora_sd = filter_lora_state_dict(lora_sd, args.include_patterns, args.exclude_patterns)
            lora_weights_list.append(lora_sd)
    else:
        lora_weights_list = None

    loading_weight_dtype = dit_weight_dtype
    if args.fp8_scaled and not args.lycoris:
        loading_weight_dtype = None  # we will load weights as-is and then optimize to fp8
    elif args.lycoris:
        loading_weight_dtype = torch.bfloat16  # lycoris requires bfloat16 or float16, because it merges weights

    model_version_info = flux2_utils.FLUX2_MODEL_INFO[args.model_version]
    model = flux2_utils.load_flow_model(
        device,
        model_version_info,
        args.dit,
        args.attn_mode,
        False,
        loading_device,
        loading_weight_dtype,
        args.fp8_scaled and not args.lycoris,  # fp8 handled at load time only on the non-lycoris path
        lora_weights_list,
        args.lora_multiplier,
        args.disable_numpy_memmap,
    )

    # merge LoRA weights (LyCORIS path: merge after loading, then optionally fp8-optimize)
    if args.lycoris:
        if args.lora_weight is not None and len(args.lora_weight) > 0:
            merge_lora_weights(
                lora_flux_2,
                model,
                args.lora_weight,
                args.lora_multiplier,
                args.include_patterns,
                args.exclude_patterns,
                device,
                lycoris=True,
                save_merged_model=args.save_merged_model,
            )

        if args.fp8_scaled:
            # load state dict as-is and optimize to fp8 (skipped during load on this path)
            state_dict = model.state_dict()

            # if no blocks to swap, we can move the weights to GPU after optimization on GPU (omit redundant CPU->GPU copy)
            move_to_device = args.blocks_to_swap == 0  # if blocks_to_swap > 0, we will keep the model on CPU

            from musubi_tuner.modules.fp8_optimization_utils import apply_fp8_monkey_patch, optimize_state_dict_with_fp8

            # inplace optimization
            state_dict = optimize_state_dict_with_fp8(
                state_dict,
                device,
                flux2_models.FP8_OPTIMIZATION_TARGET_KEYS,
                flux2_models.FP8_OPTIMIZATION_EXCLUDE_KEYS,
                move_to_device=move_to_device,
            )
            apply_fp8_monkey_patch(model, state_dict, use_scaled_mm=False)  # args.scaled_mm)

            info = model.load_state_dict(state_dict, strict=True, assign=True)
            logger.info(f"Loaded FP8 optimized weights: {info}")

    # if we only want to save the model, we can skip the rest of the setup but still return the model
    if args.save_merged_model:
        return model

    if not args.fp8_scaled:
        # simple cast to dit_weight_dtype
        target_dtype = None  # load as-is (dit_weight_dtype == dtype of the weights in state_dict)
        target_device = None

        if dit_weight_dtype is not None:  # in case of args.fp8 and not args.fp8_scaled
            logger.info(f"Convert model to {dit_weight_dtype}")
            target_dtype = dit_weight_dtype

        if args.blocks_to_swap == 0:
            logger.info(f"Move model to device: {device}")
            target_device = device

        model.to(target_device, target_dtype)  # move and cast at the same time. this reduces redundant copy operations

    if args.blocks_to_swap > 0:
        logger.info(f"Enable swap {args.blocks_to_swap} blocks to CPU from device: {device}")
        model.enable_block_swap(
            args.blocks_to_swap, device, supports_backward=False, use_pinned_memory=args.use_pinned_memory_for_block_swap
        )
        model.move_to_device_except_swap_blocks(device)
        model.prepare_block_swap_before_forward()
    else:
        # make sure the model is on the right device
        model.to(device)

    if args.compile:
        # linear layers are excluded from compilation when block swap is active
        model = model_utils.compile_transformer(
            args, model, [model.double_blocks, model.single_blocks], disable_linear=args.blocks_to_swap > 0
        )

    model.eval().requires_grad_(False)
    clean_memory_on_device(device)

    return model
| 407 |
+
|
| 408 |
+
|
def decode_latent(ae: flux2_models.AutoEncoder, latent: torch.Tensor, device: torch.device) -> torch.Tensor:
    """Decode a single latent into pixel space with the autoencoder.

    The AE is moved to *device* for the decode pass and returned to CPU
    afterwards. A 3D latent is treated as a single unbatched sample.
    """
    logger.info("Decoding image...")
    batched = latent.unsqueeze(0) if latent.ndim == 3 else latent

    ae.to(device)
    with torch.no_grad():
        pixels = ae.decode(batched.to(device, ae.dtype)).to("cpu")
    ae.to("cpu")

    logger.info(f"Decoded. Pixel shape {pixels.shape}")
    return pixels[0]  # drop the batch dimension
| 422 |
+
|
| 423 |
+
|
def prepare_image_inputs(
    args: argparse.Namespace, device: torch.device, ae: flux2_models.AutoEncoder
) -> Tuple[int, int, Optional[List[torch.Tensor]]]:
    """Prepare image-related inputs for FLUX.2: size validation and AE encoding
    of any control (reference) images.

    Args:
        args: command line arguments (reads image_size, control_image_path, no_resize_control)
        device: device used for the AE encode pass
        ae: autoencoder; moved to *device* for encoding, then restored to its original device

    Returns:
        Tuple[int, int, Optional[List[torch.Tensor]]]: (height, width, list of
        control latents as bfloat16 CPU tensors, or None when no control images are given)
    """
    height, width = check_inputs(args)

    if args.control_image_path is not None and len(args.control_image_path):
        # multiple reference images are limited to 1024x1024 each; a single image may be larger
        # NOTE(review): 2024 looks like a possible typo for 2048 — confirm intended limit
        limit_size = (1024, 1024) if len(args.control_image_path) > 1 else (2024, 2024)
        if args.no_resize_control:
            limit_size = None

        img_ctx_prep = []
        for image_path in args.control_image_path:
            image_tensor, _, _ = flux2_utils.preprocess_control_image(image_path, limit_size)
            img_ctx_prep.append(image_tensor)

        # AE encoding
        logger.info("Encoding control image to latent space with AE")
        ae_original_device = ae.device
        ae.to(device)

        control_latent = []
        with torch.no_grad():
            # Encode each reference image
            for img in img_ctx_prep:
                encoded = ae.encode(img.to(device, dtype=ae.dtype))[0]  # C, H, W
                control_latent.append(encoded.to(torch.bfloat16).to("cpu"))

        ae.to(ae_original_device)  # Move VAE back to its original device
        clean_memory_on_device(device)
    else:
        control_latent = None

    return height, width, control_latent
| 458 |
+
|
| 459 |
+
|
def prepare_text_inputs(
    args: argparse.Namespace, device: torch.device, shared_models: Optional[Dict] = None
) -> Tuple[Dict[str, Any], Optional[Dict[str, Any]]]:
    """Encode the prompt (and, for non-distilled models, the negative prompt)
    with the text encoder, using and updating a per-prompt cache.

    Args:
        args: command line arguments (reads prompt, negative_prompt, fp8_text_encoder, ...)
        device: device used for encoding
        shared_models: optional dict that may provide a shared "text_embedder",
            "conds_cache", and "model" (DiT); when the DiT is shared it is moved
            to CPU before the text encoder is moved onto the device

    Returns:
        Tuple: (arg_c, arg_null) where arg_c is {"ctx_vec", "prompt"} and
        arg_null is the same for the negative prompt, or None for
        guidance-distilled models.
    """
    model_version_info = flux2_utils.FLUX2_MODEL_INFO[args.model_version]

    # load text encoder: conds_cache holds cached encodings for prompts without padding
    conds_cache = {}
    if shared_models is not None:
        text_embedder = shared_models.get("text_embedder")
        if "conds_cache" in shared_models:  # Use shared cache if available
            conds_cache = shared_models["conds_cache"]
        # text_encoder is on device (batched inference) or CPU (interactive inference)
    else:  # Load if not in shared_models
        te_dtype = torch.float8_e4m3fn if args.fp8_text_encoder else torch.bfloat16
        text_embedder = flux2_utils.load_text_embedder(
            model_version_info, args.text_encoder, dtype=te_dtype, device=device, disable_mmap=True
        )

    # Store original devices to move back later if they were shared. This does nothing if shared_models is None
    text_encoder_original_device = text_embedder.device if text_embedder else None

    logger.info("Encoding prompt with Text Encoders")

    # Ensure text_encoder is not None before proceeding
    if not text_embedder:
        raise ValueError("Text embedder is not loaded properly.")

    # Define a function to move models to device if needed
    # This is to avoid moving models if not needed, especially in interactive mode
    model_is_moved = False

    def move_models_to_device_if_needed():
        # Lazily evict the shared DiT to CPU and bring the text encoder onto the
        # device; runs at most once per prepare_text_inputs call.
        nonlocal model_is_moved
        nonlocal shared_models

        if model_is_moved:
            return
        model_is_moved = True

        logger.info(f"Moving DiT and Text Encoders to appropriate device: {device} or CPU")
        if shared_models and "model" in shared_models:  # DiT model is shared
            if args.blocks_to_swap > 0:
                logger.info("Waiting for 5 seconds to finish block swap")
                time.sleep(5)
            model = shared_models["model"]
            model.to("cpu")
            clean_memory_on_device(device)  # clean memory on device before moving models

        text_embedder.to(device)

    prompt = args.prompt
    if prompt in conds_cache:
        ctx_vec = conds_cache[prompt]
    else:
        move_models_to_device_if_needed()

        with torch.no_grad(), torch.autocast(device_type=device.type, dtype=torch.bfloat16):
            ctx_vec = text_embedder([prompt])  # [1, 512, 15360]
        ctx_vec = ctx_vec.cpu()  # cache on CPU to free GPU memory
        conds_cache[prompt] = ctx_vec

    negative_prompt = args.negative_prompt
    negative_ctx_vec = None
    if not model_version_info.guidance_distilled:
        if negative_prompt is None:
            negative_prompt = " "  # for non-distilled model, use empty string as negative prompt
        if negative_prompt in conds_cache:
            negative_ctx_vec = conds_cache[negative_prompt]
        else:
            move_models_to_device_if_needed()

            with torch.no_grad(), torch.autocast(device_type=device.type, dtype=torch.bfloat16):
                negative_ctx_vec = text_embedder([negative_prompt])  # [1, 512, 15360]
            negative_ctx_vec = negative_ctx_vec.cpu()
            conds_cache[negative_prompt] = negative_ctx_vec

    if not (shared_models and "text_embedder" in shared_models):  # if loaded locally
        del text_embedder
    else:  # if shared, move back to original device (likely CPU)
        if text_embedder:
            text_embedder.to(text_encoder_original_device)

    gc.collect()  # Force cleanup of Text Encoder from GPU memory
    clean_memory_on_device(device)

    arg_c = {"ctx_vec": ctx_vec, "prompt": prompt}
    if negative_ctx_vec is None:
        arg_null = None  # distilled models need no negative conditioning
    else:
        arg_null = {"ctx_vec": negative_ctx_vec, "prompt": negative_prompt}

    return arg_c, arg_null
| 553 |
+
|
| 554 |
+
|
def prepare_i2v_inputs(
    args: argparse.Namespace, device: torch.device, ae: flux2_models.AutoEncoder, shared_models: Optional[Dict] = None
) -> Tuple[int, int, Dict[str, Any], Optional[torch.Tensor]]:
    """Prepare all generation inputs: control-image AE encoding plus text encoding.

    Args:
        args: command line arguments
        device: device to use
        ae: AE model instance used for control-image encoding
        shared_models: dictionary containing pre-loaded models (mainly for DiT)

    Returns:
        Tuple[int, int, Dict[str, Any], Optional[torch.Tensor]]:
            (height, width, (context, context_null), control_latent)
    """
    # image side first (AE encode), then the text encoder pass
    height, width, control_latent = prepare_image_inputs(args, device, ae)
    ctx_nctx = prepare_text_inputs(args, device, shared_models)
    return height, width, ctx_nctx, control_latent
| 576 |
+
|
| 577 |
+
|
def generate(
    args: argparse.Namespace,
    gen_settings: GenerationSettings,
    shared_models: Optional[Dict] = None,
    precomputed_image_data: Optional[tuple[int, int, Optional[torch.Tensor]]] = None,
    precomputed_text_data: Optional[tuple[Dict, Dict]] = None,
) -> tuple[Optional[flux2_models.AutoEncoder], torch.Tensor]:  # AE can be Optional
    """Main generation entry point: encode inputs, run the denoising loop, return the latent.

    Args:
        args: command line arguments
        gen_settings: device/dtype settings
        shared_models: dictionary containing pre-loaded models (mainly for DiT)
        precomputed_image_data: Optional tuple with precomputed image data (height, width, control_latent)
        precomputed_text_data: Optional tuple with precomputed text data (context, context_null)

    Returns:
        tuple: (flux2_models.AutoEncoder model (vae) or None, torch.Tensor generated latent);
        (None, None) when --save_merged_model short-circuits inference
    """
    model_version_info = flux2_utils.FLUX2_MODEL_INFO[args.model_version]
    device, dit_weight_dtype = (gen_settings.device, gen_settings.dit_weight_dtype)
    vae_instance_for_return = None

    # prepare seed
    seed = args.seed if args.seed is not None else random.randint(0, 2**32 - 1)
    args.seed = seed  # set seed to args for saving

    if precomputed_image_data is not None and precomputed_text_data is not None:
        logger.info("Using precomputed image and text data.")
        height, width, control_latent = precomputed_image_data
        ctx_nctx = precomputed_text_data

        # VAE is not loaded here if data is precomputed; decoding VAE is handled by caller (e.g., process_batch_prompts)
        # vae_instance_for_return remains None
    else:
        # Load VAE if not precomputed (for single/interactive mode)
        # shared_models for single/interactive might contain text/image encoders, but not VAE after `load_shared_models` change.
        # So, VAE will be loaded here for single/interactive.
        logger.info("No precomputed data. Preparing image and text inputs.")
        if shared_models and "ae" in shared_models:  # Should not happen with new load_shared_models
            vae_instance_for_return = shared_models["ae"]
        else:
            # the dtype of VAE weights is float32, but we can load it as bfloat16 for better performance in future
            vae_instance_for_return = flux2_utils.load_ae(args.vae, dtype=torch.float32, device=device, disable_mmap=True)

        height, width, ctx_nctx, control_latent = prepare_i2v_inputs(args, device, vae_instance_for_return, shared_models)

        vae_instance_for_return.to("cpu")  # free GPU memory for the DiT

    context, context_null = ctx_nctx  # unpack
    if shared_models is None or "model" not in shared_models:
        # load DiT model
        model = load_dit_model(args, device, dit_weight_dtype)

        if args.save_merged_model:
            return None, None

        if shared_models is not None:
            shared_models["model"] = model  # cache for subsequent calls
    else:
        # use shared model
        model: flux2_models.Flux2 = shared_models["model"]
        model.move_to_device_except_swap_blocks(device)  # Handles block swap correctly
        model.prepare_block_swap_before_forward()

    # set random generator (CPU generator for reproducible noise regardless of device)
    seed_g = torch.Generator(device="cpu")
    seed_g.manual_seed(seed)

    logger.info(f"Image size: {height}x{width} (HxW), infer_steps: {args.infer_steps}")

    # image generation ######
    logger.info(f"Prompt: {context['prompt']}, Negative Prompt: {context_null['prompt'] if context_null is not None else 'N/A'}")
    ctx_vec = context["ctx_vec"].to(device, dtype=torch.bfloat16)
    ctx, ctx_ids = flux2_utils.prc_txt(ctx_vec)
    if context_null is None:
        negative_ctx_vec = None
        ctx_null, ctx_null_ids = None, None
    else:
        negative_ctx_vec = context_null["ctx_vec"].to(device, dtype=torch.bfloat16)
        ctx_null, ctx_null_ids = flux2_utils.prc_txt(negative_ctx_vec)

    # make first noise with packed shape
    # original: b,16,2*h//16,2*w//16, packed: b,h//16*w//16,16*2*2
    packed_latent_height, packed_latent_width = height // 16, width // 16
    noise_dtype = torch.float32
    noise = torch.randn(1, 128, packed_latent_height, packed_latent_width, dtype=noise_dtype, generator=seed_g, device="cpu").to(
        device, dtype=torch.bfloat16
    )
    x, x_ids = flux2_utils.prc_img(noise)

    # prompt upsampling is not supported

    if control_latent is not None:
        ref_tokens, ref_ids = flux2_utils.pack_control_latent(control_latent)
        del control_latent  # free memory
        ref_tokens = ref_tokens.to(device, dtype=torch.bfloat16)
        ref_ids = ref_ids.to(device)
    else:
        ref_tokens = None
        ref_ids = None

    # denoise: distilled models use embedded guidance, others run true CFG
    timesteps = flux2_utils.get_schedule(args.infer_steps, x.shape[1], args.flow_shift)
    if model_version_info.guidance_distilled:
        x = flux2_utils.denoise(
            model,
            x,
            x_ids,
            ctx,
            ctx_ids,
            timesteps=timesteps,
            guidance=args.embedded_cfg_scale,
            img_cond_seq=ref_tokens,
            img_cond_seq_ids=ref_ids,
        )
    else:
        x = flux2_utils.denoise_cfg(
            model,
            x,
            x_ids,
            ctx,
            ctx_ids,
            ctx_null,
            ctx_null_ids,
            timesteps=timesteps,
            guidance=args.guidance_scale,
            img_cond_seq=ref_tokens,
            img_cond_seq_ids=ref_ids,
        )
    # unpack token sequence back into the spatial latent layout
    x = torch.cat(flux2_utils.scatter_ids(x, x_ids)).squeeze(2)
    return vae_instance_for_return, x
| 709 |
+
|
| 710 |
+
|
| 711 |
+
def save_latent(latent: torch.Tensor, args: argparse.Namespace, height: int, width: int) -> str:
    """Persist a latent tensor to a safetensors file under ``args.save_path``.

    Args:
        latent: Latent tensor
        args: command line arguments (save_path, seed, prompt, etc.)
        height: height of frame in pixels
        width: width of frame in pixels

    Returns:
        str: Path to saved latent file
    """
    out_dir = args.save_path
    os.makedirs(out_dir, exist_ok=True)

    time_flag = get_time_flag()
    seed = args.seed
    latent_path = f"{out_dir}/{time_flag}_{seed}_latent.safetensors"

    # Record the generation settings as string metadata unless disabled;
    # main() reads these back ("seeds", "height", "width") when decoding latents.
    metadata = (
        None
        if args.no_metadata
        else {
            "seeds": f"{seed}",
            "prompt": f"{args.prompt}",
            "height": f"{height}",
            "width": f"{width}",
            "infer_steps": f"{args.infer_steps}",
            "embedded_cfg_scale": f"{args.embedded_cfg_scale}",
            "guidance_scale": f"{args.guidance_scale}",
        }
    )
    # if args.negative_prompt is not None:
    #     metadata["negative_prompt"] = f"{args.negative_prompt}"

    save_file({"latent": latent.contiguous()}, latent_path, metadata=metadata)
    logger.info(f"Latent saved to: {latent_path}")

    return latent_path
|
| 751 |
+
|
| 752 |
+
|
| 753 |
+
def save_images(sample: torch.Tensor, args: argparse.Namespace, original_base_name: Optional[str] = None) -> str:
    """Write a decoded image tensor into the output directory as an image grid.

    Args:
        sample: image tensor (C, H, W)
        args: command line arguments (save_path and seed are used)
        original_base_name: Original base name (if latents are loaded from files)

    Returns:
        str: Path (without extension) of the saved image
    """
    out_dir = args.save_path
    os.makedirs(out_dir, exist_ok=True)

    suffix = f"_{original_base_name}" if original_base_name is not None else ""
    image_name = f"{get_time_flag()}_{args.seed}{suffix}"

    # C,H,W -> B,C,T,H,W (B=1, T=1), float32, as expected by save_images_grid
    grid_input = sample.unsqueeze(0).unsqueeze(2).to(torch.float32)
    save_images_grid(grid_input, out_dir, image_name, rescale=True, create_subdir=False)
    logger.info(f"Sample images saved to: {out_dir}/{image_name}")

    return f"{out_dir}/{image_name}"
|
| 777 |
+
|
| 778 |
+
|
| 779 |
+
def save_output(
    args: argparse.Namespace,
    ae: flux2_models.AutoEncoder,  # Expect a VAE instance for decoding
    latent: torch.Tensor,
    device: torch.device,
    original_base_names: Optional[List[str]] = None,
) -> None:
    """Save the latent and/or decoded image according to ``args.output_type``.

    Args:
        args: command line arguments (output_type selects what is saved)
        ae: AE (VAE) model used to decode the latent; may be None when only latents are saved
        latent: latent tensor; last two dims are packed latent height/width
        device: device to use for decoding
        original_base_names: original base names (if latents are loaded from files)
    """
    height, width = latent.shape[-2], latent.shape[-1]  # BCTHW
    # pixel size is 16x the packed latent size
    height *= 16
    width *= 16
    # print(f"Saving output. Latent shape {latent.shape}; pixel shape {height}x{width}")
    if args.output_type == "latent" or args.output_type == "latent_images":
        # save latent
        save_latent(latent, args, height, width)
        if args.output_type == "latent":
            return

    if ae is None:
        logger.error("AE is None, cannot decode latents for saving video/images.")
        return

    video = decode_latent(ae, latent, device)

    if args.output_type == "images" or args.output_type == "latent_images":
        # Fix: pass the bare base name — save_images prepends the "_" separator
        # itself; previously "_" was added here too, yielding a double underscore
        # ("__name") in the output filename.
        base_name = None if original_base_names is None else original_base_names[0]
        save_images(video, args, base_name)
|
| 815 |
+
|
| 816 |
+
|
| 817 |
+
def preprocess_prompts_for_batch(prompt_lines: List[str], base_args: argparse.Namespace) -> List[Dict]:
    """Parse prompt-file lines into per-prompt override dictionaries.

    Blank lines and lines starting with '#' are skipped. ``base_args`` is kept
    for interface compatibility; overrides are applied later by the caller.

    Args:
        prompt_lines: List of prompt lines
        base_args: Base command line arguments (currently unused here)

    Returns:
        List[Dict]: List of prompt data dictionaries
    """
    parsed: List[Dict] = []

    for raw_line in prompt_lines:
        text = raw_line.strip()
        # Skip empty lines and comments
        if text and not text.startswith("#"):
            entry = parse_prompt_line(text)
            logger.info(f"Parsed prompt data: {entry}")
            parsed.append(entry)

    return parsed
|
| 840 |
+
|
| 841 |
+
|
| 842 |
+
def load_shared_models(args: argparse.Namespace) -> Dict:
    """Load shared models for batch processing or interactive mode.

    Only the text embedder is loaded, and it is kept on CPU to save memory.
    The VAE and DiT are NOT loaded here; they are handled by
    process_batch_prompts or generate.

    Args:
        args: Base command line arguments

    Returns:
        Dict: Dictionary of shared models (text/image encoders)
    """
    version_info = flux2_utils.FLUX2_MODEL_INFO[args.model_version]

    # fp8 text encoder weights when requested, bf16 otherwise
    embedder_dtype = torch.float8_e4m3fn if args.fp8_text_encoder else torch.bfloat16
    embedder = flux2_utils.load_text_embedder(
        version_info, args.text_encoder, dtype=embedder_dtype, device="cpu", disable_mmap=True
    )

    return {"text_embedder": embedder}
|
| 864 |
+
|
| 865 |
+
|
| 866 |
+
def process_batch_prompts(prompts_data: List[Dict], args: argparse.Namespace) -> None:
    """Process multiple prompts with model reuse and batched precomputation.

    Pipeline: (1) AE-encode all image inputs, (2) text-encode all prompts,
    (3) load the DiT once and generate all latents, (4) decode and save.
    Each phase loads its model, runs every prompt, then frees the model so
    only one large model occupies the device at a time.

    Args:
        prompts_data: List of prompt data dictionaries
        args: Base command line arguments
    """
    if not prompts_data:
        logger.warning("No valid prompts found")
        return

    model_version_info = flux2_utils.FLUX2_MODEL_INFO[args.model_version]
    gen_settings = get_generation_settings(args)
    dit_weight_dtype = gen_settings.dit_weight_dtype
    device = gen_settings.device

    # 1. Precompute Image Data (AE and Image Encoders)
    logger.info("Loading AE and Image Encoders for batch image preprocessing...")
    ae_for_batch = flux2_utils.load_ae(args.vae, dtype=torch.float32, device=device, disable_mmap=True)

    all_precomputed_image_data = []
    all_prompt_args_list = [apply_overrides(args, pd) for pd in prompts_data]  # Create all arg instances first

    logger.info("Preprocessing images and AE encoding for all prompts...")

    # AE and Image Encoder to device for this phase, because we do not want to offload them to CPU
    ae_for_batch.to(device)

    for i, prompt_args_item in enumerate(all_prompt_args_list):
        logger.info(f"Image preprocessing for prompt {i + 1}/{len(all_prompt_args_list)}: {prompt_args_item.prompt}")
        # prepare_image_inputs will move ae/image_encoder to device temporarily
        image_data = prepare_image_inputs(prompt_args_item, device, ae_for_batch)
        all_precomputed_image_data.append(image_data)

    # Models should be back on GPU because prepare_image_inputs moved them to the original device
    ae_for_batch.to("cpu")  # Move AE back to CPU
    clean_memory_on_device(device)

    # 2. Precompute Text Data (Text Encoder)
    logger.info("Loading Text Encoder for batch text preprocessing...")

    # Text Encoders loaded to CPU by load_text_encoder
    m3_dtype = torch.float8_e4m3fn if args.fp8_text_encoder else torch.bfloat16
    text_embedder_batch = flux2_utils.load_text_embedder(
        model_version_info, args.text_encoder, dtype=m3_dtype, device=device, disable_mmap=True
    )

    # Text Encoders to device for this phase
    text_embedder_batch.to(device)  # Moved into prepare_text_inputs logic

    all_precomputed_text_data = []
    conds_cache_batch = {}  # prompt -> encoded output, shared across prompts to avoid re-encoding

    logger.info("Preprocessing text and LLM/TextEncoder encoding for all prompts...")
    temp_shared_models_txt = {
        "text_embedder": text_embedder_batch,  # on GPU
        "conds_cache": conds_cache_batch,
    }

    for i, prompt_args_item in enumerate(all_prompt_args_list):
        logger.info(f"Text preprocessing for prompt {i + 1}/{len(all_prompt_args_list)}: {prompt_args_item.prompt}")
        # prepare_text_inputs will move text_encoders to device temporarily
        ctx_nctx = prepare_text_inputs(prompt_args_item, device, temp_shared_models_txt)
        all_precomputed_text_data.append(ctx_nctx)

    # Models should be removed from device after prepare_text_inputs
    del text_embedder_batch, temp_shared_models_txt, conds_cache_batch
    gc.collect()  # Force cleanup of Text Encoder from GPU memory
    clean_memory_on_device(device)

    # 3. Load DiT Model once
    logger.info("Loading DiT model for batch generation...")
    # Use args from the first prompt for DiT loading (LoRA etc. should be consistent for a batch)
    first_prompt_args = all_prompt_args_list[0]
    dit_model = load_dit_model(first_prompt_args, device, dit_weight_dtype)  # Load directly to target device if possible

    if first_prompt_args.lora_weight is not None and len(first_prompt_args.lora_weight) > 0:
        logger.info("Merging LoRA weights into DiT model...")
        merge_lora_weights(
            lora_flux_2,
            dit_model,
            first_prompt_args.lora_weight,
            first_prompt_args.lora_multiplier,
            first_prompt_args.include_patterns,
            first_prompt_args.exclude_patterns,
            device,
            first_prompt_args.lycoris,
            first_prompt_args.save_merged_model,
        )
        if first_prompt_args.save_merged_model:
            logger.info("Merged DiT model saved. Skipping generation.")
            del dit_model
            gc.collect()  # Force cleanup of DiT from GPU memory
            clean_memory_on_device(device)
            return

    shared_models_for_generate = {"model": dit_model}  # Pass DiT via shared_models

    all_latents = []

    logger.info("Generating latents for all prompts...")
    with torch.no_grad():
        for i, prompt_args_item in enumerate(all_prompt_args_list):
            current_image_data = all_precomputed_image_data[i]
            current_text_data = all_precomputed_text_data[i]

            logger.info(f"Generating latent for prompt {i + 1}/{len(all_prompt_args_list)}: {prompt_args_item.prompt}")
            try:
                # generate is called with precomputed data, so it won't load VAE/Text/Image encoders.
                # It will use the DiT model from shared_models_for_generate.
                # The VAE instance returned by generate will be None here.
                _, latent = generate(
                    prompt_args_item, gen_settings, shared_models_for_generate, current_image_data, current_text_data
                )

                # NOTE(review): continuing here without appending shifts indices of
                # all_latents vs all_prompt_args_list in the decode loop below —
                # confirm latent can only be None in the save_merged_model path,
                # which already returned earlier.
                if latent is None:  # and prompt_args_item.save_merged_model: # Should be caught earlier
                    continue

                # Save latent if needed (using data from precomputed_image_data for H/W)
                if prompt_args_item.output_type in ["latent", "latent_images"]:
                    height, width, _ = current_image_data
                    save_latent(latent, prompt_args_item, height, width)

                all_latents.append(latent)
            except Exception as e:
                logger.error(f"Error generating latent for prompt: {prompt_args_item.prompt}. Error: {e}", exc_info=True)
                all_latents.append(None)  # Add placeholder for failed generations
                continue

    # Free DiT model
    logger.info("Releasing DiT model from memory...")
    if args.blocks_to_swap > 0:
        logger.info("Waiting for 5 seconds to finish block swap")
        time.sleep(5)

    del shared_models_for_generate["model"]
    del dit_model
    gc.collect()  # Force cleanup of DiT from GPU memory
    clean_memory_on_device(device)
    synchronize_device(device)  # Ensure memory is freed before loading VAE for decoding

    # 4. Decode latents and save outputs (using vae_for_batch)
    if args.output_type != "latent":
        logger.info("Decoding latents to videos/images using batched VAE...")
        ae_for_batch.to(device)  # Move VAE to device for decoding

        for i, latent in enumerate(all_latents):
            if latent is None:  # Skip failed generations
                logger.warning(f"Skipping decoding for prompt {i + 1} due to previous error.")
                continue

            current_args = all_prompt_args_list[i]
            logger.info(f"Decoding output {i + 1}/{len(all_latents)} for prompt: {current_args.prompt}")

            # if args.output_type is "latent_images", we already saved latent above.
            # so we skip saving latent here.
            if current_args.output_type == "latent_images":
                current_args.output_type = "images"

            # save_output expects latent to be [BCTHW] or [CTHW]. generate returns [BCTHW] (batch size 1).
            # latent[0] is correct if generate returns it with batch dim.
            # The latent from generate is (1, C, T, H, W)
            save_output(current_args, ae_for_batch, latent[0], device)  # Pass vae_for_batch

        ae_for_batch.to("cpu")  # Move VAE back to CPU

    del ae_for_batch
    clean_memory_on_device(device)
|
| 1034 |
+
|
| 1035 |
+
|
| 1036 |
+
def process_interactive(args: argparse.Namespace) -> None:
    """Process prompts in interactive mode.

    Reads one prompt line at a time (via prompt_toolkit when available,
    plain input() otherwise), generates, and saves output per prompt.
    Exit with Ctrl+D / Ctrl+Z; Ctrl+C only interrupts the current prompt.

    Args:
        args: Base command line arguments
    """
    gen_settings = get_generation_settings(args)
    device = gen_settings.device
    shared_models = load_shared_models(args)
    shared_models["conds_cache"] = {}  # Initialize empty cache for interactive mode

    print("Interactive mode. Enter prompts (Ctrl+D or Ctrl+Z (Windows) to exit):")

    # prompt_toolkit gives line editing/history; fall back to builtin input()
    try:
        import prompt_toolkit
    except ImportError:
        logger.warning("prompt_toolkit not found. Using basic input instead.")
        prompt_toolkit = None

    if prompt_toolkit:
        session = prompt_toolkit.PromptSession()

        def input_line(prompt: str) -> str:
            # Read a line through the prompt_toolkit session
            return session.prompt(prompt)

    else:

        def input_line(prompt: str) -> str:
            # Plain stdin fallback
            return input(prompt)

    try:
        while True:
            try:
                line = input_line("> ")
                if not line.strip():
                    continue
                if len(line.strip()) == 1 and line.strip() in ["\x04", "\x1a"]:  # Ctrl+D or Ctrl+Z with prompt_toolkit
                    raise EOFError  # Exit on Ctrl+D or Ctrl+Z

                # Parse prompt
                prompt_data = parse_prompt_line(line)
                prompt_args = apply_overrides(args, prompt_data)

                # Generate latent
                # For interactive, precomputed data is None. shared_models contains text/image encoders.
                # generate will load VAE internally.
                returned_vae, latent = generate(prompt_args, gen_settings, shared_models)

                # # If not one_frame_inference, move DiT model to CPU after generation
                # if prompt_args.blocks_to_swap > 0:
                #     logger.info("Waiting for 5 seconds to finish block swap")
                #     time.sleep(5)
                # model = shared_models.get("model")
                # model.to("cpu") # Move DiT model to CPU after generation

                # Save latent and video
                # returned_vae from generate will be used for decoding here.
                save_output(prompt_args, returned_vae, latent[0], device)

            except KeyboardInterrupt:
                print("\nInterrupted. Continue (Ctrl+D or Ctrl+Z (Windows) to exit)")
                continue

    except EOFError:
        print("\nExiting interactive mode")
|
| 1101 |
+
|
| 1102 |
+
|
| 1103 |
+
def get_generation_settings(args: argparse.Namespace) -> GenerationSettings:
    """Build GenerationSettings (device + DiT weight dtype) from CLI args.

    DiT weight dtype is bf16 by default, fp8 (e4m3fn) with --fp8, or None with
    --fp8_scaled (weights keep their mixed per-tensor precision, so no cast).

    Args:
        args: command line arguments

    Returns:
        GenerationSettings: settings used by generate()
    """
    device = torch.device(args.device)

    dit_weight_dtype = torch.bfloat16  # default
    if args.fp8_scaled:
        dit_weight_dtype = None  # various precision weights, so don't cast to specific dtype
    elif args.fp8:
        dit_weight_dtype = torch.float8_e4m3fn

    # Fix: log message previously read "DiT weight weight precision" (duplicated word).
    logger.info(f"Using device: {device}, DiT weight precision: {dit_weight_dtype}")

    gen_settings = GenerationSettings(device=device, dit_weight_dtype=dit_weight_dtype)
    return gen_settings
|
| 1116 |
+
|
| 1117 |
+
|
| 1118 |
+
def main():
    """CLI entry point.

    Dispatches to one of four modes: decode previously saved latents
    (--latent_path), batch prompts from a file (--from_file), interactive
    prompting (--interactive), or single-prompt generation (default).
    """
    # Parse arguments
    args = parse_args()

    # Check if latents are provided
    latents_mode = args.latent_path is not None and len(args.latent_path) > 0

    # Set device
    device = args.device if args.device is not None else "cuda" if torch.cuda.is_available() else "cpu"
    device = torch.device(device)
    logger.info(f"Using device: {device}")
    args.device = device

    if latents_mode:
        # Original latent decode mode
        original_base_names = []
        latents_list = []
        seeds = []

        # assert len(args.latent_path) == 1, "Only one latent path is supported for now"

        for latent_path in args.latent_path:
            original_base_names.append(os.path.splitext(os.path.basename(latent_path))[0])
            seed = 0

            if os.path.splitext(latent_path)[1] != ".safetensors":
                # non-safetensors checkpoints carry no metadata; seed stays 0
                latents = torch.load(latent_path, map_location="cpu")
            else:
                latents = load_file(latent_path)["latent"]
                with safe_open(latent_path, framework="pt") as f:
                    metadata = f.metadata()
                if metadata is None:
                    metadata = {}
                logger.info(f"Loaded metadata: {metadata}")

                # restore generation settings recorded by save_latent
                if "seeds" in metadata:
                    seed = int(metadata["seeds"])
                if "height" in metadata and "width" in metadata:
                    height = int(metadata["height"])
                    width = int(metadata["width"])
                    args.image_size = [height, width]

            seeds.append(seed)
            logger.info(f"Loaded latent from {latent_path}. Shape: {latents.shape}")

            if latents.ndim == 5:  # [BCTHW]
                latents = latents.squeeze(0)  # [CTHW]

            latents_list.append(latents)

        # latent = torch.stack(latents_list, dim=0) # [N, ...], must be same shape

        for i, latent in enumerate(latents_list):
            args.seed = seeds[i]

            # NOTE(review): the AE is reloaded on every iteration; hoisting the
            # load above this loop would avoid repeated disk reads — confirm.
            ae = flux2_utils.load_ae(args.vae, dtype=torch.float32, device=device, disable_mmap=True)
            save_output(args, ae, latent, device, original_base_names)

    elif args.from_file:
        # Batch mode from file

        # Read prompts from file
        with open(args.from_file, "r", encoding="utf-8") as f:
            prompt_lines = f.readlines()

        # Process prompts
        prompts_data = preprocess_prompts_for_batch(prompt_lines, args)
        process_batch_prompts(prompts_data, args)

    elif args.interactive:
        # Interactive mode
        process_interactive(args)

    else:
        # Single prompt mode (original behavior)

        # Generate latent
        gen_settings = get_generation_settings(args)
        # For single mode, precomputed data is None, shared_models is None.
        # generate will load all necessary models (VAE, Text/Image Encoders, DiT).
        returned_vae, latent = generate(args, gen_settings)

        if args.blocks_to_swap > 0:
            logger.info("Waiting for 5 seconds to finish block swap")
            time.sleep(5)
        gc.collect()  # Force cleanup of DiT from GPU memory
        clean_memory_on_device(device)  # clean memory on device before moving models

        # Save latent and video
        # returned_vae from generate will be used for decoding here.
        save_output(args, returned_vae, latent[0], device)

    logger.info("Done!")
|
| 1211 |
+
|
| 1212 |
+
|
| 1213 |
+
# Script entry point: run CLI generation when executed directly.
if __name__ == "__main__":
    main()
|
src/musubi_tuner/flux_2_train_network.py
ADDED
|
@@ -0,0 +1,363 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
from typing import Optional
|
| 3 |
+
import torch
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
from accelerate import Accelerator
|
| 7 |
+
from einops import rearrange
|
| 8 |
+
from diffusers.utils.torch_utils import randn_tensor
|
| 9 |
+
|
| 10 |
+
from musubi_tuner.flux_2 import flux2_models, flux2_utils
|
| 11 |
+
from musubi_tuner.hv_train_network import (
|
| 12 |
+
NetworkTrainer,
|
| 13 |
+
load_prompts,
|
| 14 |
+
clean_memory_on_device,
|
| 15 |
+
setup_parser_common,
|
| 16 |
+
read_config_from_file,
|
| 17 |
+
)
|
| 18 |
+
|
| 19 |
+
import logging
|
| 20 |
+
|
| 21 |
+
from musubi_tuner.utils import model_utils
|
| 22 |
+
|
| 23 |
+
logger = logging.getLogger(__name__)
|
| 24 |
+
logging.basicConfig(level=logging.INFO)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class Flux2NetworkTrainer(NetworkTrainer):
    """Network (LoRA) trainer specialization for FLUX.2 models.

    Overrides the architecture-specific hooks of NetworkTrainer: model/AE/
    text-encoder loading, sample-prompt caching, and sampling inference.
    """

    def __init__(self):
        super().__init__()
|
| 30 |
+
|
| 31 |
+
    # region model specific

    @property
    def architecture(self) -> str:
        # Short architecture tag from the selected FLUX.2 model version info.
        return self.model_version_info.architecture

    @property
    def architecture_full_name(self) -> str:
        # Full architecture name from the selected FLUX.2 model version info.
        return self.model_version_info.architecture_full
|
| 40 |
+
|
| 41 |
+
    def handle_model_specific_args(self, args):
        """Resolve FLUX.2-specific settings from CLI args.

        Stores the selected model version info and sets trainer defaults;
        i2v/control(video) training modes are disabled for this architecture.
        """
        self.model_version_info = flux2_utils.FLUX2_MODEL_INFO[args.model_version]
        # DiT compute dtype follows the mixed precision setting (fp16 or bf16)
        self.dit_dtype = torch.float16 if args.mixed_precision == "fp16" else torch.bfloat16
        self._i2v_training = False
        self._control_training = False  # this means video training, not control image training
        self.default_guidance_scale = 4.0  # CFG scale for inference for base models
        self.default_discrete_flow_shift = None  # Use FLUX.2 shift as default
|
| 49 |
+
    def process_sample_prompts(self, args: argparse.Namespace, accelerator: Accelerator, sample_prompts: str):
        """Pre-encode sample prompts with the text encoder and cache the results.

        The text encoder is loaded temporarily, each unique prompt (positive
        and negative) is encoded once, and the encoder is freed afterwards so
        it does not occupy device memory during training.

        Args:
            args: command line arguments
            accelerator: Accelerator providing the target device
            sample_prompts: path to the sample prompts file

        Returns:
            list of prompt dicts extended with "ctx_vec"/"negative_ctx_vec"
            CPU tensors for later sampling.
        """
        device = accelerator.device

        logger.info(f"cache Text Encoder outputs for sample prompt: {sample_prompts}")
        prompts = load_prompts(sample_prompts)

        # Load Text Encoder (Mistral 3 or Qwen-3)
        te_dtype = torch.float8_e4m3fn if args.fp8_text_encoder else torch.bfloat16
        text_embedder = flux2_utils.load_text_embedder(
            self.model_version_info, args.text_encoder, dtype=te_dtype, device=device, disable_mmap=True
        )

        # Encode with Text Encoder (Mistral 3 or Qwen-3)
        logger.info("Encoding with Text Encoder (Mistral 3 or Qwen-3)...")

        sample_prompts_te_outputs = {}  # prompt -> encoded tensor
        for prompt_dict in prompts:
            # add negative prompt if not present even if the model is guidance distilled for simplicity
            if "negative_prompt" not in prompt_dict:
                prompt_dict["negative_prompt"] = " "

            for p in [prompt_dict.get("prompt", ""), prompt_dict.get("negative_prompt", " ")]:
                if p is None or p in sample_prompts_te_outputs:
                    continue

                # encode prompt
                logger.info(f"cache Text Encoder outputs for prompt: {p}")
                with torch.no_grad():
                    # fp8 weights (itemsize == 1) need a bf16 autocast for the forward pass
                    if te_dtype.itemsize == 1:
                        with torch.amp.autocast(device_type=device.type, dtype=torch.bfloat16):
                            ctx_vec = text_embedder([p])  # [1, 512, 15360]
                    else:
                        ctx_vec = text_embedder([p])  # [1, 512, 15360]
                    ctx_vec = ctx_vec.cpu()

                # save prompt cache
                sample_prompts_te_outputs[p] = ctx_vec

        del text_embedder
        clean_memory_on_device(device)

        # prepare sample parameters
        sample_parameters = []
        for prompt_dict in prompts:
            prompt_dict_copy = prompt_dict.copy()

            p = prompt_dict.get("prompt", "")
            prompt_dict_copy["ctx_vec"] = sample_prompts_te_outputs[p]
            p = prompt_dict.get("negative_prompt", " ")
            prompt_dict_copy["negative_ctx_vec"] = sample_prompts_te_outputs[p]

            sample_parameters.append(prompt_dict_copy)

        clean_memory_on_device(accelerator.device)

        return sample_parameters
|
| 105 |
+
|
| 106 |
+
    def do_inference(
        self,
        accelerator,
        args,
        sample_parameter,
        vae,
        dit_dtype,
        transformer,
        discrete_flow_shift,
        sample_steps,
        width,
        height,
        frame_count,
        generator,
        do_classifier_free_guidance,
        guidance_scale,
        cfg_scale,
        image_path=None,
        control_video_path=None,
    ):
        """Architecture-dependent sampling inference for FLUX.2.

        Uses the text-encoder outputs cached in ``sample_parameter`` by
        process_sample_prompts, runs the denoising loop (distilled or CFG),
        and decodes the result with the AE. Returns pixels in [0, 1] with a
        dummy frame dimension (B, C, 1, H, W).
        """
        model: flux2_models.Flux2 = transformer
        device = accelerator.device

        # Get embeddings (cached by process_sample_prompts; see shape comments)
        ctx = sample_parameter["ctx_vec"].to(device=device, dtype=torch.bfloat16)  # [1, 512, 15360]
        ctx, ctx_ids = flux2_utils.prc_txt(ctx)  # [1, 512, 15360], [1, 512, 4]
        # process_sample_prompts always stores "negative_ctx_vec", even for distilled models
        negative_ctx = sample_parameter.get("negative_ctx_vec").to(device=device, dtype=torch.bfloat16)
        negative_ctx, negative_ctx_ids = flux2_utils.prc_txt(negative_ctx)

        # Initialize latents (packed: 16x16 pixel patches)
        packed_latent_height, packed_latent_width = height // 16, width // 16
        latents = randn_tensor(
            (1, 128, packed_latent_height, packed_latent_width),  # [1, 128, 52, 78]
            generator=generator,
            device=device,
            dtype=torch.bfloat16,
        )
        x, x_ids = flux2_utils.prc_img(latents)  # [1, 4056, 128], [1, 4056, 4]

        # prepare control latent
        ref_tokens = None
        ref_ids = None
        if "control_image_path" in sample_parameter:
            vae.to(device)
            vae.eval()

            control_image_paths = sample_parameter["control_image_path"]
            # NOTE(review): a single reference allows a larger limit; "2024" looks
            # like it may be a typo for 2048 — confirm against flux2_utils usage.
            limit_size = (2024, 2024) if len(control_image_paths) == 1 else (1024, 1024)
            control_latent_list = []
            with torch.no_grad():
                for image_path in control_image_paths:
                    control_image_tensor, _, _ = flux2_utils.preprocess_control_image(image_path, limit_size)
                    control_latent = vae.encode(control_image_tensor.to(device, vae.dtype))
                    control_latent_list.append(control_latent.squeeze(0))

            ref_tokens, ref_ids = flux2_utils.pack_control_latent(control_latent_list)

            vae.to("cpu")
            clean_memory_on_device(device)

        # denoise
        timesteps = flux2_utils.get_schedule(sample_steps, x.shape[1], discrete_flow_shift)
        if self.model_version_info.guidance_distilled:
            x = flux2_utils.denoise(
                model,
                x,
                x_ids,
                ctx,
                ctx_ids,
                timesteps=timesteps,
                guidance=guidance_scale,
                img_cond_seq=ref_tokens,
                img_cond_seq_ids=ref_ids,
            )
        else:
            x = flux2_utils.denoise_cfg(
                model,
                x,
                x_ids,
                ctx,
                ctx_ids,
                negative_ctx,
                negative_ctx_ids,
                timesteps=timesteps,
                guidance=guidance_scale,
                img_cond_seq=ref_tokens,
                img_cond_seq_ids=ref_ids,
            )
        # unpack token sequence back into latent layout
        x = torch.cat(flux2_utils.scatter_ids(x, x_ids)).squeeze(2)
        latent = x.to(vae.dtype)
        del x

        # Move VAE to the appropriate device for sampling
        vae.to(device)
        vae.eval()

        # Decode latents to video
        logger.info(f"Decoding video from latents: {latent.shape}")
        with torch.no_grad():
            pixels = vae.decode(latent)  # decode to pixels
        del latent

        logger.info("Decoding complete")
        pixels = pixels.to(torch.float32).cpu()
        pixels = (pixels / 2 + 0.5).clamp(0, 1)  # -1 to 1 -> 0 to 1

        vae.to("cpu")
        clean_memory_on_device(device)

        pixels = pixels.unsqueeze(2)  # add a dummy dimension for video frames, B C H W -> B C 1 H W
        return pixels
|
| 218 |
+
|
| 219 |
+
def load_vae(self, args: argparse.Namespace, vae_dtype: torch.dtype, vae_path: str):
    """Load the FLUX.2 autoencoder (AE) onto CPU and return it.

    NOTE(review): the incoming ``vae_path`` argument is ignored; ``args.vae``
    always takes precedence — confirm this is the intended contract with the
    base-class caller.
    """
    path = args.vae
    logger.info(f"Loading AE model from {path}")
    return flux2_utils.load_ae(path, dtype=vae_dtype, device="cpu", disable_mmap=True)
|
| 225 |
+
|
| 226 |
+
def load_transformer(
    self,
    accelerator: Accelerator,
    args: argparse.Namespace,
    dit_path: str,
    attn_mode: str,
    split_attn: bool,
    loading_device: str,
    dit_weight_dtype: Optional[torch.dtype],
):
    """Load the FLUX.2 flow-matching DiT and return it.

    Delegates to ``flux2_utils.load_flow_model``; fp8 scaling and memmap
    behavior come from the command-line flags on ``args``.
    """
    return flux2_utils.load_flow_model(
        accelerator.device,
        model_version_info=self.model_version_info,
        dit_path=dit_path,
        attn_mode=attn_mode,
        split_attn=split_attn,
        loading_device=loading_device,
        dit_weight_dtype=dit_weight_dtype,
        fp8_scaled=args.fp8_scaled,
        disable_numpy_memmap=args.disable_numpy_memmap,
    )
|
| 248 |
+
|
| 249 |
+
def compile_transformer(self, args, transformer):
    """Apply torch.compile to the DiT's double/single transformer block lists."""
    model: flux2_models.Flux2 = transformer
    block_groups = [model.double_blocks, model.single_blocks]
    # Linear layers are left uncompiled when block swapping is enabled.
    return model_utils.compile_transformer(args, model, block_groups, disable_linear=self.blocks_to_swap > 0)
|
| 254 |
+
|
| 255 |
+
def scale_shift_latents(self, latents):
    """Identity hook: FLUX.2 latents require no scale/shift before training."""
    return latents
|
| 257 |
+
|
| 258 |
+
def call_dit(
    self,
    args: argparse.Namespace,
    accelerator: Accelerator,
    transformer,
    latents: torch.Tensor,
    batch: dict[str, torch.Tensor],
    noise: torch.Tensor,
    noisy_model_input: torch.Tensor,
    timesteps: torch.Tensor,
    network_dtype: torch.dtype,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Run one FLUX.2 DiT forward pass and build the flow-matching target.

    Packs the noisy latents and optional control latents into token
    sequences, encodes the cached text context, calls the transformer,
    unpacks the prediction back to (B, C, H, W), and returns
    ``(model_pred, target)`` where ``target = noise - latents``.
    """
    model: flux2_models.Flux2 = transformer

    bsize = latents.shape[0]
    # pack latents: spatial dims before packing; used later to unpack the prediction
    packed_latent_height = latents.shape[2]
    packed_latent_width = latents.shape[3]
    noisy_model_input, img_ids = flux2_utils.prc_img(noisy_model_input)  # (B, HW, C), (B, HW, 4)

    # control: gather consecutively numbered "latents_control_N" entries, if any
    num_control_images = 0
    ref_tokens, ref_ids = None, None
    if "latents_control_0" in batch:
        control_latents: list[torch.Tensor] = []
        while True:
            key = f"latents_control_{num_control_images}"
            if key in batch:
                control_latents.append(batch[key])  # list of (B, C, H, W)
                num_control_images += 1
            else:
                break

        ref_tokens, ref_ids = flux2_utils.pack_control_latent(control_latents)

    # context: cached text-encoder output
    ctx_vec = batch["ctx_vec"]  # [B, T, D] = [B, 512, 15360]
    ctx, ctx_ids = flux2_utils.prc_txt(ctx_vec)  # [B, 512, 15360], [B, 512, 4]

    # ensure the hidden state will require grad (needed so gradient
    # checkpointing can backprop through the packed inputs)
    if args.gradient_checkpointing:
        noisy_model_input.requires_grad_(True)
        ctx.requires_grad_(True)
        if ref_tokens is not None:
            ref_tokens.requires_grad_(True)

    # call DiT: move all inputs to the accelerator device; token tensors get
    # the network dtype, id tensors keep their own dtype
    noisy_model_input = noisy_model_input.to(device=accelerator.device, dtype=network_dtype)
    img_ids = img_ids.to(device=accelerator.device)
    if ref_tokens is not None:
        ref_tokens = ref_tokens.to(device=accelerator.device, dtype=network_dtype)
        ref_ids = ref_ids.to(device=accelerator.device)
    ctx = ctx.to(device=accelerator.device, dtype=network_dtype)
    ctx_ids = ctx_ids.to(device=accelerator.device)

    # use 1.0 as guidance scale for FLUX.2 non-base training
    guidance_vec = torch.full((bsize,), 1.0, device=accelerator.device, dtype=network_dtype)

    # concatenate control reference tokens after the image tokens along the
    # sequence axis; the prediction for the reference part is discarded below
    img_input = noisy_model_input  # [B, HW, C]
    img_input_ids = img_ids  # [B, HW, 4]
    if ref_tokens is not None:
        img_input = torch.cat((img_input, ref_tokens), dim=1)
        img_input_ids = torch.cat((img_input_ids, ref_ids), dim=1)

    # model expects timesteps in [0, 1] rather than the [0, 1000] scheduler range
    timesteps = timesteps / 1000.0
    model_pred = model(x=img_input, x_ids=img_input_ids, timesteps=timesteps, ctx=ctx, ctx_ids=ctx_ids, guidance=guidance_vec)
    # keep only the prediction for the noisy-image tokens (drop control tokens)
    model_pred = model_pred[:, : noisy_model_input.shape[1]]  # e.g. [B, 4096, 128]

    # unpack height/width latents
    model_pred = rearrange(model_pred, "b (h w) c -> b c h w", h=packed_latent_height, w=packed_latent_width)

    # flow matching loss target: velocity from data to noise
    latents = latents.to(device=accelerator.device, dtype=network_dtype)
    target = noise - latents

    return model_pred, target
|
| 334 |
+
|
| 335 |
+
# endregion model specific
|
| 336 |
+
|
| 337 |
+
|
| 338 |
+
def flux2_setup_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
    """Register FLUX.2-dev specific options on ``parser`` and return it."""
    add = parser.add_argument
    add("--fp8_scaled", action="store_true", help="use scaled fp8 for DiT / DiTにスケーリングされたfp8を使う")
    add("--text_encoder", type=str, default=None, help="text encoder checkpoint path")
    add("--fp8_text_encoder", action="store_true", help="use fp8 for Text Encoder model")
    # model-version selection flags shared with the other FLUX.2 entry points
    flux2_utils.add_model_version_args(parser)
    return parser
|
| 345 |
+
|
| 346 |
+
|
| 347 |
+
def main():
    """CLI entry point for FLUX.2 network (LoRA) training."""
    parser = flux2_setup_parser(setup_parser_common())
    args = read_config_from_file(parser.parse_args(), parser)

    # dit_dtype is derived from --mixed_precision later, so clear any preset value
    args.dit_dtype = None
    # default the VAE dtype to float32 unless explicitly specified
    if args.vae_dtype is None:
        args.vae_dtype = "float32"

    Flux2NetworkTrainer().train(args)
|
| 360 |
+
|
| 361 |
+
|
| 362 |
+
# Allow running this module directly as a training script.
if __name__ == "__main__":
    main()
|
src/musubi_tuner/fpack_cache_text_encoder_outputs.py
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
from transformers import LlamaTokenizerFast, LlamaModel, CLIPTokenizer, CLIPTextModel
|
| 5 |
+
from musubi_tuner.dataset import config_utils
|
| 6 |
+
from musubi_tuner.dataset.config_utils import BlueprintGenerator, ConfigSanitizer
|
| 7 |
+
from musubi_tuner.dataset.image_video_dataset import ARCHITECTURE_FRAMEPACK, ItemInfo, save_text_encoder_output_cache_framepack
|
| 8 |
+
import musubi_tuner.cache_text_encoder_outputs as cache_text_encoder_outputs
|
| 9 |
+
from musubi_tuner.frame_pack import hunyuan
|
| 10 |
+
from musubi_tuner.frame_pack.framepack_utils import load_text_encoder1, load_text_encoder2
|
| 11 |
+
|
| 12 |
+
import logging
|
| 13 |
+
|
| 14 |
+
from musubi_tuner.frame_pack.utils import crop_or_pad_yield_mask
|
| 15 |
+
|
| 16 |
+
logger = logging.getLogger(__name__)
|
| 17 |
+
logging.basicConfig(level=logging.INFO)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def encode_and_save_batch(
    tokenizer1: LlamaTokenizerFast,
    text_encoder1: LlamaModel,
    tokenizer2: CLIPTokenizer,
    text_encoder2: CLIPTextModel,
    batch: list[ItemInfo],
    device: torch.device,
):
    """Encode each item's caption with both text encoders and cache the outputs.

    FramePack's ``encode_prompt_conds`` only supports a single prompt, so
    items are encoded one at a time; results are then written per item.
    """
    encoded: list[tuple[torch.Tensor, torch.Tensor, torch.Tensor]] = []
    for item in batch:
        with torch.autocast(device_type=device.type, dtype=text_encoder1.dtype), torch.no_grad():
            llama_vec, clip_l_pooler = hunyuan.encode_prompt_conds(
                item.caption, text_encoder1, text_encoder2, tokenizer1, tokenizer2
            )
            # normalize the LLaMA hidden states to a fixed length of 512 tokens
            llama_vec, llama_attention_mask = crop_or_pad_yield_mask(llama_vec, length=512)
        # drop the batch dimension before caching
        encoded.append((llama_vec.squeeze(0), llama_attention_mask.squeeze(0), clip_l_pooler.squeeze(0)))

    # save llama_vec, attention mask and clip_l_pooler to the item's cache
    for item, (llama_vec, llama_attention_mask, clip_l_pooler) in zip(batch, encoded):
        save_text_encoder_output_cache_framepack(item, llama_vec, llama_attention_mask, clip_l_pooler)
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def main():
    """CLI entry point: cache FramePack text-encoder outputs for all dataset items."""
    parser = framepack_setup_parser(cache_text_encoder_outputs.setup_parser_common())
    args = parser.parse_args()

    device_name = args.device if args.device is not None else ("cuda" if torch.cuda.is_available() else "cpu")
    device = torch.device(device_name)

    # Build datasets from the user-provided dataset config
    blueprint_generator = BlueprintGenerator(ConfigSanitizer())
    logger.info(f"Load dataset config from {args.dataset_config}")
    user_config = config_utils.load_user_config(args.dataset_config)
    blueprint = blueprint_generator.generate(user_config, args, architecture=ARCHITECTURE_FRAMEPACK)
    train_dataset_group = config_utils.generate_dataset_group_by_blueprint(blueprint.dataset_group)
    datasets = train_dataset_group.datasets

    # all_cache_files_for_dataset = existing cache files,
    # all_cache_paths_for_dataset = all expected cache paths in the dataset
    all_cache_files_for_dataset, all_cache_paths_for_dataset = cache_text_encoder_outputs.prepare_cache_files_and_paths(datasets)

    # load both text encoders; text encoder 2 (CLIP) is moved to the device here
    tokenizer1, text_encoder1 = load_text_encoder1(args, args.fp8_llm, device)
    tokenizer2, text_encoder2 = load_text_encoder2(args)
    text_encoder2.to(device)

    logger.info("Encoding with Text Encoders")

    def encode_for_text_encoder(batch: list[ItemInfo]):
        # adapter bound to the loaded encoders, called per batch by the driver
        encode_and_save_batch(tokenizer1, text_encoder1, tokenizer2, text_encoder2, batch, device)

    cache_text_encoder_outputs.process_text_encoder_batches(
        args.num_workers,
        args.skip_existing,
        args.batch_size,
        datasets,
        all_cache_files_for_dataset,
        all_cache_paths_for_dataset,
        encode_for_text_encoder,
    )

    # remove cache files that no longer correspond to any dataset item
    cache_text_encoder_outputs.post_process_cache_files(
        datasets, all_cache_files_for_dataset, all_cache_paths_for_dataset, args.keep_cache
    )
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def framepack_setup_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
    """Add FramePack-specific text-encoder options to ``parser`` and return it."""
    add = parser.add_argument
    add("--text_encoder1", type=str, required=True, help="Text Encoder 1 directory")
    add("--text_encoder2", type=str, required=True, help="Text Encoder 2 directory")
    add("--fp8_llm", action="store_true", help="use fp8 for Text Encoder 1 (LLM)")
    return parser
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
# Allow running this module directly as a caching script.
if __name__ == "__main__":
    main()
|
src/musubi_tuner/fpack_generate_video.py
ADDED
|
@@ -0,0 +1,2210 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import gc
|
| 3 |
+
from importlib.util import find_spec
|
| 4 |
+
import random
|
| 5 |
+
import os
|
| 6 |
+
import re
|
| 7 |
+
import time
|
| 8 |
+
import copy
|
| 9 |
+
from typing import Tuple, Optional, List, Any, Dict
|
| 10 |
+
|
| 11 |
+
import torch
|
| 12 |
+
from safetensors.torch import load_file, save_file
|
| 13 |
+
from safetensors import safe_open
|
| 14 |
+
from PIL import Image
|
| 15 |
+
import numpy as np
|
| 16 |
+
from tqdm import tqdm
|
| 17 |
+
|
| 18 |
+
from musubi_tuner.networks import lora_framepack
|
| 19 |
+
from musubi_tuner.hunyuan_model.autoencoder_kl_causal_3d import AutoencoderKLCausal3D
|
| 20 |
+
from musubi_tuner.frame_pack import hunyuan
|
| 21 |
+
from musubi_tuner.frame_pack.hunyuan_video_packed import load_packed_model
|
| 22 |
+
from musubi_tuner.frame_pack.hunyuan_video_packed_inference import HunyuanVideoTransformer3DModelPackedInference
|
| 23 |
+
from musubi_tuner.frame_pack.utils import crop_or_pad_yield_mask, soft_append_bcthw
|
| 24 |
+
from musubi_tuner.frame_pack.clip_vision import hf_clip_vision_encode
|
| 25 |
+
from musubi_tuner.frame_pack.k_diffusion_hunyuan import sample_hunyuan
|
| 26 |
+
from musubi_tuner.dataset import image_video_dataset
|
| 27 |
+
from musubi_tuner.utils import model_utils
|
| 28 |
+
from musubi_tuner.utils.lora_utils import filter_lora_state_dict
|
| 29 |
+
|
| 30 |
+
lycoris_available = find_spec("lycoris") is not None
|
| 31 |
+
|
| 32 |
+
from musubi_tuner.utils.device_utils import clean_memory_on_device
|
| 33 |
+
from musubi_tuner.hv_generate_video import (
|
| 34 |
+
get_time_flag,
|
| 35 |
+
save_images_grid,
|
| 36 |
+
save_videos_grid,
|
| 37 |
+
synchronize_device,
|
| 38 |
+
setup_parser_compile,
|
| 39 |
+
)
|
| 40 |
+
from musubi_tuner.wan_generate_video import merge_lora_weights
|
| 41 |
+
from musubi_tuner.frame_pack.framepack_utils import load_vae, load_text_encoder1, load_text_encoder2, load_image_encoders
|
| 42 |
+
|
| 43 |
+
import logging
|
| 44 |
+
|
| 45 |
+
logger = logging.getLogger(__name__)
|
| 46 |
+
logging.basicConfig(level=logging.INFO)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def parse_section_strings(input_string: str) -> dict[int, str]:
|
| 50 |
+
section_strings = {}
|
| 51 |
+
if input_string is None: # handle None input for image_path etc.
|
| 52 |
+
return section_strings
|
| 53 |
+
if ";;;" in input_string:
|
| 54 |
+
split_section_strings = input_string.split(";;;")
|
| 55 |
+
for section_str in split_section_strings:
|
| 56 |
+
if ":" not in section_str:
|
| 57 |
+
start = end = 0
|
| 58 |
+
section_str = section_str.strip()
|
| 59 |
+
else:
|
| 60 |
+
index_str, section_str = section_str.split(":", 1)
|
| 61 |
+
index_str = index_str.strip()
|
| 62 |
+
section_str = section_str.strip()
|
| 63 |
+
|
| 64 |
+
m = re.match(r"^(-?\d+)(-\d+)?$", index_str)
|
| 65 |
+
if m:
|
| 66 |
+
start = int(m.group(1))
|
| 67 |
+
end = int(m.group(2)[1:]) if m.group(2) is not None else start
|
| 68 |
+
else:
|
| 69 |
+
start = end = 0
|
| 70 |
+
section_str = section_str.strip()
|
| 71 |
+
for i in range(start, end + 1):
|
| 72 |
+
section_strings[i] = section_str
|
| 73 |
+
else:
|
| 74 |
+
section_strings[0] = input_string
|
| 75 |
+
|
| 76 |
+
if not section_strings: # If input_string was empty or only separators
|
| 77 |
+
return section_strings
|
| 78 |
+
|
| 79 |
+
if 0 not in section_strings:
|
| 80 |
+
indices = list(section_strings.keys())
|
| 81 |
+
if all(i < 0 for i in indices):
|
| 82 |
+
section_index = min(indices)
|
| 83 |
+
else:
|
| 84 |
+
section_index = min(i for i in indices if i >= 0)
|
| 85 |
+
section_strings[0] = section_strings[section_index]
|
| 86 |
+
return section_strings
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
class GenerationSettings:
    """Holds device and (optional) DiT weight dtype for a generation run."""

    def __init__(self, device: torch.device, dit_weight_dtype: Optional[torch.dtype] = None):
        self.device = device
        # not used currently because the model may be optimized independently
        self.dit_weight_dtype = dit_weight_dtype
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def parse_args() -> argparse.Namespace:
    """Build the FramePack inference argument parser and parse the command line.

    Returns:
        argparse.Namespace: parsed arguments.

    Raises:
        ValueError: if --from_file and --interactive are combined, if no prompt
            source is given when not decoding latents, or if --lycoris is
            requested but the LyCORIS package is unavailable.
    """
    parser = argparse.ArgumentParser(description="FramePack inference script")

    # WAN arguments
    # parser.add_argument("--ckpt_dir", type=str, default=None, help="The path to the checkpoint directory (Wan 2.1 official).")
    parser.add_argument(
        "--sample_solver", type=str, default="unipc", choices=["unipc", "dpm++", "vanilla"], help="The solver used to sample."
    )

    parser.add_argument("--dit", type=str, default=None, help="DiT directory or path")
    parser.add_argument(
        "--disable_numpy_memmap", action="store_true", help="Disable numpy memmap when loading safetensors. Default is False."
    )
    parser.add_argument("--vae", type=str, default=None, help="VAE directory or path")
    parser.add_argument("--text_encoder1", type=str, required=True, help="Text Encoder 1 directory or path")
    parser.add_argument("--text_encoder2", type=str, required=True, help="Text Encoder 2 directory or path")
    parser.add_argument("--image_encoder", type=str, required=True, help="Image Encoder directory or path")
    parser.add_argument("--f1", action="store_true", help="Use F1 sampling method")

    # LoRA
    parser.add_argument("--lora_weight", type=str, nargs="*", required=False, default=None, help="LoRA weight path")
    parser.add_argument("--lora_multiplier", type=float, nargs="*", default=1.0, help="LoRA multiplier")
    parser.add_argument("--include_patterns", type=str, nargs="*", default=None, help="LoRA module include patterns")
    parser.add_argument("--exclude_patterns", type=str, nargs="*", default=None, help="LoRA module exclude patterns")
    parser.add_argument(
        "--save_merged_model",
        type=str,
        default=None,
        help="Save merged model to path. If specified, no inference will be performed.",
    )

    # inference
    parser.add_argument(
        "--prompt",
        type=str,
        default=None,
        help="prompt for generation. If `;;;` is used, it will be split into sections. Example: `section_index:prompt` or "
        "`section_index:prompt;;;section_index:prompt;;;...`, section_index can be `0` or `-1` or `0-2`, `-1` means last section, `0-2` means from 0 to 2 (inclusive).",
    )
    parser.add_argument(
        "--negative_prompt",
        type=str,
        default=None,
        help="negative prompt for generation, default is empty string. should not change.",
    )
    parser.add_argument(
        "--custom_system_prompt",
        type=str,
        default=None,
        help="Custom system prompt for LLM. If specified, it will override the default system prompt. See hunyuan_model/text_encoder.py for the default system prompt.",
    )
    parser.add_argument("--video_size", type=int, nargs=2, default=[256, 256], help="video size, height and width")
    parser.add_argument("--video_seconds", type=float, default=5.0, help="video length, default is 5.0 seconds")
    parser.add_argument(
        "--video_sections",
        type=int,
        default=None,
        help="number of video sections, Default is None (auto calculate from video seconds)",
    )
    parser.add_argument(
        "--one_frame_inference",
        type=str,
        default=None,
        help="one frame inference, default is None, comma separated values from 'no_2x', 'no_4x', 'no_post', 'control_indices' and 'target_index'.",
    )
    parser.add_argument(
        "--one_frame_auto_resize",
        action="store_true",
        help="Automatically adjust height and width based on control image size and given size for one frame inference. Default is False.",
    )
    parser.add_argument(
        "--control_image_path", type=str, default=None, nargs="*", help="path to control (reference) image for one frame inference."
    )
    parser.add_argument(
        "--control_image_mask_path",
        type=str,
        default=None,
        nargs="*",
        help="path to control (reference) image mask for one frame inference.",
    )
    parser.add_argument("--fps", type=int, default=30, help="video fps, default is 30")
    parser.add_argument("--infer_steps", type=int, default=25, help="number of inference steps, default is 25")
    parser.add_argument("--save_path", type=str, required=True, help="path to save generated video")
    parser.add_argument("--seed", type=int, default=None, help="Seed for evaluation.")
    # parser.add_argument(
    #     "--cpu_noise", action="store_true", help="Use CPU to generate noise (compatible with ComfyUI). Default is False."
    # )
    parser.add_argument("--latent_window_size", type=int, default=9, help="latent window size, default is 9. should not change.")
    parser.add_argument(
        # typo fix: "Embeded" -> "Embedded"
        "--embedded_cfg_scale", type=float, default=10.0, help="Embedded CFG scale (distilled CFG Scale), default is 10.0"
    )
    parser.add_argument(
        "--guidance_scale",
        type=float,
        default=1.0,
        help="Guidance scale for classifier free guidance. Default is 1.0 (no guidance), should not change.",
    )
    parser.add_argument("--guidance_rescale", type=float, default=0.0, help="CFG Re-scale, default is 0.0. Should not change.")
    # parser.add_argument("--video_path", type=str, default=None, help="path to video for video2video inference")
    parser.add_argument(
        "--image_path",
        type=str,
        default=None,
        help="path to image for image2video inference. If `;;;` is used, it will be used as section images. The notation is same as `--prompt`.",
    )
    parser.add_argument("--end_image_path", type=str, default=None, help="path to end image for image2video inference")
    parser.add_argument(
        "--latent_paddings",
        type=str,
        default=None,
        help="latent paddings for each section, comma separated values. default is None (FramePack default paddings)",
    )
    # parser.add_argument(
    #     "--control_path",
    #     type=str,
    #     default=None,
    #     help="path to control video for inference with controlnet. video file or directory with images",
    # )
    # parser.add_argument("--trim_tail_frames", type=int, default=0, help="trim tail N frames from the video before saving")

    # Flow Matching
    parser.add_argument(
        "--flow_shift",
        type=float,
        default=None,
        help="Shift factor for flow matching schedulers. Default is None (FramePack default).",
    )

    parser.add_argument("--fp8", action="store_true", help="use fp8 for DiT model")
    parser.add_argument("--fp8_scaled", action="store_true", help="use scaled fp8 for DiT, only for fp8")
    # parser.add_argument("--fp8_fast", action="store_true", help="Enable fast FP8 arithmetic (RTX 4XXX+), only for fp8_scaled")
    parser.add_argument(
        "--rope_scaling_factor", type=float, default=0.5, help="RoPE scaling factor for high resolution (H/W), default is 0.5"
    )
    parser.add_argument(
        "--rope_scaling_timestep_threshold",
        type=int,
        default=None,
        help="RoPE scaling timestep threshold, default is None (disable), if set, RoPE scaling will be applied only for timesteps >= threshold, around 800 is good starting point",
    )

    parser.add_argument("--fp8_llm", action="store_true", help="use fp8 for Text Encoder 1 (LLM)")
    parser.add_argument(
        "--device", type=str, default=None, help="device to use for inference. If None, use CUDA if available, otherwise use CPU"
    )
    parser.add_argument(
        "--attn_mode",
        type=str,
        default="torch",
        choices=["flash", "torch", "sageattn", "xformers", "sdpa"],  # "flash2", "flash3",
        help="attention mode",
    )
    parser.add_argument(
        "--vae_tiling",
        action="store_true",
        help="enable spatial tiling for VAE, default is False. If vae_spatial_tile_sample_min_size is set, this is automatically enabled",
    )
    parser.add_argument("--vae_chunk_size", type=int, default=None, help="chunk size for CausalConv3d in VAE")
    parser.add_argument(
        "--vae_spatial_tile_sample_min_size", type=int, default=None, help="spatial tile sample min size for VAE, default 256"
    )
    parser.add_argument("--bulk_decode", action="store_true", help="decode all frames at once")
    parser.add_argument("--blocks_to_swap", type=int, default=0, help="number of blocks to swap in the model")
    parser.add_argument(
        "--use_pinned_memory_for_block_swap",
        action="store_true",
        help="use pinned memory for block swapping, which may speed up data transfer between CPU and GPU but uses more shared GPU memory on Windows",
    )
    parser.add_argument(
        "--output_type",
        type=str,
        default="video",
        choices=["video", "images", "latent", "both", "latent_images"],
        help="output type",
    )
    parser.add_argument("--no_metadata", action="store_true", help="do not save metadata")
    parser.add_argument("--latent_path", type=str, nargs="*", default=None, help="path to latent for decode. no inference")
    parser.add_argument(
        "--lycoris", action="store_true", help=f"use lycoris for inference{'' if lycoris_available else ' (not available)'}"
    )
    setup_parser_compile(parser)

    # MagCache
    parser.add_argument(
        "--magcache_mag_ratios",
        type=str,
        default=None,
        help="Enable MagCache for inference with specified ratios, comma separated values. Example: `1.0,1.06971,1.29073,...`. "
        # typo fix: "as as" -> "as"
        + "It is recommended to use same count of ratios as inference steps."
        + "Default is None (disabled), if `0` is specified, it will use default ratios for 50 steps.",
    )
    parser.add_argument("--magcache_retention_ratio", type=float, default=0.2, help="MagCache retention ratio, default is 0.2")
    parser.add_argument("--magcache_threshold", type=float, default=0.24, help="MagCache threshold, default is 0.24")
    parser.add_argument("--magcache_k", type=int, default=6, help="MagCache k value, default is 6")
    parser.add_argument("--magcache_calibration", action="store_true", help="Enable MagCache calibration")

    # New arguments for batch and interactive modes
    parser.add_argument("--from_file", type=str, default=None, help="Read prompts from a file")
    parser.add_argument("--interactive", action="store_true", help="Interactive mode: read prompts from console")

    args = parser.parse_args()

    # Validate arguments
    if args.from_file and args.interactive:
        raise ValueError("Cannot use both --from_file and --interactive at the same time")

    if args.latent_path is None or len(args.latent_path) == 0:
        # A prompt source is only required when we are actually generating
        # (not merely decoding pre-computed latents).
        if args.prompt is None and not args.from_file and not args.interactive:
            raise ValueError("Either --prompt, --from_file or --interactive must be specified")

    if args.lycoris and not lycoris_available:
        raise ValueError("install lycoris: https://github.com/KohakuBlueleaf/LyCORIS")

    return args
|
| 310 |
+
|
| 311 |
+
|
| 312 |
+
def parse_prompt_line(line: str) -> Dict[str, Any]:
    """Parse a prompt line into a dictionary of argument overrides

    Args:
        line: Prompt line with options, e.g. ``"a cat --w 512 --d 42"``

    Returns:
        Dict[str, Any]: Dictionary of argument overrides
    """
    # TODO common function with hv_train_network.line_to_prompt_dict
    pieces = line.split(" --")
    overrides: Dict[str, Any] = {"prompt": pieces[0].strip()}
    # Control image / mask paths may repeat, so collect them into lists.
    overrides["control_image_path"] = []
    overrides["control_image_mask_path"] = []

    # option letter -> (argument name, value converter)
    scalar_options = {
        "w": ("video_size_width", int),
        "h": ("video_size_height", int),
        "f": ("video_seconds", float),
        "d": ("seed", int),
        "s": ("infer_steps", int),
        "g": ("guidance_scale", float),
        "l": ("guidance_scale", float),
        "fs": ("flow_shift", float),
        "i": ("image_path", str),
        "n": ("negative_prompt", str),
        "vs": ("video_sections", int),
        "ei": ("end_image_path", str),
        "of": ("one_frame_inference", str),
        # MagCache tuning
        "mcrr": ("magcache_retention_ratio", float),
        "mct": ("magcache_threshold", float),
        "mck": ("magcache_k", int),
    }
    list_options = {"ci": "control_image_path", "cim": "control_image_mask_path"}

    for piece in pieces[1:]:
        if not piece.strip():
            continue
        head, _, tail = piece.partition(" ")
        option = head.strip()
        value = tail.strip()
        if option in scalar_options:
            key, convert = scalar_options[option]
            overrides[key] = convert(value)
        elif option in list_options:
            overrides[list_options[option]].append(value)
        # unknown options are silently ignored, matching the original behavior

    # Drop the list keys entirely when no paths were provided.
    for key in ("control_image_path", "control_image_mask_path"):
        if not overrides[key]:
            del overrides[key]

    return overrides
|
| 386 |
+
|
| 387 |
+
|
| 388 |
+
def apply_overrides(args: argparse.Namespace, overrides: Dict[str, Any]) -> argparse.Namespace:
    """Return a deep copy of *args* with *overrides* applied.

    The pseudo-keys ``video_size_width`` / ``video_size_height`` update the
    corresponding element of ``args.video_size`` (stored as [height, width]);
    every other key is set as a plain attribute.

    Args:
        args: Original arguments (left unmodified).
        overrides: Dictionary of overrides.

    Returns:
        argparse.Namespace: New arguments with overrides applied.
    """
    updated = copy.deepcopy(args)

    # video_size is [height, width], so width lives at index 1.
    axis_keys = {"video_size_width": 1, "video_size_height": 0}
    for name, value in overrides.items():
        if name in axis_keys:
            updated.video_size[axis_keys[name]] = value
        else:
            setattr(updated, name, value)

    return updated
|
| 409 |
+
|
| 410 |
+
|
| 411 |
+
def check_inputs(args: argparse.Namespace) -> Tuple[int, int, int]:
    """Validate video size and length

    Args:
        args: command line arguments

    Returns:
        Tuple[int, int, float]: (height, width, video_seconds)
    """
    height, width = args.video_size

    video_seconds = args.video_seconds
    if args.video_sections is not None:
        # Derive duration from the section count: each section contributes
        # latent_window_size * 4 frames, plus one initial frame.
        video_seconds = (args.video_sections * (args.latent_window_size * 4) + 1) / args.fps

    auto_resize = (
        args.one_frame_inference is not None and args.one_frame_auto_resize and args.control_image_path is not None
    )
    if auto_resize:
        # One-frame mode: snap the requested size to the bucket resolution
        # that best matches the first control image.
        with Image.open(args.control_image_path[0]) as control_image:
            width, height = image_video_dataset.BucketSelector.calculate_bucket_resolution(
                control_image.size, (width, height), architecture=image_video_dataset.ARCHITECTURE_FRAMEPACK
            )
            logger.info(f"Adjusted image size to {width}x{height} based on control image size {control_image.size}")

    if height % 8 != 0 or width % 8 != 0:
        raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

    return height, width, video_seconds
|
| 438 |
+
|
| 439 |
+
|
| 440 |
+
# region DiT model
|
| 441 |
+
|
| 442 |
+
|
| 443 |
+
def load_dit_model(args: argparse.Namespace, device: torch.device) -> HunyuanVideoTransformer3DModelPackedInference:
    """load DiT model

    Loads the FramePack DiT, optionally merging LoRA weights (at load time, or
    statically via LyCORIS), then applies RoPE scaling, MagCache, fp8
    optimization, block swapping and torch.compile as requested by ``args``.

    Args:
        args: command line arguments
        device: device to use

    Returns:
        HunyuanVideoTransformer3DModelPackedInference: DiT model
    """
    # If LyCORIS is enabled, we will load the model to CPU and then merge LoRA weights (static method)

    loading_device = "cpu"
    if args.blocks_to_swap == 0 and not args.lycoris:
        loading_device = device

    # load LoRA weights (non-LyCORIS path: merged during model loading below)
    if not args.lycoris and args.lora_weight is not None and len(args.lora_weight) > 0:
        lora_weights_list = []
        for lora_weight in args.lora_weight:
            logger.info(f"Loading LoRA weight from: {lora_weight}")
            lora_sd = load_file(lora_weight)  # load on CPU, dtype is as is
            lora_sd = convert_lora_for_framepack(lora_sd)
            lora_sd = filter_lora_state_dict(lora_sd, args.include_patterns, args.exclude_patterns)
            lora_weights_list.append(lora_sd)
    else:
        lora_weights_list = None

    # load DiT model
    logger.info(f"Loading DiT model from: {args.dit}")
    model: HunyuanVideoTransformer3DModelPackedInference = load_packed_model(
        device,
        args.dit,
        args.attn_mode,
        loading_device,
        args.fp8_scaled and not args.lycoris,
        for_inference=True,
        lora_weights_list=lora_weights_list,
        lora_multipliers=args.lora_multiplier,
        disable_numpy_memmap=args.disable_numpy_memmap,
    )

    # apply RoPE scaling factor
    if args.rope_scaling_timestep_threshold is not None:
        logger.info(
            f"Applying RoPE scaling factor {args.rope_scaling_factor} for timesteps >= {args.rope_scaling_timestep_threshold}"
        )
        model.enable_rope_scaling(args.rope_scaling_timestep_threshold, args.rope_scaling_factor)

    # magcache
    initialize_magcache(args, model)

    if args.lycoris:
        # merge LoRA weights statically
        if args.lora_weight is not None and len(args.lora_weight) > 0:
            # ugly hack to common merge_lora_weights function
            merge_lora_weights(
                lora_framepack,
                model,
                args.lora_weight,
                args.lora_multiplier,
                args.include_patterns,
                args.exclude_patterns,
                device,
                lycoris=True,
                save_merged_model=args.save_merged_model,
                converter=convert_lora_for_framepack,
            )

        if args.fp8_scaled:
            state_dict = model.state_dict()  # bf16 state dict

            # if no blocks to swap, we can move the weights to GPU after optimization on GPU (omit redundant CPU->GPU copy)
            move_to_device = args.blocks_to_swap == 0  # if blocks_to_swap > 0, we will keep the model on CPU
            state_dict = model.fp8_optimization(state_dict, device, move_to_device, use_scaled_mm=False)  # args.fp8_fast)

            info = model.load_state_dict(state_dict, strict=True, assign=True)
            logger.info(f"Loaded FP8 optimized weights: {info}")

    # if we only want to save the model, we can skip the rest
    if args.save_merged_model:
        return model

    if not args.fp8_scaled:
        # simple cast to dit_dtype
        target_dtype = None  # load as-is (dit_weight_dtype == dtype of the weights in state_dict)
        target_device = None

        if args.fp8:
            # BUGFIX: the torch attribute is float8_e4m3fn; `torch.float8e4m3fn`
            # does not exist and raised AttributeError when --fp8 was used.
            target_dtype = torch.float8_e4m3fn

        if args.blocks_to_swap == 0:
            logger.info(f"Move model to device: {device}")
            target_device = device

        if target_device is not None and target_dtype is not None:
            model.to(target_device, target_dtype)  # move and cast at the same time. this reduces redundant copy operations

    if args.blocks_to_swap > 0:
        logger.info(f"Enable swap {args.blocks_to_swap} blocks to CPU from device: {device}")
        model.enable_block_swap(
            args.blocks_to_swap, device, supports_backward=False, use_pinned_memory=args.use_pinned_memory_for_block_swap
        )
        model.move_to_device_except_swap_blocks(device)
        model.prepare_block_swap_before_forward()
    else:
        # make sure the model is on the right device
        model.to(device)

    if args.compile:
        model = model_utils.compile_transformer(
            args, model, [model.transformer_blocks, model.single_transformer_blocks], disable_linear=args.blocks_to_swap > 0
        )

    model.eval().requires_grad_(False)
    clean_memory_on_device(device)

    return model
|
| 561 |
+
|
| 562 |
+
|
| 563 |
+
# endregion
|
| 564 |
+
|
| 565 |
+
|
| 566 |
+
def decode_latent(
    latent_window_size: int,
    total_latent_sections: int,
    bulk_decode: bool,
    vae: AutoencoderKLCausal3D,
    latent: torch.Tensor,
    device: torch.device,
    one_frame_inference_mode: bool = False,
) -> torch.Tensor:
    """Decode a latent video tensor to pixels with the VAE.

    Three decode strategies:
      * sectioned (default): decode overlapping windows and soft-blend them,
        keeping peak memory low;
      * bulk: decode everything in one VAE call;
      * one-frame: decode each latent frame independently and concatenate.

    Args:
        latent_window_size: latent frames per section window (default 9 upstream).
        total_latent_sections: number of generation sections the latent was built from.
        bulk_decode: decode all frames at once instead of per-section.
        vae: VAE model; moved to `device` for decoding and back to CPU afterwards.
        latent: latent tensor, NCFHW or CFHW (batch dim added if missing).
        device: compute device for the VAE.
        one_frame_inference_mode: decode each latent frame separately.

    Returns:
        torch.Tensor: decoded pixels with the batch dimension removed
        (presumably C, F, H, W — TODO confirm against hunyuan.vae_decode).
    """
    logger.info("Decoding video...")
    if latent.ndim == 4:
        latent = latent.unsqueeze(0)  # add batch dimension

    vae.to(device)
    if not bulk_decode and not one_frame_inference_mode:
        latent_window_size = latent_window_size  # default is 9 (no-op, kept from original)
        # total_latent_sections = (args.video_seconds * 30) / (latent_window_size * 4)
        # total_latent_sections = int(max(round(total_latent_sections), 1))
        num_frames = latent_window_size * 4 - 3

        # Walk sections from the end of the latent backwards, slicing out
        # double-window chunks; the last section carries one extra frame.
        latents_to_decode = []
        latent_frame_index = 0
        for i in range(total_latent_sections - 1, -1, -1):
            # NOTE(review): true only on the first loop iteration (i starts at
            # total_latent_sections - 1); the "last section" is processed first.
            is_last_section = i == total_latent_sections - 1
            generated_latent_frames = (num_frames + 3) // 4 + (1 if is_last_section else 0)
            section_latent_frames = (latent_window_size * 2 + 1) if is_last_section else (latent_window_size * 2)

            section_latent = latent[:, :, latent_frame_index : latent_frame_index + section_latent_frames, :, :]
            if section_latent.shape[2] > 0:
                latents_to_decode.append(section_latent)

            latent_frame_index += generated_latent_frames

        latents_to_decode = latents_to_decode[::-1]  # reverse the order of latents to decode

        # Decode each chunk and soft-blend it onto the accumulated pixels over
        # the overlapping frame span.
        history_pixels = None
        for latent in tqdm(latents_to_decode):
            if history_pixels is None:
                history_pixels = hunyuan.vae_decode(latent, vae).cpu()
            else:
                overlapped_frames = latent_window_size * 4 - 3
                current_pixels = hunyuan.vae_decode(latent, vae).cpu()
                history_pixels = soft_append_bcthw(current_pixels, history_pixels, overlapped_frames)
            clean_memory_on_device(device)
    else:
        # bulk decode
        logger.info("Bulk decoding or one frame inference")
        if not one_frame_inference_mode:
            history_pixels = hunyuan.vae_decode(latent, vae).cpu()  # normal
        else:
            # one frame inference: decode each latent frame independently,
            # then concatenate along the frame axis.
            history_pixels = [hunyuan.vae_decode(latent[:, :, i : i + 1, :, :], vae).cpu() for i in range(latent.shape[2])]
            history_pixels = torch.cat(history_pixels, dim=2)

    vae.to("cpu")

    logger.info(f"Decoded. Pixel shape {history_pixels.shape}")
    return history_pixels[0]  # remove batch dimension
|
| 624 |
+
|
| 625 |
+
|
| 626 |
+
def prepare_image_inputs(
    args: argparse.Namespace,
    device: torch.device,
    vae: AutoencoderKLCausal3D,
    shared_models: Optional[Dict] = None,
) -> Dict[str, Any]:
    """Prepare image-related inputs for I2V: VAE encoding and image encoder features.

    Loads per-section start images (or a black placeholder), the optional end
    image and optional control images, encodes them with the CLIP-style image
    encoder and the VAE, and returns everything on CPU.

    Args:
        args: command line arguments (image_path, end_image_path, control_image_path, ...).
        device: compute device used temporarily for the encoders; results come back on CPU.
        vae: VAE model; moved to `device` for encoding and restored to its original device.
        shared_models: optional cache of preloaded models ("feature_extractor",
            "image_encoder"); when absent, encoders are loaded and freed locally.

    Returns:
        Dict with keys: height, width, video_seconds, context_img (per-section
        dicts of image-encoder hidden state / start latent / image path),
        end_latent, control_latents, control_mask_images.
    """
    height, width, video_seconds = check_inputs(args)

    # prepare image
    def preprocess_image(image_path: str):
        # Returns (NCFHW tensor in [-1, 1], resized HWC uint8 array, alpha channel or None).
        image = Image.open(image_path)
        if image.mode == "RGBA":
            # keep the alpha channel separately; it is used as a mask for control images
            alpha = image.split()[-1]
        else:
            alpha = None
        image = image.convert("RGB")

        image_np = np.array(image)  # PIL to numpy, HWC

        image_np = image_video_dataset.resize_image_to_bucket(image_np, (width, height))
        image_tensor = torch.from_numpy(image_np).float() / 127.5 - 1.0  # -1 to 1.0, HWC
        image_tensor = image_tensor.permute(2, 0, 1)[None, :, None]  # HWC -> CHW -> NCFHW, N=1, C=3, F=1
        return image_tensor, image_np, alpha

    section_image_paths = parse_section_strings(args.image_path)

    section_images = {}
    if section_image_paths:
        for index, image_path in section_image_paths.items():
            img_tensor, img_np, _ = preprocess_image(image_path)
            section_images[index] = (img_tensor, img_np)
    else:
        # image_path should be given, if not, we create a placeholder image (black image)
        placeholder_img_np = np.zeros((height, width, 3), dtype=np.uint8)  # Placeholder
        placeholder_img_tensor = torch.zeros(1, 3, 1, height, width)
        section_images[0] = (placeholder_img_tensor, placeholder_img_np)
        section_image_paths[0] = "placeholder_image"

    # check end image
    if args.end_image_path is not None:
        end_image_tensor, _, _ = preprocess_image(args.end_image_path)
    else:
        end_image_tensor = None

    # check control images
    if args.control_image_path is not None and len(args.control_image_path) > 0:
        control_image_tensors = []
        control_mask_images = []
        for ctrl_image_path in args.control_image_path:
            control_image_tensor, _, control_mask = preprocess_image(ctrl_image_path)
            control_image_tensors.append(control_image_tensor)
            control_mask_images.append(control_mask)
    else:
        control_image_tensors = None  # Keep as None if not provided
        control_mask_images = None

    # load image encoder
    # VAE is passed as an argument, assume it's on the correct device or handled by caller
    if shared_models is not None and "feature_extractor" in shared_models and "image_encoder" in shared_models:
        feature_extractor, image_encoder = shared_models["feature_extractor"], shared_models["image_encoder"]
    else:
        feature_extractor, image_encoder = load_image_encoders(args)

    image_encoder_original_device = image_encoder.device
    image_encoder.to(device)

    # Encode every section image with the image encoder; results kept on CPU.
    section_image_encoder_last_hidden_states = {}
    for index, (img_tensor, img_np) in section_images.items():
        with torch.no_grad():
            image_encoder_output = hf_clip_vision_encode(img_np, feature_extractor, image_encoder)
        image_encoder_last_hidden_state = image_encoder_output.last_hidden_state.cpu()
        section_image_encoder_last_hidden_states[index] = image_encoder_last_hidden_state

    if not (shared_models and "image_encoder" in shared_models):  # if loaded locally
        del image_encoder, feature_extractor
    else:  # if shared, move back to original device (likely CPU)
        image_encoder.to(image_encoder_original_device)

    clean_memory_on_device(device)

    # VAE encoding
    logger.info("Encoding image to latent space with VAE")
    vae_original_device = vae.device
    vae.to(device)

    section_start_latents = {}
    for index, (img_tensor, img_np) in section_images.items():
        start_latent = hunyuan.vae_encode(img_tensor.to(device), vae).cpu()  # ensure tensor is on device
        section_start_latents[index] = start_latent

    end_latent = hunyuan.vae_encode(end_image_tensor.to(device), vae).cpu() if end_image_tensor is not None else None

    control_latents = None
    if control_image_tensors is not None:
        control_latents = []
        for ctrl_image_tensor in control_image_tensors:
            control_latent = hunyuan.vae_encode(ctrl_image_tensor.to(device), vae).cpu()
            control_latents.append(control_latent)

    vae.to(vae_original_device)  # Move VAE back to its original device
    clean_memory_on_device(device)

    # Assemble per-section conditioning dicts keyed by section index.
    arg_c_img = {}
    for index in section_images.keys():
        image_encoder_last_hidden_state = section_image_encoder_last_hidden_states[index]
        start_latent = section_start_latents[index]
        arg_c_img_i = {
            "image_encoder_last_hidden_state": image_encoder_last_hidden_state,
            "start_latent": start_latent,
            "image_path": section_image_paths.get(index, "placeholder_image"),
        }
        arg_c_img[index] = arg_c_img_i

    return {
        "height": height,
        "width": width,
        "video_seconds": video_seconds,
        "context_img": arg_c_img,
        "end_latent": end_latent,
        "control_latents": control_latents,
        "control_mask_images": control_mask_images,
    }
|
| 749 |
+
|
| 750 |
+
|
| 751 |
+
def prepare_text_inputs(
    args: argparse.Namespace,
    device: torch.device,
    shared_models: Optional[Dict] = None,
) -> Dict[str, Any]:
    """Prepare text-related inputs for I2V: LLM and TextEncoder encoding.

    Encodes every per-section prompt and the negative prompt with both text
    encoders, reusing a per-prompt cache when one is provided via
    ``shared_models["conds_cache"]``.

    Args:
        args: command line arguments (prompt, negative_prompt, guidance_scale,
            fp8_llm, custom_system_prompt, blocks_to_swap, ...).
        device: device used for encoding.
        shared_models: optional dict of pre-loaded models ("tokenizer1",
            "text_encoder1", "tokenizer2", "text_encoder2", optionally the DiT
            under "model" and a shared "conds_cache"). When absent, encoders
            are loaded locally and freed afterwards.

    Returns:
        Dict with "context" (per-section positive conditioning: llama_vec,
        llama_attention_mask, clip_l_pooler, prompt) and "context_null"
        (negative / null conditioning).

    Raises:
        ValueError: if any tokenizer or text encoder could not be obtained.
    """

    n_prompt = args.negative_prompt if args.negative_prompt else ""
    section_prompts = parse_section_strings(args.prompt if args.prompt else " ")  # Ensure prompt is not None

    # load text encoder: conds_cache holds cached encodings for prompts without padding
    conds_cache = {}
    if shared_models is not None:
        tokenizer1, text_encoder1 = shared_models.get("tokenizer1"), shared_models.get("text_encoder1")
        tokenizer2, text_encoder2 = shared_models.get("tokenizer2"), shared_models.get("text_encoder2")
        if "conds_cache" in shared_models:  # Use shared cache if available
            conds_cache = shared_models["conds_cache"]
        # text_encoder1 and text_encoder2 are on device (batched inference) or CPU (interactive inference)
    else:  # Load if not in shared_models
        tokenizer1, text_encoder1 = load_text_encoder1(args, args.fp8_llm, device)  # Load to GPU
        tokenizer2, text_encoder2 = load_text_encoder2(args)  # Load to CPU
        text_encoder2.to(device)  # Move text_encoder2 to the same device as text_encoder1

    # Store original devices to move back later if they were shared. This does nothing if shared_models is None
    text_encoder1_original_device = text_encoder1.device if text_encoder1 else None
    text_encoder2_original_device = text_encoder2.device if text_encoder2 else None

    logger.info("Encoding prompt with Text Encoders")
    llama_vecs = {}  # section index -> padded LLM embedding
    llama_attention_masks = {}  # section index -> attention mask for the padded embedding
    clip_l_poolers = {}  # section index -> CLIP-L pooled embedding

    # Ensure text_encoder1 and text_encoder2 are not None before proceeding
    if not text_encoder1 or not text_encoder2 or not tokenizer1 or not tokenizer2:
        raise ValueError("Text encoders or tokenizers are not loaded properly.")

    # Define a function to move models to device if needed
    # This is to avoid moving models if not needed, especially in interactive mode
    model_is_moved = False

    def move_models_to_device_if_needed():
        # Lazily evict the (shared) DiT from the device and move both text
        # encoders there; runs at most once per prepare_text_inputs call.
        nonlocal model_is_moved
        nonlocal shared_models

        if model_is_moved:
            return
        model_is_moved = True

        logger.info(f"Moving DiT and Text Encoders to appropriate device: {device} or CPU")
        if shared_models and "model" in shared_models:  # DiT model is shared
            if args.blocks_to_swap > 0:
                logger.info("Waiting for 5 seconds to finish block swap")
                time.sleep(5)
            model = shared_models["model"]
            model.to("cpu")
        clean_memory_on_device(device)  # clean memory on device before moving models

        text_encoder1.to(device)
        text_encoder2.to(device)

    with torch.autocast(device_type=device.type, dtype=text_encoder1.dtype), torch.no_grad():
        for index, prompt in section_prompts.items():
            if prompt in conds_cache:
                llama_vec, clip_l_pooler = conds_cache[prompt]
            else:
                move_models_to_device_if_needed()
                llama_vec, clip_l_pooler = hunyuan.encode_prompt_conds(
                    prompt, text_encoder1, text_encoder2, tokenizer1, tokenizer2, custom_system_prompt=args.custom_system_prompt
                )
                llama_vec = llama_vec.cpu()
                clip_l_pooler = clip_l_pooler.cpu()
                conds_cache[prompt] = (llama_vec, clip_l_pooler)

            llama_vec, llama_attention_mask = crop_or_pad_yield_mask(llama_vec, length=512)
            llama_vecs[index] = llama_vec
            llama_attention_masks[index] = llama_attention_mask
            clip_l_poolers[index] = clip_l_pooler

    if args.guidance_scale == 1.0:
        # llama_vecs[0] should always exist because prompt is guaranteed to be non-empty
        first_llama_vec = llama_vecs.get(0)  # this is cropped or padded, but it's okay for null context
        first_clip_l_pooler = clip_l_poolers.get(0)
        llama_vec_n, clip_l_pooler_n = torch.zeros_like(first_llama_vec), torch.zeros_like(first_clip_l_pooler)

    else:
        with torch.autocast(device_type=device.type, dtype=text_encoder1.dtype), torch.no_grad():
            if n_prompt in conds_cache:
                llama_vec_n, clip_l_pooler_n = conds_cache[n_prompt]
            else:
                move_models_to_device_if_needed()
                llama_vec_n, clip_l_pooler_n = hunyuan.encode_prompt_conds(
                    n_prompt, text_encoder1, text_encoder2, tokenizer1, tokenizer2, custom_system_prompt=args.custom_system_prompt
                )
                llama_vec_n = llama_vec_n.cpu()
                clip_l_pooler_n = clip_l_pooler_n.cpu()
                conds_cache[n_prompt] = (llama_vec_n, clip_l_pooler_n)

    llama_vec_n, llama_attention_mask_n = crop_or_pad_yield_mask(llama_vec_n, length=512)

    if not (shared_models and "text_encoder1" in shared_models):  # if loaded locally
        del tokenizer1, text_encoder1, tokenizer2, text_encoder2
        gc.collect()  # transformer==4.54.1 seems to need this to free memory
    else:  # if shared, move back to original device (likely CPU)
        if text_encoder1:
            text_encoder1.to(text_encoder1_original_device)
        if text_encoder2:
            text_encoder2.to(text_encoder2_original_device)

    clean_memory_on_device(device)

    arg_c = {}
    for index in llama_vecs.keys():
        llama_vec = llama_vecs[index]
        llama_attention_mask = llama_attention_masks[index]
        clip_l_pooler = clip_l_poolers[index]
        arg_c_i = {
            "llama_vec": llama_vec,
            "llama_attention_mask": llama_attention_mask,
            "clip_l_pooler": clip_l_pooler,
            "prompt": section_prompts[index],  # keep raw prompt for logging/metadata
        }
        arg_c[index] = arg_c_i

    arg_null = {
        "llama_vec": llama_vec_n,
        "llama_attention_mask": llama_attention_mask_n,
        "clip_l_pooler": clip_l_pooler_n,
    }

    return {
        "context": arg_c,
        "context_null": arg_null,
    }
|
| 884 |
+
|
| 885 |
+
|
| 886 |
+
def prepare_i2v_inputs(
    args: argparse.Namespace,
    device: torch.device,
    vae: AutoencoderKLCausal3D,  # VAE is now explicitly passed
    shared_models: Optional[Dict] = None,
) -> Tuple[int, int, float, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Tuple[dict, dict]]:
    """Prepare inputs for I2V by calling image and text preparation functions."""

    image_data = prepare_image_inputs(args, device, vae, shared_models)
    text_data = prepare_text_inputs(args, device, shared_models)

    # Flatten the two result dicts into the legacy positional tuple order.
    ordered = [
        image_data["height"],
        image_data["width"],
        image_data["video_seconds"],
        text_data["context"],
        text_data["context_null"],
        image_data["context_img"],
        image_data["end_latent"],
        image_data["control_latents"],
        image_data["control_mask_images"],
    ]
    return tuple(ordered)
|
| 908 |
+
|
| 909 |
+
|
| 910 |
+
# def setup_scheduler(args: argparse.Namespace, config, device: torch.device) -> Tuple[Any, torch.Tensor]:
|
| 911 |
+
# """setup scheduler for sampling
|
| 912 |
+
|
| 913 |
+
# Args:
|
| 914 |
+
# args: command line arguments
|
| 915 |
+
# config: model configuration
|
| 916 |
+
# device: device to use
|
| 917 |
+
|
| 918 |
+
# Returns:
|
| 919 |
+
# Tuple[Any, torch.Tensor]: (scheduler, timesteps)
|
| 920 |
+
# """
|
| 921 |
+
# if args.sample_solver == "unipc":
|
| 922 |
+
# scheduler = FlowUniPCMultistepScheduler(num_train_timesteps=config.num_train_timesteps, shift=1, use_dynamic_shifting=False)
|
| 923 |
+
# scheduler.set_timesteps(args.infer_steps, device=device, shift=args.flow_shift)
|
| 924 |
+
# timesteps = scheduler.timesteps
|
| 925 |
+
# elif args.sample_solver == "dpm++":
|
| 926 |
+
# scheduler = FlowDPMSolverMultistepScheduler(
|
| 927 |
+
# num_train_timesteps=config.num_train_timesteps, shift=1, use_dynamic_shifting=False
|
| 928 |
+
# )
|
| 929 |
+
# sampling_sigmas = get_sampling_sigmas(args.infer_steps, args.flow_shift)
|
| 930 |
+
# timesteps, _ = retrieve_timesteps(scheduler, device=device, sigmas=sampling_sigmas)
|
| 931 |
+
# elif args.sample_solver == "vanilla":
|
| 932 |
+
# scheduler = FlowMatchDiscreteScheduler(num_train_timesteps=config.num_train_timesteps, shift=args.flow_shift)
|
| 933 |
+
# scheduler.set_timesteps(args.infer_steps, device=device)
|
| 934 |
+
# timesteps = scheduler.timesteps
|
| 935 |
+
|
| 936 |
+
# # FlowMatchDiscreteScheduler does not support generator argument in step method
|
| 937 |
+
# org_step = scheduler.step
|
| 938 |
+
|
| 939 |
+
# def step_wrapper(
|
| 940 |
+
# model_output: torch.Tensor,
|
| 941 |
+
# timestep: Union[int, torch.Tensor],
|
| 942 |
+
# sample: torch.Tensor,
|
| 943 |
+
# return_dict: bool = True,
|
| 944 |
+
# generator=None,
|
| 945 |
+
# ):
|
| 946 |
+
# return org_step(model_output, timestep, sample, return_dict=return_dict)
|
| 947 |
+
|
| 948 |
+
# scheduler.step = step_wrapper
|
| 949 |
+
# else:
|
| 950 |
+
# raise NotImplementedError("Unsupported solver.")
|
| 951 |
+
|
| 952 |
+
# return scheduler, timesteps
|
| 953 |
+
|
| 954 |
+
|
| 955 |
+
def convert_lora_for_framepack(lora_sd: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
    """Normalize a LoRA state dict into the FramePack (Musubi Tuner) key format."""
    key_list = list(lora_sd.keys())

    if not key_list[0].startswith("lora_unet_"):
        # Not already in Musubi Tuner format: probe for diffusion-pipe style
        # keys ("<prefix>.<module>.lora_A.weight" with a transformer prefix).
        allowed_prefixes = ("diffusion_model", "transformer")  # to ignore Text Encoder modules
        found_suffix = None
        found_prefix = None
        for candidate in key_list:
            if found_suffix is None and "lora_A" in candidate:
                found_suffix = "lora_A"
            if found_prefix is None:
                head = candidate.split(".")[0]
                if head in allowed_prefixes:
                    found_prefix = head
            if found_suffix is not None and found_prefix is not None:
                break

        if found_suffix == "lora_A" and found_prefix is not None:
            logging.info("Diffusion-pipe (?) LoRA detected, converting to the default LoRA format")
            lora_sd = convert_lora_from_diffusion_pipe_or_something(lora_sd, "lora_unet_")
        else:
            logging.info("LoRA file format not recognized. Using it as-is.")

    # HunyuanVideo LoRAs name their modules double_blocks / single_blocks;
    # those need a second conversion pass into FramePack module names.
    if any("double_blocks" in k or "single_blocks" in k for k in lora_sd.keys()):
        logging.info("HunyuanVideo LoRA detected, converting to FramePack format")
        lora_sd = convert_hunyuan_to_framepack(lora_sd)

    return lora_sd
|
| 994 |
+
|
| 995 |
+
|
| 996 |
+
def convert_lora_from_diffusion_pipe_or_something(lora_sd: dict[str, torch.Tensor], prefix: str) -> dict[str, torch.Tensor]:
    """
    Convert LoRA weights to the format used by the diffusion pipeline to Musubi Tuner.
    Copy from Musubi Tuner repo.
    """
    # Input (diffusers-like):  {"diffusion_model.module.name.lora_A.weight": w, ...}
    # Output (default LoRA):   {"<prefix>module_name.lora_down.weight": w, ...}
    # Diffusers stores no alpha, so an alpha equal to the rank is synthesized.
    converted: dict[str, torch.Tensor] = {}
    rank_by_module: dict[str, int] = {}

    for original_key, tensor in lora_sd.items():
        head, body = original_key.split(".", 1)
        if head != "diffusion_model" and head != "transformer":
            print(f"unexpected key: {original_key} in diffusers format")
            continue

        renamed = f"{prefix}{body}".replace(".", "_")
        renamed = renamed.replace("_lora_A_", ".lora_down.").replace("_lora_B_", ".lora_up.")
        converted[renamed] = tensor

        module_name = renamed.split(".")[0]  # before first dot
        if "lora_down" in renamed and module_name not in rank_by_module:
            rank_by_module[module_name] = tensor.shape[0]  # rank = rows of lora_down

    # synthesize alpha = rank for every module
    for module_name, rank in rank_by_module.items():
        converted[f"{module_name}.alpha"] = torch.tensor(rank)

    return converted
|
| 1026 |
+
|
| 1027 |
+
|
| 1028 |
+
def convert_hunyuan_to_framepack(lora_sd: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
    """
    Convert HunyuanVideo LoRA weights to FramePack format.
    """
    # Substring rewrites per HunyuanVideo block family (applied in order).
    DOUBLE_RENAMES = (
        ("double_blocks", "transformer_blocks"),
        ("img_mod_linear", "norm1_linear"),
        ("img_attn_qkv", "attn_to_QKV"),  # split later
        ("img_attn_proj", "attn_to_out_0"),
        ("img_mlp_fc1", "ff_net_0_proj"),
        ("img_mlp_fc2", "ff_net_2"),
        ("txt_mod_linear", "norm1_context_linear"),
        ("txt_attn_qkv", "attn_add_QKV_proj"),  # split later
        ("txt_attn_proj", "attn_to_add_out"),
        ("txt_mlp_fc1", "ff_context_net_0_proj"),
        ("txt_mlp_fc2", "ff_context_net_2"),
    )
    SINGLE_RENAMES = (
        ("single_blocks", "single_transformer_blocks"),
        ("linear1", "attn_to_QKVM"),  # split later
        ("linear2", "proj_out"),
        ("modulation_linear", "norm_linear"),
    )

    converted: dict[str, torch.Tensor] = {}
    for key, weight in lora_sd.items():
        if "double_blocks" in key:
            renames = DOUBLE_RENAMES
        elif "single_blocks" in key:
            renames = SINGLE_RENAMES
        else:
            print(f"Unsupported module name: {key}, only double_blocks and single_blocks are supported")
            continue
        for old, new in renames:
            key = key.replace(old, new)

        if "QKVM" in key:
            # fused QKV + MLP projection of single blocks: split into Q, K, V, M
            targets = [key.replace("QKVM", t) for t in ("q", "k", "v")]
            targets.append(key.replace("attn_to_QKVM", "proj_mlp"))
            if "_down" in key or "alpha" in key:
                # lora_down weight / alpha is shared verbatim by all four split modules
                assert "alpha" in key or weight.size(1) == 3072, f"QKVM weight size mismatch: {key}. {weight.size()}"
                for target in targets:
                    converted[target] = weight
            elif "_up" in key:
                # lora_up rows are partitioned: 3072 each for Q/K/V, remaining 12288 for M
                assert weight.size(0) == 21504, f"QKVM weight size mismatch: {key}. {weight.size()}"
                converted[targets[0]] = weight[:3072]
                converted[targets[1]] = weight[3072 : 3072 * 2]
                converted[targets[2]] = weight[3072 * 2 : 3072 * 3]
                converted[targets[3]] = weight[3072 * 3 :]  # 21504 - 3072 * 3 = 12288
            else:
                print(f"Unsupported module name: {key}")
                continue
        elif "QKV" in key:
            # fused QKV projection of double blocks: split into Q, K, V
            targets = [key.replace("QKV", t) for t in ("q", "k", "v")]
            if "_down" in key or "alpha" in key:
                assert "alpha" in key or weight.size(1) == 3072, f"QKV weight size mismatch: {key}. {weight.size()}"
                for target in targets:
                    converted[target] = weight
            elif "_up" in key:
                assert weight.size(0) == 3072 * 3, f"QKV weight size mismatch: {key}. {weight.size()}"
                converted[targets[0]] = weight[:3072]
                converted[targets[1]] = weight[3072 : 3072 * 2]
                converted[targets[2]] = weight[3072 * 2 :]
            else:
                print(f"Unsupported module name: {key}")
                continue
        else:
            # no split needed
            converted[key] = weight

    return converted
|
| 1103 |
+
|
| 1104 |
+
|
| 1105 |
+
def initialize_magcache(args: argparse.Namespace, model: HunyuanVideoTransformer3DModelPackedInference) -> None:
    """Enable MagCache on the model when ratios are supplied or calibration is requested."""
    # No-op unless the user asked for MagCache ratios or calibration mode.
    if args.magcache_mag_ratios is None and not args.magcache_calibration:
        return

    parsed_ratios = None  # None -> calibration mode (or built-in defaults)
    if args.magcache_mag_ratios is not None:
        parsed_ratios = [float(r) for r in args.magcache_mag_ratios.split(",")]
        if len(parsed_ratios) == 1 and parsed_ratios[0] == 0:
            # a lone "0" requests the default mag_ratios
            parsed_ratios = None

    logger.info(
        f"Initializing MagCache with mag_ratios: {parsed_ratios}, retention_ratio: {args.magcache_retention_ratio}, "
        f"magcache_thresh: {args.magcache_threshold}, K: {args.magcache_k}, calibration: {args.magcache_calibration}"
    )
    model.initialize_magcache(
        enable=True,
        retention_ratio=args.magcache_retention_ratio,
        mag_ratios=parsed_ratios,
        magcache_thresh=args.magcache_threshold,
        K=args.magcache_k,
        calibration=args.magcache_calibration,
    )
|
| 1129 |
+
|
| 1130 |
+
|
| 1131 |
+
def preprocess_magcache(args: argparse.Namespace, model: HunyuanVideoTransformer3DModelPackedInference) -> None:
    """Reset MagCache state before a sampling run (no-op when MagCache is unused)."""
    magcache_active = args.magcache_mag_ratios is not None or args.magcache_calibration
    if magcache_active:
        model.reset_magcache(args.infer_steps)
|
| 1136 |
+
|
| 1137 |
+
|
| 1138 |
+
def postprocess_magcache(args: argparse.Namespace, model: HunyuanVideoTransformer3DModelPackedInference) -> None:
    """Report MagCache calibration results after a sampling run.

    Only acts in calibration mode (--magcache_calibration): fetches the
    calibration statistics from the model, logs them, and prints a
    comma-separated ratio list ready to paste into --magcache_mag_ratios.
    """
    # The original code had two guards (mag_ratios None + not calibration, then
    # not calibration); the first was fully subsumed by the second, so a single
    # calibration check is equivalent.
    if not args.magcache_calibration:
        return

    # print mag ratios
    norm_ratio, norm_std, cos_dis = model.get_calibration_data()
    logger.info("MagCache calibration data:")
    logger.info(f" - norm_ratio: {norm_ratio}")
    logger.info(f" - norm_std: {norm_std}")
    logger.info(f" - cos_dis: {cos_dis}")
    logger.info("Copy and paste following values to --magcache_mag_ratios argument to use them:")
    # leading 1.0 for the first step, which has no previous residual to compare against
    print(",".join([f"{ratio:.5f}" for ratio in [1] + norm_ratio]))
|
| 1152 |
+
|
| 1153 |
+
|
| 1154 |
+
def generate(
|
| 1155 |
+
args: argparse.Namespace,
|
| 1156 |
+
gen_settings: GenerationSettings,
|
| 1157 |
+
shared_models: Optional[Dict] = None,
|
| 1158 |
+
precomputed_image_data: Optional[Dict] = None,
|
| 1159 |
+
precomputed_text_data: Optional[Dict] = None,
|
| 1160 |
+
) -> tuple[Optional[AutoencoderKLCausal3D], torch.Tensor]: # VAE can be Optional
|
| 1161 |
+
"""main function for generation
|
| 1162 |
+
|
| 1163 |
+
Args:
|
| 1164 |
+
args: command line arguments
|
| 1165 |
+
shared_models: dictionary containing pre-loaded models (mainly for DiT)
|
| 1166 |
+
precomputed_image_data: Optional dictionary with precomputed image data
|
| 1167 |
+
precomputed_text_data: Optional dictionary with precomputed text data
|
| 1168 |
+
|
| 1169 |
+
Returns:
|
| 1170 |
+
tuple: (AutoencoderKLCausal3D model (vae) or None, torch.Tensor generated latent)
|
| 1171 |
+
"""
|
| 1172 |
+
device, dit_weight_dtype = (gen_settings.device, gen_settings.dit_weight_dtype)
|
| 1173 |
+
vae_instance_for_return = None
|
| 1174 |
+
|
| 1175 |
+
# prepare seed
|
| 1176 |
+
seed = args.seed if args.seed is not None else random.randint(0, 2**32 - 1)
|
| 1177 |
+
args.seed = seed # set seed to args for saving
|
| 1178 |
+
|
| 1179 |
+
if precomputed_image_data is not None and precomputed_text_data is not None:
|
| 1180 |
+
logger.info("Using precomputed image and text data.")
|
| 1181 |
+
height = precomputed_image_data["height"]
|
| 1182 |
+
width = precomputed_image_data["width"]
|
| 1183 |
+
video_seconds = precomputed_image_data["video_seconds"]
|
| 1184 |
+
context_img = precomputed_image_data["context_img"]
|
| 1185 |
+
end_latent = precomputed_image_data["end_latent"]
|
| 1186 |
+
control_latents = precomputed_image_data["control_latents"]
|
| 1187 |
+
control_mask_images = precomputed_image_data["control_mask_images"]
|
| 1188 |
+
|
| 1189 |
+
context = precomputed_text_data["context"]
|
| 1190 |
+
context_null = precomputed_text_data["context_null"]
|
| 1191 |
+
# VAE is not loaded here if data is precomputed; decoding VAE is handled by caller (e.g., process_batch_prompts)
|
| 1192 |
+
# vae_instance_for_return remains None
|
| 1193 |
+
else:
|
| 1194 |
+
# Load VAE if not precomputed (for single/interactive mode)
|
| 1195 |
+
# shared_models for single/interactive might contain text/image encoders, but not VAE after `load_shared_models` change.
|
| 1196 |
+
# So, VAE will be loaded here for single/interactive.
|
| 1197 |
+
logger.info("No precomputed data. Preparing image and text inputs.")
|
| 1198 |
+
if shared_models and "vae" in shared_models: # Should not happen with new load_shared_models
|
| 1199 |
+
vae_instance_for_return = shared_models["vae"]
|
| 1200 |
+
else:
|
| 1201 |
+
vae_instance_for_return = load_vae(
|
| 1202 |
+
args.vae, args.vae_chunk_size, args.vae_spatial_tile_sample_min_size, args.vae_tiling, device
|
| 1203 |
+
)
|
| 1204 |
+
|
| 1205 |
+
height, width, video_seconds, context, context_null, context_img, end_latent, control_latents, control_mask_images = (
|
| 1206 |
+
prepare_i2v_inputs(args, device, vae_instance_for_return, shared_models) # Pass VAE
|
| 1207 |
+
)
|
| 1208 |
+
|
| 1209 |
+
if shared_models is None or "model" not in shared_models:
|
| 1210 |
+
model = load_dit_model(args, device)
|
| 1211 |
+
if args.save_merged_model:
|
| 1212 |
+
# If we only want to save the model, we can skip the rest
|
| 1213 |
+
return model, None
|
| 1214 |
+
|
| 1215 |
+
if shared_models is not None:
|
| 1216 |
+
shared_models["model"] = model
|
| 1217 |
+
else:
|
| 1218 |
+
# use shared model
|
| 1219 |
+
model: HunyuanVideoTransformer3DModelPackedInference = shared_models["model"]
|
| 1220 |
+
model.move_to_device_except_swap_blocks(device) # Handles block swap correctly
|
| 1221 |
+
model.prepare_block_swap_before_forward()
|
| 1222 |
+
|
| 1223 |
+
# sampling
|
| 1224 |
+
latent_window_size = args.latent_window_size # default is 9
|
| 1225 |
+
# ex: (5s * 30fps) / (9 * 4) = 4.16 -> 4 sections, 60s -> 1800 / 36 = 50 sections
|
| 1226 |
+
total_latent_sections = (video_seconds * 30) / (latent_window_size * 4)
|
| 1227 |
+
total_latent_sections = int(max(round(total_latent_sections), 1))
|
| 1228 |
+
|
| 1229 |
+
# set random generator
|
| 1230 |
+
seed_g = torch.Generator(device="cpu")
|
| 1231 |
+
seed_g.manual_seed(seed)
|
| 1232 |
+
num_frames = latent_window_size * 4 - 3
|
| 1233 |
+
|
| 1234 |
+
logger.info(
|
| 1235 |
+
f"Video size: {height}x{width}@{video_seconds} (HxW@seconds), fps: {args.fps}, num sections: {total_latent_sections}, "
|
| 1236 |
+
f"infer_steps: {args.infer_steps}, frames per generation: {num_frames}"
|
| 1237 |
+
)
|
| 1238 |
+
|
| 1239 |
+
# video generation ######
|
| 1240 |
+
f1_mode = args.f1
|
| 1241 |
+
one_frame_inference = None
|
| 1242 |
+
if args.one_frame_inference is not None:
|
| 1243 |
+
one_frame_inference = set()
|
| 1244 |
+
for mode in args.one_frame_inference.split(","):
|
| 1245 |
+
one_frame_inference.add(mode.strip())
|
| 1246 |
+
|
| 1247 |
+
if one_frame_inference is not None:
|
| 1248 |
+
real_history_latents = generate_with_one_frame_inference(
|
| 1249 |
+
args,
|
| 1250 |
+
model,
|
| 1251 |
+
context,
|
| 1252 |
+
context_null,
|
| 1253 |
+
context_img,
|
| 1254 |
+
control_latents,
|
| 1255 |
+
control_mask_images,
|
| 1256 |
+
latent_window_size,
|
| 1257 |
+
height,
|
| 1258 |
+
width,
|
| 1259 |
+
device,
|
| 1260 |
+
seed_g,
|
| 1261 |
+
one_frame_inference,
|
| 1262 |
+
)
|
| 1263 |
+
else:
|
| 1264 |
+
# prepare history latents
|
| 1265 |
+
history_latents = torch.zeros((1, 16, 1 + 2 + 16, height // 8, width // 8), dtype=torch.float32)
|
| 1266 |
+
if end_latent is not None and not f1_mode:
|
| 1267 |
+
logger.info(f"Use end image(s): {args.end_image_path}")
|
| 1268 |
+
history_latents[:, :, :1] = end_latent.to(history_latents)
|
| 1269 |
+
|
| 1270 |
+
# prepare clean latents and indices
|
| 1271 |
+
if not f1_mode:
|
| 1272 |
+
# Inverted Anti-drifting
|
| 1273 |
+
total_generated_latent_frames = 0
|
| 1274 |
+
latent_paddings = reversed(range(total_latent_sections))
|
| 1275 |
+
|
| 1276 |
+
if total_latent_sections > 4 and one_frame_inference is None:
|
| 1277 |
+
# In theory the latent_paddings should follow the above sequence, but it seems that duplicating some
|
| 1278 |
+
# items looks better than expanding it when total_latent_sections > 4
|
| 1279 |
+
# One can try to remove below trick and just
|
| 1280 |
+
# use `latent_paddings = list(reversed(range(total_latent_sections)))` to compare
|
| 1281 |
+
# 4 sections: 3, 2, 1, 0. 50 sections: 3, 2, 2, ... 2, 1, 0
|
| 1282 |
+
latent_paddings = [3] + [2] * (total_latent_sections - 3) + [1, 0]
|
| 1283 |
+
|
| 1284 |
+
if args.latent_paddings is not None:
|
| 1285 |
+
# parse user defined latent paddings
|
| 1286 |
+
user_latent_paddings = [int(x) for x in args.latent_paddings.split(",")]
|
| 1287 |
+
if len(user_latent_paddings) < total_latent_sections:
|
| 1288 |
+
print(
|
| 1289 |
+
f"User defined latent paddings length {len(user_latent_paddings)} does not match total sections {total_latent_sections}."
|
| 1290 |
+
)
|
| 1291 |
+
print("Use default paddings instead for unspecified sections.")
|
| 1292 |
+
latent_paddings[: len(user_latent_paddings)] = user_latent_paddings
|
| 1293 |
+
elif len(user_latent_paddings) > total_latent_sections:
|
| 1294 |
+
print(
|
| 1295 |
+
f"User defined latent paddings length {len(user_latent_paddings)} is greater than total sections {total_latent_sections}."
|
| 1296 |
+
)
|
| 1297 |
+
print(f"Use only first {total_latent_sections} paddings instead.")
|
| 1298 |
+
latent_paddings = user_latent_paddings[:total_latent_sections]
|
| 1299 |
+
else:
|
| 1300 |
+
latent_paddings = user_latent_paddings
|
| 1301 |
+
else:
|
| 1302 |
+
start_latent = context_img[0]["start_latent"]
|
| 1303 |
+
history_latents = torch.cat([history_latents, start_latent], dim=2)
|
| 1304 |
+
total_generated_latent_frames = 1 # a bit hacky, but we employ the same logic as in official code
|
| 1305 |
+
latent_paddings = [0] * total_latent_sections # dummy paddings for F1 mode
|
| 1306 |
+
|
| 1307 |
+
latent_paddings = list(latent_paddings) # make sure it's a list
|
| 1308 |
+
for loop_index in range(total_latent_sections):
|
| 1309 |
+
latent_padding = latent_paddings[loop_index]
|
| 1310 |
+
|
| 1311 |
+
if not f1_mode:
|
| 1312 |
+
# Inverted Anti-drifting
|
| 1313 |
+
section_index_reverse = loop_index # 0, 1, 2, 3
|
| 1314 |
+
section_index = total_latent_sections - 1 - section_index_reverse # 3, 2, 1, 0
|
| 1315 |
+
section_index_from_last = -(section_index_reverse + 1) # -1, -2, -3, -4
|
| 1316 |
+
|
| 1317 |
+
is_last_section = section_index == 0
|
| 1318 |
+
is_first_section = section_index_reverse == 0
|
| 1319 |
+
latent_padding_size = latent_padding * latent_window_size
|
| 1320 |
+
|
| 1321 |
+
logger.info(f"latent_padding_size = {latent_padding_size}, is_last_section = {is_last_section}")
|
| 1322 |
+
else:
|
| 1323 |
+
section_index = loop_index # 0, 1, 2, 3
|
| 1324 |
+
section_index_from_last = section_index - total_latent_sections # -4, -3, -2, -1
|
| 1325 |
+
is_last_section = loop_index == total_latent_sections - 1
|
| 1326 |
+
is_first_section = loop_index == 0
|
| 1327 |
+
latent_padding_size = 0 # dummy padding for F1 mode
|
| 1328 |
+
|
| 1329 |
+
# select start latent
|
| 1330 |
+
if section_index_from_last in context_img:
|
| 1331 |
+
image_index = section_index_from_last
|
| 1332 |
+
elif section_index in context_img:
|
| 1333 |
+
image_index = section_index
|
| 1334 |
+
else:
|
| 1335 |
+
image_index = 0
|
| 1336 |
+
|
| 1337 |
+
start_latent = context_img[image_index]["start_latent"]
|
| 1338 |
+
image_path = context_img[image_index]["image_path"]
|
| 1339 |
+
if image_index != 0: # use section image other than section 0
|
| 1340 |
+
logger.info(
|
| 1341 |
+
f"Apply experimental section image, latent_padding_size = {latent_padding_size}, image_path = {image_path}"
|
| 1342 |
+
)
|
| 1343 |
+
|
| 1344 |
+
if not f1_mode:
|
| 1345 |
+
# Inverted Anti-drifting
|
| 1346 |
+
indices = torch.arange(0, sum([1, latent_padding_size, latent_window_size, 1, 2, 16])).unsqueeze(0)
|
| 1347 |
+
(
|
| 1348 |
+
clean_latent_indices_pre,
|
| 1349 |
+
blank_indices,
|
| 1350 |
+
latent_indices,
|
| 1351 |
+
clean_latent_indices_post,
|
| 1352 |
+
clean_latent_2x_indices,
|
| 1353 |
+
clean_latent_4x_indices,
|
| 1354 |
+
) = indices.split([1, latent_padding_size, latent_window_size, 1, 2, 16], dim=1)
|
| 1355 |
+
|
| 1356 |
+
clean_latent_indices = torch.cat([clean_latent_indices_pre, clean_latent_indices_post], dim=1)
|
| 1357 |
+
|
| 1358 |
+
clean_latents_pre = start_latent.to(history_latents)
|
| 1359 |
+
clean_latents_post, clean_latents_2x, clean_latents_4x = history_latents[:, :, : 1 + 2 + 16, :, :].split(
|
| 1360 |
+
[1, 2, 16], dim=2
|
| 1361 |
+
)
|
| 1362 |
+
clean_latents = torch.cat([clean_latents_pre, clean_latents_post], dim=2)
|
| 1363 |
+
|
| 1364 |
+
else:
|
| 1365 |
+
# F1 mode
|
| 1366 |
+
indices = torch.arange(0, sum([1, 16, 2, 1, latent_window_size])).unsqueeze(0)
|
| 1367 |
+
(
|
| 1368 |
+
clean_latent_indices_start,
|
| 1369 |
+
clean_latent_4x_indices,
|
| 1370 |
+
clean_latent_2x_indices,
|
| 1371 |
+
clean_latent_1x_indices,
|
| 1372 |
+
latent_indices,
|
| 1373 |
+
) = indices.split([1, 16, 2, 1, latent_window_size], dim=1)
|
| 1374 |
+
clean_latent_indices = torch.cat([clean_latent_indices_start, clean_latent_1x_indices], dim=1)
|
| 1375 |
+
|
| 1376 |
+
clean_latents_4x, clean_latents_2x, clean_latents_1x = history_latents[:, :, -sum([16, 2, 1]) :, :, :].split(
|
| 1377 |
+
[16, 2, 1], dim=2
|
| 1378 |
+
)
|
| 1379 |
+
clean_latents = torch.cat([start_latent.to(history_latents), clean_latents_1x], dim=2)
|
| 1380 |
+
|
| 1381 |
+
# if use_teacache:
|
| 1382 |
+
# transformer.initialize_teacache(enable_teacache=True, num_steps=steps)
|
| 1383 |
+
# else:
|
| 1384 |
+
# transformer.initialize_teacache(enable_teacache=False)
|
| 1385 |
+
|
| 1386 |
+
# prepare conditioning inputs
|
| 1387 |
+
if section_index_from_last in context:
|
| 1388 |
+
prompt_index = section_index_from_last
|
| 1389 |
+
elif section_index in context:
|
| 1390 |
+
prompt_index = section_index
|
| 1391 |
+
else:
|
| 1392 |
+
prompt_index = 0
|
| 1393 |
+
|
| 1394 |
+
context_for_index = context[prompt_index]
|
| 1395 |
+
# if args.section_prompts is not None:
|
| 1396 |
+
logger.info(f"Section {section_index}: {context_for_index['prompt']}")
|
| 1397 |
+
|
| 1398 |
+
llama_vec = context_for_index["llama_vec"].to(device, dtype=torch.bfloat16)
|
| 1399 |
+
llama_attention_mask = context_for_index["llama_attention_mask"].to(device)
|
| 1400 |
+
clip_l_pooler = context_for_index["clip_l_pooler"].to(device, dtype=torch.bfloat16)
|
| 1401 |
+
|
| 1402 |
+
image_encoder_last_hidden_state = context_img[image_index]["image_encoder_last_hidden_state"].to(
|
| 1403 |
+
device, dtype=torch.bfloat16
|
| 1404 |
+
)
|
| 1405 |
+
|
| 1406 |
+
llama_vec_n = context_null["llama_vec"].to(device, dtype=torch.bfloat16)
|
| 1407 |
+
llama_attention_mask_n = context_null["llama_attention_mask"].to(device)
|
| 1408 |
+
clip_l_pooler_n = context_null["clip_l_pooler"].to(device, dtype=torch.bfloat16)
|
| 1409 |
+
|
| 1410 |
+
preprocess_magcache(args, model)
|
| 1411 |
+
|
| 1412 |
+
generated_latents = sample_hunyuan(
|
| 1413 |
+
transformer=model,
|
| 1414 |
+
sampler=args.sample_solver,
|
| 1415 |
+
width=width,
|
| 1416 |
+
height=height,
|
| 1417 |
+
frames=num_frames,
|
| 1418 |
+
real_guidance_scale=args.guidance_scale,
|
| 1419 |
+
distilled_guidance_scale=args.embedded_cfg_scale,
|
| 1420 |
+
guidance_rescale=args.guidance_rescale,
|
| 1421 |
+
shift=args.flow_shift,
|
| 1422 |
+
num_inference_steps=args.infer_steps,
|
| 1423 |
+
generator=seed_g,
|
| 1424 |
+
prompt_embeds=llama_vec,
|
| 1425 |
+
prompt_embeds_mask=llama_attention_mask,
|
| 1426 |
+
prompt_poolers=clip_l_pooler,
|
| 1427 |
+
negative_prompt_embeds=llama_vec_n,
|
| 1428 |
+
negative_prompt_embeds_mask=llama_attention_mask_n,
|
| 1429 |
+
negative_prompt_poolers=clip_l_pooler_n,
|
| 1430 |
+
device=device,
|
| 1431 |
+
dtype=torch.bfloat16,
|
| 1432 |
+
image_embeddings=image_encoder_last_hidden_state,
|
| 1433 |
+
latent_indices=latent_indices,
|
| 1434 |
+
clean_latents=clean_latents,
|
| 1435 |
+
clean_latent_indices=clean_latent_indices,
|
| 1436 |
+
clean_latents_2x=clean_latents_2x,
|
| 1437 |
+
clean_latent_2x_indices=clean_latent_2x_indices,
|
| 1438 |
+
clean_latents_4x=clean_latents_4x,
|
| 1439 |
+
clean_latent_4x_indices=clean_latent_4x_indices,
|
| 1440 |
+
)
|
| 1441 |
+
postprocess_magcache(args, model)
|
| 1442 |
+
|
| 1443 |
+
# concatenate generated latents
|
| 1444 |
+
total_generated_latent_frames += int(generated_latents.shape[2])
|
| 1445 |
+
if not f1_mode:
|
| 1446 |
+
# Inverted Anti-drifting: prepend generated latents to history latents
|
| 1447 |
+
if is_last_section:
|
| 1448 |
+
generated_latents = torch.cat([start_latent.to(generated_latents), generated_latents], dim=2)
|
| 1449 |
+
total_generated_latent_frames += 1
|
| 1450 |
+
|
| 1451 |
+
history_latents = torch.cat([generated_latents.to(history_latents), history_latents], dim=2)
|
| 1452 |
+
real_history_latents = history_latents[:, :, :total_generated_latent_frames, :, :]
|
| 1453 |
+
else:
|
| 1454 |
+
# F1 mode: append generated latents to history latents
|
| 1455 |
+
history_latents = torch.cat([history_latents, generated_latents.to(history_latents)], dim=2)
|
| 1456 |
+
real_history_latents = history_latents[:, :, -total_generated_latent_frames:, :, :]
|
| 1457 |
+
|
| 1458 |
+
logger.info(f"Generated. Latent shape {real_history_latents.shape}")
|
| 1459 |
+
|
| 1460 |
+
# # TODO support saving intermediate video
|
| 1461 |
+
# clean_memory_on_device(device)
|
| 1462 |
+
# vae.to(device)
|
| 1463 |
+
# if history_pixels is None:
|
| 1464 |
+
# history_pixels = hunyuan.vae_decode(real_history_latents, vae).cpu()
|
| 1465 |
+
# else:
|
| 1466 |
+
# section_latent_frames = (latent_window_size * 2 + 1) if is_last_section else (latent_window_size * 2)
|
| 1467 |
+
# overlapped_frames = latent_window_size * 4 - 3
|
| 1468 |
+
# current_pixels = hunyuan.vae_decode(real_history_latents[:, :, :section_latent_frames], vae).cpu()
|
| 1469 |
+
# history_pixels = soft_append_bcthw(current_pixels, history_pixels, overlapped_frames)
|
| 1470 |
+
# vae.to("cpu")
|
| 1471 |
+
# # if not is_last_section:
|
| 1472 |
+
# # # save intermediate video
|
| 1473 |
+
# # save_video(history_pixels[0], args, total_generated_latent_frames)
|
| 1474 |
+
# print(f"Decoded. Current latent shape {real_history_latents.shape}; pixel shape {history_pixels.shape}")
|
| 1475 |
+
|
| 1476 |
+
# Only clean up shared models if they were created within this function
|
| 1477 |
+
wait_for_clean_memory = False
|
| 1478 |
+
if not (shared_models and "model" in shared_models) and "model" in locals(): # if model was loaded locally
|
| 1479 |
+
del model
|
| 1480 |
+
synchronize_device(device)
|
| 1481 |
+
wait_for_clean_memory = True
|
| 1482 |
+
|
| 1483 |
+
# wait for 5 seconds until block swap is done
|
| 1484 |
+
if wait_for_clean_memory and args.blocks_to_swap > 0:
|
| 1485 |
+
logger.info("Waiting for 5 seconds to finish block swap")
|
| 1486 |
+
time.sleep(5)
|
| 1487 |
+
|
| 1488 |
+
gc.collect()
|
| 1489 |
+
clean_memory_on_device(device)
|
| 1490 |
+
|
| 1491 |
+
return vae_instance_for_return, real_history_latents
|
| 1492 |
+
|
| 1493 |
+
|
| 1494 |
+
def generate_with_one_frame_inference(
    args: argparse.Namespace,
    model: HunyuanVideoTransformer3DModelPackedInference,
    context: Dict[int, Dict[str, torch.Tensor]],
    context_null: Dict[str, torch.Tensor],
    context_img: Dict[int, Dict[str, torch.Tensor]],
    control_latents: Optional[List[torch.Tensor]],
    control_mask_images: Optional[List[Optional[Image.Image]]],
    latent_window_size: int,
    height: int,
    width: int,
    device: torch.device,
    seed_g: torch.Generator,
    one_frame_inference: set[str],
) -> torch.Tensor:
    """Run single-frame (one-frame) FramePack inference.

    Builds the clean-latent conditioning (control images, optional per-image
    masks, optional zero "post" latent and 2x/4x history placeholders), then
    calls ``sample_hunyuan`` for exactly one frame.

    Args:
        args: command line arguments (guidance, solver, mask paths, ...).
        model: packed HunyuanVideo DiT used as the sampling transformer.
        context: per-section text conditioning; only index 0 is used here.
        context_null: negative (unconditional) text conditioning.
        context_img: per-image image-encoder outputs; only index 0 is used.
        control_latents: VAE latents of control images, each presumably
            (1, 16, 1, H//8, W//8) — zeros are substituted when absent.
        control_mask_images: optional PIL masks (e.g. from alpha channels),
            parallel to ``control_latents``.
        latent_window_size: FramePack window size; also the default target index.
        height: output pixel height (latents are height // 8).
        width: output pixel width (latents are width // 8).
        device: device used for sampling.
        seed_g: seeded generator for reproducible sampling.
        one_frame_inference: option tokens such as "no_post", "no_2x",
            "no_4x", "default", "target_index=N", "control_index=a;b".

    Returns:
        torch.Tensor: generated latent, cast to ``clean_latents``'s dtype/device.
    """
    # one frame inference
    sample_num_frames = 1
    latent_indices = torch.zeros((1, 1), dtype=torch.int64)  # 1x1 latent index for target image
    latent_indices[:, 0] = latent_window_size  # last of latent_window

    def get_latent_mask(mask_image: Image.Image) -> torch.Tensor:
        # Convert a PIL mask to a float [0, 1] tensor at latent resolution.
        if mask_image.mode != "L":
            mask_image = mask_image.convert("L")
        mask_image = mask_image.resize((width // 8, height // 8), Image.LANCZOS)
        mask_image = np.array(mask_image)  # PIL to numpy, HWC
        # NOTE(review): an "L"-mode image converts to a 2D (H, W) array, so the
        # squeeze(-1) below is presumably a no-op kept for safety — confirm.
        mask_image = torch.from_numpy(mask_image).float() / 255.0  # 0 to 1.0, HWC
        mask_image = mask_image.squeeze(-1)  # HWC -> HW
        mask_image = mask_image.unsqueeze(0).unsqueeze(0).unsqueeze(0)  # HW -> 111HW (BCFHW)
        mask_image = mask_image.to(torch.float32)
        return mask_image

    if control_latents is None or len(control_latents) == 0:
        logger.info("No control images provided for one frame inference. Use zero latents for control images.")
        control_latents = [torch.zeros(1, 16, 1, height // 8, width // 8, dtype=torch.float32)]

    if "no_post" not in one_frame_inference:
        # add zero latents as clean latents post
        control_latents.append(torch.zeros((1, 16, 1, height // 8, width // 8), dtype=torch.float32))
        logger.info("Add zero latents as clean latents post for one frame inference.")

    # kisekaeichi and 1f-mc: both are using control images, but indices are different
    clean_latents = torch.cat(control_latents, dim=2)  # (1, 16, num_control_images, H//8, W//8)
    clean_latent_indices = torch.zeros((1, len(control_latents)), dtype=torch.int64)
    if "no_post" not in one_frame_inference:
        clean_latent_indices[:, -1] = 1 + latent_window_size  # default index for clean latents post

    # Apply per-control-image masks: explicit mask files take precedence over
    # masks derived from the control images themselves (e.g. alpha channel).
    for i in range(len(control_latents)):
        mask_image = None
        if args.control_image_mask_path is not None and i < len(args.control_image_mask_path):
            mask_image = get_latent_mask(Image.open(args.control_image_mask_path[i]))
            logger.info(
                f"Apply mask for clean latents 1x for {i + 1}: {args.control_image_mask_path[i]}, shape: {mask_image.shape}"
            )
        elif control_mask_images is not None and i < len(control_mask_images) and control_mask_images[i] is not None:
            mask_image = get_latent_mask(control_mask_images[i])
            logger.info(f"Apply mask for clean latents 1x for {i + 1} with alpha channel: {mask_image.shape}")
        if mask_image is not None:
            clean_latents[:, :, i : i + 1, :, :] = clean_latents[:, :, i : i + 1, :, :] * mask_image

    # Index overrides supplied via --one_frame_inference tokens.
    for one_frame_param in one_frame_inference:
        if one_frame_param.startswith("target_index="):
            target_index = int(one_frame_param.split("=")[1])
            latent_indices[:, 0] = target_index
            logger.info(f"Set index for target: {target_index}")
        elif one_frame_param.startswith("control_index="):
            control_indices = one_frame_param.split("=")[1].split(";")
            i = 0
            while i < len(control_indices) and i < clean_latent_indices.shape[1]:
                control_index = int(control_indices[i])
                clean_latent_indices[:, i] = control_index
                i += 1
            logger.info(f"Set index for clean latent 1x: {control_indices}")

    # "default" option does nothing, so we can skip it
    if "default" in one_frame_inference:
        pass

    if "no_2x" in one_frame_inference:
        clean_latents_2x = None
        clean_latent_2x_indices = None
        logger.info("No clean_latents_2x")
    else:
        # zero 2x history placeholder placed right after the target window
        clean_latents_2x = torch.zeros((1, 16, 2, height // 8, width // 8), dtype=torch.float32)
        index = 1 + latent_window_size + 1
        clean_latent_2x_indices = torch.arange(index, index + 2).unsqueeze(0)  # 2

    if "no_4x" in one_frame_inference:
        clean_latents_4x = None
        clean_latent_4x_indices = None
        logger.info("No clean_latents_4x")
    else:
        # zero 4x history placeholder placed after the 2x slots
        clean_latents_4x = torch.zeros((1, 16, 16, height // 8, width // 8), dtype=torch.float32)
        index = 1 + latent_window_size + 1 + 2
        clean_latent_4x_indices = torch.arange(index, index + 16).unsqueeze(0)  # 16

    logger.info(
        f"One frame inference. clean_latent: {clean_latents.shape} latent_indices: {latent_indices}, clean_latent_indices: {clean_latent_indices}, num_frames: {sample_num_frames}"
    )

    # prepare conditioning inputs
    prompt_index = 0
    image_index = 0

    context_for_index = context[prompt_index]
    logger.info(f"Prompt: {context_for_index['prompt']}")

    llama_vec = context_for_index["llama_vec"].to(device, dtype=torch.bfloat16)
    llama_attention_mask = context_for_index["llama_attention_mask"].to(device)
    clip_l_pooler = context_for_index["clip_l_pooler"].to(device, dtype=torch.bfloat16)

    image_encoder_last_hidden_state = context_img[image_index]["image_encoder_last_hidden_state"].to(device, dtype=torch.bfloat16)

    llama_vec_n = context_null["llama_vec"].to(device, dtype=torch.bfloat16)
    llama_attention_mask_n = context_null["llama_attention_mask"].to(device)
    clip_l_pooler_n = context_null["clip_l_pooler"].to(device, dtype=torch.bfloat16)

    preprocess_magcache(args, model)

    generated_latents = sample_hunyuan(
        transformer=model,
        sampler=args.sample_solver,
        width=width,
        height=height,
        frames=1,
        real_guidance_scale=args.guidance_scale,
        distilled_guidance_scale=args.embedded_cfg_scale,
        guidance_rescale=args.guidance_rescale,
        shift=args.flow_shift,
        num_inference_steps=args.infer_steps,
        generator=seed_g,
        prompt_embeds=llama_vec,
        prompt_embeds_mask=llama_attention_mask,
        prompt_poolers=clip_l_pooler,
        negative_prompt_embeds=llama_vec_n,
        negative_prompt_embeds_mask=llama_attention_mask_n,
        negative_prompt_poolers=clip_l_pooler_n,
        device=device,
        dtype=torch.bfloat16,
        image_embeddings=image_encoder_last_hidden_state,
        latent_indices=latent_indices,
        clean_latents=clean_latents,
        clean_latent_indices=clean_latent_indices,
        clean_latents_2x=clean_latents_2x,
        clean_latent_2x_indices=clean_latent_2x_indices,
        clean_latents_4x=clean_latents_4x,
        clean_latent_4x_indices=clean_latent_4x_indices,
    )

    postprocess_magcache(args, model)

    # match dtype/device of the conditioning latents for downstream decoding
    real_history_latents = generated_latents.to(clean_latents)
    return real_history_latents
|
| 1646 |
+
|
| 1647 |
+
|
| 1648 |
+
def save_latent(latent: torch.Tensor, args: argparse.Namespace, height: int, width: int) -> str:
    """Save latent to a safetensors file.

    Args:
        latent: Latent tensor
        args: command line arguments
        height: height of frame (pixels)
        width: width of frame (pixels)

    Returns:
        str: Path to saved latent file
    """
    save_path = args.save_path
    os.makedirs(save_path, exist_ok=True)
    time_flag = get_time_flag()

    seed = args.seed
    video_seconds = args.video_seconds

    latent_path = f"{save_path}/{time_flag}_{seed}_latent.safetensors"

    if args.no_metadata:
        metadata = None
    else:
        # NOTE: safetensors metadata values must be strings.
        # Fix: "latent_window_size" was listed twice in this dict literal;
        # duplicate dict keys silently overwrite each other, so keep one entry.
        metadata = {
            "seeds": f"{seed}",
            "prompt": f"{args.prompt}",
            "height": f"{height}",
            "width": f"{width}",
            "video_seconds": f"{video_seconds}",
            "infer_steps": f"{args.infer_steps}",
            "guidance_scale": f"{args.guidance_scale}",
            "latent_window_size": f"{args.latent_window_size}",
            "embedded_cfg_scale": f"{args.embedded_cfg_scale}",
            "guidance_rescale": f"{args.guidance_rescale}",
            "sample_solver": f"{args.sample_solver}",
            "fps": f"{args.fps}",
        }
        # Guarded inside the else-branch so we never index into metadata=None.
        if args.negative_prompt is not None:
            metadata["negative_prompt"] = f"{args.negative_prompt}"

    sd = {"latent": latent.contiguous()}
    save_file(sd, latent_path, metadata=metadata)
    logger.info(f"Latent saved to: {latent_path}")

    return latent_path
|
| 1695 |
+
|
| 1696 |
+
|
| 1697 |
+
def save_video(
    video: torch.Tensor, args: argparse.Namespace, original_base_name: Optional[str] = None, latent_frames: Optional[int] = None
) -> str:
    """Write a video tensor to an mp4 under ``args.save_path``.

    Args:
        video: video tensor; a leading batch dimension is added before saving
        args: command line arguments
        original_base_name: Original base name (if latents are loaded from files)
        latent_frames: optional latent frame count appended to the file name

    Returns:
        str: Path to saved video file
    """
    out_dir = args.save_path
    os.makedirs(out_dir, exist_ok=True)

    name_suffix = "" if original_base_name is None else f"_{original_base_name}"
    frames_suffix = "" if latent_frames is None else f"_{latent_frames}"
    video_path = f"{out_dir}/{get_time_flag()}_{args.seed}{name_suffix}{frames_suffix}.mp4"

    save_videos_grid(video.unsqueeze(0), video_path, fps=args.fps, rescale=True)
    logger.info(f"Video saved to: {video_path}")

    return video_path
|
| 1724 |
+
|
| 1725 |
+
|
| 1726 |
+
def save_images(sample: torch.Tensor, args: argparse.Namespace, original_base_name: Optional[str] = None) -> str:
    """Write a sample tensor as individual images under ``args.save_path``.

    Args:
        sample: video/image tensor; a leading batch dimension is added before saving
        args: command line arguments
        original_base_name: Original base name (if latents are loaded from files)

    Returns:
        str: Path prefix of the saved images
    """
    out_dir = args.save_path
    os.makedirs(out_dir, exist_ok=True)

    suffix = "" if original_base_name is None else f"_{original_base_name}"
    image_name = f"{get_time_flag()}_{args.seed}{suffix}"

    # One-frame mode writes flat files; normal mode groups frames in a subdirectory.
    one_frame_mode = args.one_frame_inference is not None
    save_images_grid(sample.unsqueeze(0), out_dir, image_name, rescale=True, create_subdir=not one_frame_mode)
    logger.info(f"Sample images saved to: {out_dir}/{image_name}")

    return f"{out_dir}/{image_name}"
|
| 1750 |
+
|
| 1751 |
+
|
| 1752 |
+
def save_output(
    args: argparse.Namespace,
    vae: AutoencoderKLCausal3D,  # Expect a VAE instance for decoding
    latent: torch.Tensor,
    device: torch.device,
    original_base_names: Optional[List[str]] = None,
) -> None:
    """Save the generated output as latent, video and/or images per ``args.output_type``.

    Args:
        args: command line arguments
        vae: VAE model (required unless output_type == "latent")
        latent: latent tensor (CTHW; spatial dims are 1/8 of pixel size)
        device: device to use for decoding
        original_base_names: original base names (if latents are loaded from files)
    """
    height, width = latent.shape[-2], latent.shape[-1]  # BCTHW
    height *= 8
    width *= 8
    if args.output_type == "latent" or args.output_type == "both" or args.output_type == "latent_images":
        # save latent
        save_latent(latent, args, height, width)
    if args.output_type == "latent":
        return

    if vae is None:
        logger.error("VAE is None, cannot decode latents for saving video/images.")
        return

    total_latent_sections = (args.video_seconds * 30) / (args.latent_window_size * 4)
    total_latent_sections = int(max(round(total_latent_sections), 1))
    video = decode_latent(
        args.latent_window_size, total_latent_sections, args.bulk_decode, vae, latent, device, args.one_frame_inference is not None
    )

    # Fix: pass the raw base name (or None). save_video/save_images add the "_"
    # prefix themselves; the previous code pre-formatted f"_{name}" (and "" for
    # the None case, which is not None), producing "__name" / stray "_" suffixes.
    original_name = original_base_names[0] if original_base_names else None

    if args.output_type == "video" or args.output_type == "both":
        # save video
        save_video(video, args, original_name)

    elif args.output_type == "images" or args.output_type == "latent_images":
        # save images
        save_images(video, args, original_name)
|
| 1797 |
+
|
| 1798 |
+
|
| 1799 |
+
def preprocess_prompts_for_batch(prompt_lines: List[str], base_args: argparse.Namespace) -> List[Dict]:
    """Parse prompt-file lines into override dictionaries for batch mode.

    Args:
        prompt_lines: List of prompt lines
        base_args: Base command line arguments

    Returns:
        List[Dict]: List of prompt data dictionaries
    """
    prompts_data = []

    for raw_line in prompt_lines:
        text = raw_line.strip()
        # Blank lines and "#" comments carry no prompt data.
        if not text or text.startswith("#"):
            continue

        # Each remaining line becomes an override dictionary.
        parsed = parse_prompt_line(text)
        logger.info(f"Parsed prompt data: {parsed}")
        prompts_data.append(parsed)

    return prompts_data
|
| 1822 |
+
|
| 1823 |
+
|
| 1824 |
+
def load_shared_models(args: argparse.Namespace) -> Dict:
    """Load shared models for batch processing or interactive mode.

    Text and image encoders are loaded to CPU to save device memory.
    Neither the VAE nor the DiT model is loaded here; those are handled by
    process_batch_prompts / generate.

    Args:
        args: Base command line arguments

    Returns:
        Dict: Dictionary of shared models (text/image encoders)
    """
    # Text encoders (CPU)
    tokenizer1, text_encoder1 = load_text_encoder1(args, args.fp8_llm, "cpu")
    tokenizer2, text_encoder2 = load_text_encoder2(args)  # loads to CPU or handles device internally
    # Image encoders (CPU)
    feature_extractor, image_encoder = load_image_encoders(args)  # loads to CPU or handles device internally

    return {
        "tokenizer1": tokenizer1,
        "text_encoder1": text_encoder1,
        "tokenizer2": tokenizer2,
        "text_encoder2": text_encoder2,
        "feature_extractor": feature_extractor,
        "image_encoder": image_encoder,
    }
|
| 1850 |
+
|
| 1851 |
+
|
| 1852 |
+
def process_batch_prompts(prompts_data: List[Dict], args: argparse.Namespace) -> None:
    """Process multiple prompts with model reuse and batched precomputation.

    Pipeline (each phase loads its models once, uses them for every prompt,
    then frees them before the next phase to bound peak memory):
      1. VAE + image encoders: precompute image conditioning per prompt.
      2. Text encoders: precompute text conditioning per prompt.
      3. DiT: generate latents for all prompts.
      4. VAE again: decode and save outputs.

    Args:
        prompts_data: List of prompt data dictionaries
        args: Base command line arguments
    """
    if not prompts_data:
        logger.warning("No valid prompts found")
        return

    gen_settings = get_generation_settings(args)
    device = gen_settings.device

    # 1. Precompute Image Data (VAE and Image Encoders)
    logger.info("Loading VAE and Image Encoders for batch image preprocessing...")
    vae_for_batch = load_vae(args.vae, args.vae_chunk_size, args.vae_spatial_tile_sample_min_size, args.vae_tiling, "cpu")
    feature_extractor_batch, image_encoder_batch = load_image_encoders(args)  # Assume loads to CPU

    all_precomputed_image_data = []
    all_prompt_args_list = [apply_overrides(args, pd) for pd in prompts_data]  # Create all arg instances first

    logger.info("Preprocessing images and VAE encoding for all prompts...")

    # VAE and Image Encoder to device for this phase, because we do not want to offload them to CPU
    vae_for_batch.to(device)
    image_encoder_batch.to(device)

    # Pass models via a temporary shared_models dict for prepare_image_inputs
    # This ensures prepare_image_inputs can use them if it expects them in shared_models
    # Or it can load them if this dict is empty (though here we provide them)
    temp_shared_models_img = {"feature_extractor": feature_extractor_batch, "image_encoder": image_encoder_batch}

    for i, prompt_args_item in enumerate(all_prompt_args_list):
        logger.info(f"Image preprocessing for prompt {i + 1}/{len(all_prompt_args_list)}: {prompt_args_item.prompt}")
        # prepare_image_inputs will move vae/image_encoder to device temporarily
        image_data = prepare_image_inputs(prompt_args_item, device, vae_for_batch, temp_shared_models_img)
        all_precomputed_image_data.append(image_data)

    # Models should be back on GPU because prepare_image_inputs moved them to the original device
    del feature_extractor_batch, image_encoder_batch, temp_shared_models_img
    vae_for_batch.to("cpu")  # Move VAE back to CPU; reused for decoding in phase 4
    clean_memory_on_device(device)

    # 2. Precompute Text Data (Text Encoders)
    logger.info("Loading Text Encoders for batch text preprocessing...")
    # Text Encoders loaded to CPU by load_text_encoder1/2
    tokenizer1_batch, text_encoder1_batch = load_text_encoder1(args, args.fp8_llm, device)
    tokenizer2_batch, text_encoder2_batch = load_text_encoder2(args)

    # Text Encoders to device for this phase
    text_encoder2_batch.to(device)  # Moved into prepare_text_inputs logic

    all_precomputed_text_data = []
    conds_cache_batch = {}  # caches encodings so repeated prompts are encoded once

    logger.info("Preprocessing text and LLM/TextEncoder encoding for all prompts...")
    temp_shared_models_txt = {
        "tokenizer1": tokenizer1_batch,
        "text_encoder1": text_encoder1_batch,  # on GPU
        "tokenizer2": tokenizer2_batch,
        "text_encoder2": text_encoder2_batch,  # on GPU
        "conds_cache": conds_cache_batch,
    }

    for i, prompt_args_item in enumerate(all_prompt_args_list):
        logger.info(f"Text preprocessing for prompt {i + 1}/{len(all_prompt_args_list)}: {prompt_args_item.prompt}")
        # prepare_text_inputs will move text_encoders to device temporarily
        text_data = prepare_text_inputs(prompt_args_item, device, temp_shared_models_txt)
        all_precomputed_text_data.append(text_data)

    # Models should be removed from device after prepare_text_inputs
    del tokenizer1_batch, text_encoder1_batch, tokenizer2_batch, text_encoder2_batch, temp_shared_models_txt, conds_cache_batch
    gc.collect()  # transformer==4.54.1 seems to need this to free memory
    clean_memory_on_device(device)

    # 3. Load DiT Model once
    logger.info("Loading DiT model for batch generation...")
    # Use args from the first prompt for DiT loading (LoRA etc. should be consistent for a batch)
    first_prompt_args = all_prompt_args_list[0]

    dit_model = load_dit_model(first_prompt_args, device)  # Load directly to target device if possible
    if first_prompt_args.save_merged_model:
        logger.info("Merged DiT model saved. Skipping generation.")
        del dit_model
        clean_memory_on_device(device)
        return

    shared_models_for_generate = {"model": dit_model}  # Pass DiT via shared_models

    all_latents = []

    logger.info("Generating latents for all prompts...")
    with torch.no_grad():
        for i, prompt_args_item in enumerate(all_prompt_args_list):
            current_image_data = all_precomputed_image_data[i]
            current_text_data = all_precomputed_text_data[i]

            logger.info(f"Generating latent for prompt {i + 1}/{len(all_prompt_args_list)}: {prompt_args_item.prompt}")
            try:
                # generate is called with precomputed data, so it won't load VAE/Text/Image encoders.
                # It will use the DiT model from shared_models_for_generate.
                # The VAE instance returned by generate will be None here.
                _, latent = generate(
                    prompt_args_item, gen_settings, shared_models_for_generate, current_image_data, current_text_data
                )

                if latent is None and prompt_args_item.save_merged_model:  # Should be caught earlier
                    continue

                # Save latent if needed (using data from precomputed_image_data for H/W)
                if prompt_args_item.output_type in ["latent", "both", "latent_images"]:
                    height = current_image_data["height"]
                    width = current_image_data["width"]
                    save_latent(latent, prompt_args_item, height, width)

                all_latents.append(latent)
            except Exception as e:
                # Keep going on per-prompt failure; placeholder keeps indices aligned
                # between all_latents and all_prompt_args_list.
                logger.error(f"Error generating latent for prompt: {prompt_args_item.prompt}. Error: {e}", exc_info=True)
                all_latents.append(None)  # Add placeholder for failed generations
                continue

    # Free DiT model
    logger.info("Releasing DiT model from memory...")
    if args.blocks_to_swap > 0:
        logger.info("Waiting for 5 seconds to finish block swap")
        time.sleep(5)

    del shared_models_for_generate["model"]
    del dit_model
    gc.collect()
    clean_memory_on_device(device)
    synchronize_device(device)  # Ensure memory is freed before loading VAE for decoding

    # 4. Decode latents and save outputs (using vae_for_batch)
    if args.output_type != "latent":
        logger.info("Decoding latents to videos/images using batched VAE...")
        vae_for_batch.to(device)  # Move VAE to device for decoding

        for i, latent in enumerate(all_latents):
            if latent is None:  # Skip failed generations
                logger.warning(f"Skipping decoding for prompt {i + 1} due to previous error.")
                continue

            current_args = all_prompt_args_list[i]
            logger.info(f"Decoding output {i + 1}/{len(all_latents)} for prompt: {current_args.prompt}")

            # if args.output_type is "both" or "latent_images", we already saved latent above.
            # so we skip saving latent here.
            if current_args.output_type == "both":
                current_args.output_type = "video"
            elif current_args.output_type == "latent_images":
                current_args.output_type = "images"

            # save_output expects latent to be [BCTHW] or [CTHW]. generate returns [BCTHW] (batch size 1).
            # latent[0] is correct if generate returns it with batch dim.
            # The latent from generate is (1, C, T, H, W)
            save_output(current_args, vae_for_batch, latent[0], device)  # Pass vae_for_batch

        vae_for_batch.to("cpu")  # Move VAE back to CPU

    del vae_for_batch
    clean_memory_on_device(device)
|
| 2015 |
+
|
| 2016 |
+
|
| 2017 |
+
def process_interactive(args: argparse.Namespace) -> None:
    """Process prompts in interactive mode.

    Keeps the text/image encoders (and the DiT once loaded by ``generate``)
    resident in ``shared_models`` across prompts so repeated generations do
    not reload them. Exits on EOF (Ctrl+D, or Ctrl+Z on Windows).

    Args:
        args: Base command line arguments; per-prompt overrides are parsed
            from each input line and applied on top of these.
    """
    gen_settings = get_generation_settings(args)
    device = gen_settings.device
    shared_models = load_shared_models(args)
    shared_models["conds_cache"] = {}  # Initialize empty cache for interactive mode

    print("Interactive mode. Enter prompts (Ctrl+D or Ctrl+Z (Windows) to exit):")

    # prompt_toolkit gives line editing/history; fall back to builtin input() if absent
    try:
        import prompt_toolkit
    except ImportError:
        logger.warning("prompt_toolkit not found. Using basic input instead.")
        prompt_toolkit = None

    if prompt_toolkit:
        session = prompt_toolkit.PromptSession()

        def input_line(prompt: str) -> str:
            return session.prompt(prompt)

    else:

        def input_line(prompt: str) -> str:
            return input(prompt)

    try:
        while True:
            try:
                line = input_line("> ")
                if not line.strip():
                    continue
                # prompt_toolkit returns control characters as literal text instead of raising EOFError
                if len(line.strip()) == 1 and line.strip() in ["\x04", "\x1a"]:  # Ctrl+D or Ctrl+Z with prompt_toolkit
                    raise EOFError  # Exit on Ctrl+D or Ctrl+Z

                # Parse prompt
                prompt_data = parse_prompt_line(line)
                prompt_args = apply_overrides(args, prompt_data)

                # Generate latent
                # For interactive, precomputed data is None. shared_models contains text/image encoders.
                # generate will load VAE internally.
                returned_vae, latent = generate(prompt_args, gen_settings, shared_models)

                # If not one_frame_inference, move DiT model to CPU after generation
                # if not prompt_args.one_frame_inference:
                if prompt_args.blocks_to_swap > 0:
                    # give asynchronous block-swap transfers time to drain before moving the model
                    logger.info("Waiting for 5 seconds to finish block swap")
                    time.sleep(5)
                # NOTE(review): assumes generate() has stored the DiT under "model" — confirm;
                # .get() would return None and the .to() call would fail otherwise
                model = shared_models.get("model")
                model.to("cpu")  # Move DiT model to CPU after generation
                clean_memory_on_device(device)
                synchronize_device(device)  # Ensure memory is freed before loading VAE for decoding

                # Save latent and video
                # returned_vae from generate will be used for decoding here.
                # latent is expected to carry a batch dim; latent[0] drops it for save_output
                save_output(prompt_args, returned_vae, latent[0], device)

            except KeyboardInterrupt:
                # Ctrl+C aborts the current generation but keeps the session alive
                print("\nInterrupted. Continue (Ctrl+D or Ctrl+Z (Windows) to exit)")
                continue

    except EOFError:
        print("\nExiting interactive mode")
|
| 2085 |
+
|
| 2086 |
+
|
| 2087 |
+
def get_generation_settings(args: argparse.Namespace) -> GenerationSettings:
    """Build a GenerationSettings from parsed CLI arguments.

    Chooses the DiT weight dtype from the fp8 flags:
    - ``--fp8_scaled``: weights keep their per-tensor precisions, so no cast (None).
    - ``--fp8``: cast weights to float8_e4m3fn.
    - otherwise: None (keep the checkpoint's native dtype).

    Args:
        args: Parsed command line arguments (``device``, ``fp8_scaled``, ``fp8``).

    Returns:
        GenerationSettings with the resolved device and DiT weight dtype.
    """
    device = torch.device(args.device)

    dit_weight_dtype = None  # default
    if args.fp8_scaled:
        dit_weight_dtype = None  # various precision weights, so don't cast to specific dtype
    elif args.fp8:
        dit_weight_dtype = torch.float8_e4m3fn

    # fix: log message previously read "DiT weight weight precision" (duplicated word)
    logger.info(f"Using device: {device}, DiT weight precision: {dit_weight_dtype}")

    gen_settings = GenerationSettings(device=device, dit_weight_dtype=dit_weight_dtype)
    return gen_settings
|
| 2100 |
+
|
| 2101 |
+
|
| 2102 |
+
def main():
    """Entry point: dispatch to latent-decode, batch-file, interactive, or single-prompt mode."""
    # Parse arguments
    args = parse_args()

    assert (not args.save_merged_model) or (not args.fp8_scaled), "Save merged model is not compatible with fp8_scaled"

    # Check if latents are provided: if so we only decode, no generation
    latents_mode = args.latent_path is not None and len(args.latent_path) > 0

    # Set device
    device = args.device if args.device is not None else "cuda" if torch.cuda.is_available() else "cpu"
    device = torch.device(device)
    logger.info(f"Using device: {device}")
    args.device = device

    if latents_mode:
        # Original latent decode mode: load saved latents and decode them with the VAE
        original_base_names = []
        latents_list = []
        seeds = []

        # assert len(args.latent_path) == 1, "Only one latent path is supported for now"

        for latent_path in args.latent_path:
            original_base_names.append(os.path.splitext(os.path.basename(latent_path))[0])
            seed = 0  # default when the file carries no seed metadata

            if os.path.splitext(latent_path)[1] != ".safetensors":
                # non-safetensors files are assumed to be raw torch saves with no metadata
                latents = torch.load(latent_path, map_location="cpu")
            else:
                state_dict = load_file(latent_path)
                if "latent" in state_dict:
                    latents = state_dict["latent"]
                else:
                    # fall back to any "latent*" key that looks like a latent tensor
                    for key in state_dict:
                        if key.startswith("latent") and state_dict[key].ndim >= 4:
                            latents = state_dict[key]
                            logger.warning(f"'latent' not found in state_dict. Using '{key}' instead.")
                            break
                    else:
                        raise KeyError(f"'latent' not found in state_dict keys: {list(state_dict.keys())}")

                # safetensors metadata may carry the generation parameters; restore them into args
                with safe_open(latent_path, framework="pt") as f:
                    metadata = f.metadata()
                if metadata is None:
                    metadata = {}
                logger.info(f"Loaded metadata: {metadata}")

                if "seeds" in metadata:
                    seed = int(metadata["seeds"])
                if "height" in metadata and "width" in metadata:
                    height = int(metadata["height"])
                    width = int(metadata["width"])
                    args.video_size = [height, width]
                if "video_seconds" in metadata:
                    args.video_seconds = float(metadata["video_seconds"])

            seeds.append(seed)
            logger.info(f"Loaded latent from {latent_path}. Shape: {latents.shape}")

            if latents.ndim == 5:  # [BCTHW]
                latents = latents.squeeze(0)  # [CTHW]

            latents_list.append(latents)

        # latent = torch.stack(latents_list, dim=0)  # [N, ...], must be same shape

        for i, latent in enumerate(latents_list):
            args.seed = seeds[i]  # so the saved output filename reflects the original seed

            # NOTE(review): VAE is reloaded per latent — presumably for memory, confirm intent
            vae = load_vae(args.vae, args.vae_chunk_size, args.vae_spatial_tile_sample_min_size, args.vae_tiling, device)
            save_output(args, vae, latent, device, original_base_names)

    elif args.from_file:
        # Batch mode from file

        # Read prompts from file
        with open(args.from_file, "r", encoding="utf-8") as f:
            prompt_lines = f.readlines()

        # Process prompts
        prompts_data = preprocess_prompts_for_batch(prompt_lines, args)
        process_batch_prompts(prompts_data, args)

    elif args.interactive:
        # Interactive mode
        process_interactive(args)

    else:
        # Single prompt mode (original behavior)

        # Generate latent
        gen_settings = get_generation_settings(args)
        # For single mode, precomputed data is None, shared_models is None.
        # generate will load all necessary models (VAE, Text/Image Encoders, DiT).
        returned_vae, latent = generate(args, gen_settings)
        # print(f"Generated latent shape: {latent.shape}")
        if args.save_merged_model:
            # merged-model export only; nothing was generated, so nothing to save
            return

        # Save latent and video
        # returned_vae from generate will be used for decoding here.
        save_output(args, returned_vae, latent[0], device)

    logger.info("Done!")
|
| 2207 |
+
|
| 2208 |
+
|
| 2209 |
+
# Script entry point: run main() only when executed directly, not when imported.
if __name__ == "__main__":
    main()
|
src/musubi_tuner/fpack_train_network.py
ADDED
|
@@ -0,0 +1,637 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import gc
|
| 3 |
+
import math
|
| 4 |
+
import time
|
| 5 |
+
from typing import Optional
|
| 6 |
+
from PIL import Image
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
import numpy as np
|
| 10 |
+
import torch
|
| 11 |
+
from accelerate import Accelerator
|
| 12 |
+
|
| 13 |
+
from musubi_tuner.dataset import image_video_dataset
|
| 14 |
+
from musubi_tuner.dataset.image_video_dataset import ARCHITECTURE_FRAMEPACK, ARCHITECTURE_FRAMEPACK_FULL
|
| 15 |
+
from musubi_tuner.fpack_generate_video import decode_latent
|
| 16 |
+
from musubi_tuner.frame_pack import hunyuan
|
| 17 |
+
from musubi_tuner.frame_pack.clip_vision import hf_clip_vision_encode
|
| 18 |
+
from musubi_tuner.frame_pack.framepack_utils import load_image_encoders, load_text_encoder1, load_text_encoder2
|
| 19 |
+
from musubi_tuner.frame_pack.framepack_utils import load_vae as load_framepack_vae
|
| 20 |
+
from musubi_tuner.frame_pack.hunyuan_video_packed import HunyuanVideoTransformer3DModelPacked, load_packed_model
|
| 21 |
+
from musubi_tuner.frame_pack.k_diffusion_hunyuan import sample_hunyuan
|
| 22 |
+
from musubi_tuner.frame_pack.utils import crop_or_pad_yield_mask
|
| 23 |
+
from musubi_tuner.dataset.image_video_dataset import resize_image_to_bucket
|
| 24 |
+
from musubi_tuner.hv_train_network import (
|
| 25 |
+
NetworkTrainer,
|
| 26 |
+
load_prompts,
|
| 27 |
+
clean_memory_on_device,
|
| 28 |
+
setup_parser_common,
|
| 29 |
+
read_config_from_file,
|
| 30 |
+
)
|
| 31 |
+
|
| 32 |
+
import logging
|
| 33 |
+
|
| 34 |
+
from musubi_tuner.utils import model_utils
|
| 35 |
+
|
| 36 |
+
logger = logging.getLogger(__name__)
|
| 37 |
+
logging.basicConfig(level=logging.INFO)
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
class FramePackNetworkTrainer(NetworkTrainer):
|
| 41 |
+
    def __init__(self):
        # No FramePack-specific state; all configuration happens in the base trainer.
        super().__init__()
|
| 43 |
+
|
| 44 |
+
# region model specific
|
| 45 |
+
|
| 46 |
+
    @property
    def architecture(self) -> str:
        """Short architecture identifier used for dataset/cache routing."""
        return ARCHITECTURE_FRAMEPACK
|
| 49 |
+
|
| 50 |
+
    @property
    def architecture_full_name(self) -> str:
        """Human-readable architecture name (e.g. for metadata/logging)."""
        return ARCHITECTURE_FRAMEPACK_FULL
|
| 53 |
+
|
| 54 |
+
    def handle_model_specific_args(self, args):
        """Set FramePack-specific training flags on the trainer instance."""
        self._i2v_training = True  # FramePack training is always image-to-video
        self._control_training = False
        self.default_guidance_scale = 10.0  # embedded (distilled) guidance scale
|
| 58 |
+
|
| 59 |
+
    def process_sample_prompts(
        self,
        args: argparse.Namespace,
        accelerator: Accelerator,
        sample_prompts: str,
    ):
        """Pre-encode sample prompts (text and image conditioning) for sampling during training.

        Loads the text encoders and the image encoder one at a time, caches their
        outputs on CPU, then frees each model before loading the next to keep peak
        VRAM low. Returns one parameter dict per prompt with the cached tensors
        attached.
        """
        device = accelerator.device

        logger.info(f"cache Text Encoder outputs for sample prompt: {sample_prompts}")
        prompts = load_prompts(sample_prompts)

        # load text encoder
        tokenizer1, text_encoder1 = load_text_encoder1(args, args.fp8_llm, device)
        tokenizer2, text_encoder2 = load_text_encoder2(args)
        text_encoder2.to(device)

        sample_prompts_te_outputs = {}  # (prompt) -> (t1 embeds, t1 mask, t2 embeds)
        for prompt_dict in prompts:
            # cache both the positive and negative prompt; duplicates are encoded once
            for p in [prompt_dict.get("prompt", ""), prompt_dict.get("negative_prompt", "")]:
                if p is None or p in sample_prompts_te_outputs:
                    continue
                logger.info(f"cache Text Encoder outputs for prompt: {p}")
                with torch.amp.autocast(device_type=device.type, dtype=text_encoder1.dtype), torch.no_grad():
                    llama_vec, clip_l_pooler = hunyuan.encode_prompt_conds(p, text_encoder1, text_encoder2, tokenizer1, tokenizer2)
                    # pad/crop the LLaMA embedding to a fixed length of 512 and get its attention mask
                    llama_vec, llama_attention_mask = crop_or_pad_yield_mask(llama_vec, length=512)

                # keep cached embeddings on CPU until sampling time
                llama_vec = llama_vec.to("cpu")
                llama_attention_mask = llama_attention_mask.to("cpu")
                clip_l_pooler = clip_l_pooler.to("cpu")
                sample_prompts_te_outputs[p] = (llama_vec, llama_attention_mask, clip_l_pooler)
        del text_encoder1, text_encoder2
        clean_memory_on_device(device)

        # image embedding for I2V training
        feature_extractor, image_encoder = load_image_encoders(args)
        image_encoder.to(device)

        # encode image with image encoder
        sample_prompts_image_embs = {}
        for prompt_dict in prompts:
            image_path = prompt_dict.get("image_path", None)
            assert image_path is not None, "image_path should be set for I2V training"
            if image_path in sample_prompts_image_embs:
                continue

            logger.info(f"Encoding image to image encoder context: {image_path}")

            height = prompt_dict.get("height", 256)
            width = prompt_dict.get("width", 256)

            img = Image.open(image_path).convert("RGB")
            img_np = np.array(img)  # PIL to numpy, HWC
            img_np = image_video_dataset.resize_image_to_bucket(img_np, (width, height))  # returns a numpy array

            with torch.no_grad():
                image_encoder_output = hf_clip_vision_encode(img_np, feature_extractor, image_encoder)
                image_encoder_last_hidden_state = image_encoder_output.last_hidden_state

            image_encoder_last_hidden_state = image_encoder_last_hidden_state.to("cpu")
            sample_prompts_image_embs[image_path] = image_encoder_last_hidden_state

        del image_encoder
        clean_memory_on_device(device)

        # prepare sample parameters: copy each prompt dict and attach the cached tensors
        sample_parameters = []
        for prompt_dict in prompts:
            prompt_dict_copy = prompt_dict.copy()

            p = prompt_dict.get("prompt", "")
            llama_vec, llama_attention_mask, clip_l_pooler = sample_prompts_te_outputs[p]
            prompt_dict_copy["llama_vec"] = llama_vec
            prompt_dict_copy["llama_attention_mask"] = llama_attention_mask
            prompt_dict_copy["clip_l_pooler"] = clip_l_pooler

            p = prompt_dict.get("negative_prompt", "")
            llama_vec, llama_attention_mask, clip_l_pooler = sample_prompts_te_outputs[p]
            prompt_dict_copy["negative_llama_vec"] = llama_vec
            prompt_dict_copy["negative_llama_attention_mask"] = llama_attention_mask
            prompt_dict_copy["negative_clip_l_pooler"] = clip_l_pooler

            p = prompt_dict.get("image_path", None)
            prompt_dict_copy["image_encoder_last_hidden_state"] = sample_prompts_image_embs[p]

            sample_parameters.append(prompt_dict_copy)

        clean_memory_on_device(accelerator.device)
        return sample_parameters
|
| 147 |
+
|
| 148 |
+
def do_inference(
|
| 149 |
+
self,
|
| 150 |
+
accelerator,
|
| 151 |
+
args,
|
| 152 |
+
sample_parameter,
|
| 153 |
+
vae,
|
| 154 |
+
dit_dtype,
|
| 155 |
+
transformer,
|
| 156 |
+
discrete_flow_shift,
|
| 157 |
+
sample_steps,
|
| 158 |
+
width,
|
| 159 |
+
height,
|
| 160 |
+
frame_count,
|
| 161 |
+
generator,
|
| 162 |
+
do_classifier_free_guidance,
|
| 163 |
+
guidance_scale,
|
| 164 |
+
cfg_scale,
|
| 165 |
+
image_path=None,
|
| 166 |
+
control_video_path=None,
|
| 167 |
+
):
|
| 168 |
+
"""architecture dependent inference"""
|
| 169 |
+
model: HunyuanVideoTransformer3DModelPacked = transformer
|
| 170 |
+
device = accelerator.device
|
| 171 |
+
if cfg_scale is None:
|
| 172 |
+
cfg_scale = 1.0
|
| 173 |
+
do_classifier_free_guidance = do_classifier_free_guidance and cfg_scale != 1.0
|
| 174 |
+
|
| 175 |
+
# prepare parameters
|
| 176 |
+
one_frame_mode = args.one_frame
|
| 177 |
+
if one_frame_mode:
|
| 178 |
+
one_frame_inference = set()
|
| 179 |
+
for mode in sample_parameter["one_frame"].split(","):
|
| 180 |
+
one_frame_inference.add(mode.strip())
|
| 181 |
+
else:
|
| 182 |
+
one_frame_inference = None
|
| 183 |
+
|
| 184 |
+
latent_window_size = args.latent_window_size # default is 9
|
| 185 |
+
latent_f = (frame_count - 1) // 4 + 1
|
| 186 |
+
total_latent_sections = math.floor((latent_f - 1) / latent_window_size)
|
| 187 |
+
if total_latent_sections < 1 and not one_frame_mode:
|
| 188 |
+
logger.warning(f"Not enough frames for FramePack: {latent_f}, minimum: {latent_window_size * 4 + 1}")
|
| 189 |
+
return None
|
| 190 |
+
|
| 191 |
+
latent_f = total_latent_sections * latent_window_size + 1
|
| 192 |
+
actual_frame_count = (latent_f - 1) * 4 + 1
|
| 193 |
+
if actual_frame_count != frame_count:
|
| 194 |
+
logger.info(f"Frame count mismatch: {actual_frame_count} != {frame_count}, trimming to {actual_frame_count}")
|
| 195 |
+
frame_count = actual_frame_count
|
| 196 |
+
num_frames = latent_window_size * 4 - 3
|
| 197 |
+
|
| 198 |
+
# prepare start and control latent
|
| 199 |
+
def encode_image(path):
|
| 200 |
+
image = Image.open(path)
|
| 201 |
+
if image.mode == "RGBA":
|
| 202 |
+
alpha = image.split()[-1]
|
| 203 |
+
image = image.convert("RGB")
|
| 204 |
+
else:
|
| 205 |
+
alpha = None
|
| 206 |
+
image = resize_image_to_bucket(image, (width, height)) # returns a numpy array
|
| 207 |
+
image = torch.from_numpy(image).permute(2, 0, 1).unsqueeze(1).unsqueeze(0).float() # 1, C, 1, H, W
|
| 208 |
+
image = image / 127.5 - 1 # -1 to 1
|
| 209 |
+
return hunyuan.vae_encode(image, vae).to("cpu"), alpha
|
| 210 |
+
|
| 211 |
+
# VAE encoding
|
| 212 |
+
logger.info("Encoding image to latent space")
|
| 213 |
+
vae.to(device)
|
| 214 |
+
|
| 215 |
+
start_latent, _ = (
|
| 216 |
+
encode_image(image_path) if image_path else torch.zeros((1, 16, 1, height // 8, width // 8), dtype=torch.float32)
|
| 217 |
+
)
|
| 218 |
+
|
| 219 |
+
if one_frame_mode:
|
| 220 |
+
control_latents = []
|
| 221 |
+
control_alphas = []
|
| 222 |
+
if "control_image_path" in sample_parameter:
|
| 223 |
+
for control_image_path in sample_parameter["control_image_path"]:
|
| 224 |
+
control_latent, control_alpha = encode_image(control_image_path)
|
| 225 |
+
control_latents.append(control_latent)
|
| 226 |
+
control_alphas.append(control_alpha)
|
| 227 |
+
else:
|
| 228 |
+
control_latents = None
|
| 229 |
+
control_alphas = None
|
| 230 |
+
|
| 231 |
+
vae.to("cpu") # move VAE to CPU to save memory
|
| 232 |
+
clean_memory_on_device(device)
|
| 233 |
+
|
| 234 |
+
# sampilng
|
| 235 |
+
if not one_frame_mode:
|
| 236 |
+
f1_mode = args.f1
|
| 237 |
+
history_latents = torch.zeros((1, 16, 1 + 2 + 16, height // 8, width // 8), dtype=torch.float32)
|
| 238 |
+
|
| 239 |
+
if not f1_mode:
|
| 240 |
+
total_generated_latent_frames = 0
|
| 241 |
+
latent_paddings = reversed(range(total_latent_sections))
|
| 242 |
+
else:
|
| 243 |
+
total_generated_latent_frames = 1
|
| 244 |
+
history_latents = torch.cat([history_latents, start_latent.to(history_latents)], dim=2)
|
| 245 |
+
latent_paddings = [0] * total_latent_sections
|
| 246 |
+
|
| 247 |
+
if total_latent_sections > 4:
|
| 248 |
+
latent_paddings = [3] + [2] * (total_latent_sections - 3) + [1, 0]
|
| 249 |
+
|
| 250 |
+
latent_paddings = list(latent_paddings)
|
| 251 |
+
for loop_index in range(total_latent_sections):
|
| 252 |
+
latent_padding = latent_paddings[loop_index]
|
| 253 |
+
|
| 254 |
+
if not f1_mode:
|
| 255 |
+
is_last_section = latent_padding == 0
|
| 256 |
+
latent_padding_size = latent_padding * latent_window_size
|
| 257 |
+
|
| 258 |
+
logger.info(f"latent_padding_size = {latent_padding_size}, is_last_section = {is_last_section}")
|
| 259 |
+
|
| 260 |
+
indices = torch.arange(0, sum([1, latent_padding_size, latent_window_size, 1, 2, 16])).unsqueeze(0)
|
| 261 |
+
(
|
| 262 |
+
clean_latent_indices_pre,
|
| 263 |
+
blank_indices,
|
| 264 |
+
latent_indices,
|
| 265 |
+
clean_latent_indices_post,
|
| 266 |
+
clean_latent_2x_indices,
|
| 267 |
+
clean_latent_4x_indices,
|
| 268 |
+
) = indices.split([1, latent_padding_size, latent_window_size, 1, 2, 16], dim=1)
|
| 269 |
+
clean_latent_indices = torch.cat([clean_latent_indices_pre, clean_latent_indices_post], dim=1)
|
| 270 |
+
|
| 271 |
+
clean_latents_pre = start_latent.to(history_latents)
|
| 272 |
+
clean_latents_post, clean_latents_2x, clean_latents_4x = history_latents[:, :, : 1 + 2 + 16, :, :].split(
|
| 273 |
+
[1, 2, 16], dim=2
|
| 274 |
+
)
|
| 275 |
+
clean_latents = torch.cat([clean_latents_pre, clean_latents_post], dim=2)
|
| 276 |
+
else:
|
| 277 |
+
indices = torch.arange(0, sum([1, 16, 2, 1, latent_window_size])).unsqueeze(0)
|
| 278 |
+
(
|
| 279 |
+
clean_latent_indices_start,
|
| 280 |
+
clean_latent_4x_indices,
|
| 281 |
+
clean_latent_2x_indices,
|
| 282 |
+
clean_latent_1x_indices,
|
| 283 |
+
latent_indices,
|
| 284 |
+
) = indices.split([1, 16, 2, 1, latent_window_size], dim=1)
|
| 285 |
+
clean_latent_indices = torch.cat([clean_latent_indices_start, clean_latent_1x_indices], dim=1)
|
| 286 |
+
|
| 287 |
+
clean_latents_4x, clean_latents_2x, clean_latents_1x = history_latents[:, :, -sum([16, 2, 1]) :, :, :].split(
|
| 288 |
+
[16, 2, 1], dim=2
|
| 289 |
+
)
|
| 290 |
+
clean_latents = torch.cat([start_latent.to(history_latents), clean_latents_1x], dim=2)
|
| 291 |
+
|
| 292 |
+
# if use_teacache:
|
| 293 |
+
# transformer.initialize_teacache(enable_teacache=True, num_steps=steps)
|
| 294 |
+
# else:
|
| 295 |
+
# transformer.initialize_teacache(enable_teacache=False)
|
| 296 |
+
|
| 297 |
+
llama_vec = sample_parameter["llama_vec"].to(device, dtype=torch.bfloat16)
|
| 298 |
+
llama_attention_mask = sample_parameter["llama_attention_mask"].to(device)
|
| 299 |
+
clip_l_pooler = sample_parameter["clip_l_pooler"].to(device, dtype=torch.bfloat16)
|
| 300 |
+
if cfg_scale == 1.0:
|
| 301 |
+
llama_vec_n, clip_l_pooler_n = torch.zeros_like(llama_vec), torch.zeros_like(clip_l_pooler)
|
| 302 |
+
llama_vec_n, llama_attention_mask_n = crop_or_pad_yield_mask(llama_vec_n, length=512)
|
| 303 |
+
else:
|
| 304 |
+
llama_vec_n = sample_parameter["negative_llama_vec"].to(device, dtype=torch.bfloat16)
|
| 305 |
+
llama_attention_mask_n = sample_parameter["negative_llama_attention_mask"].to(device)
|
| 306 |
+
clip_l_pooler_n = sample_parameter["negative_clip_l_pooler"].to(device, dtype=torch.bfloat16)
|
| 307 |
+
image_encoder_last_hidden_state = sample_parameter["image_encoder_last_hidden_state"].to(
|
| 308 |
+
device, dtype=torch.bfloat16
|
| 309 |
+
)
|
| 310 |
+
|
| 311 |
+
generated_latents = sample_hunyuan(
|
| 312 |
+
transformer=model,
|
| 313 |
+
sampler=args.sample_solver,
|
| 314 |
+
width=width,
|
| 315 |
+
height=height,
|
| 316 |
+
frames=num_frames,
|
| 317 |
+
real_guidance_scale=cfg_scale,
|
| 318 |
+
distilled_guidance_scale=guidance_scale,
|
| 319 |
+
guidance_rescale=0.0,
|
| 320 |
+
# shift=3.0,
|
| 321 |
+
num_inference_steps=sample_steps,
|
| 322 |
+
generator=generator,
|
| 323 |
+
prompt_embeds=llama_vec,
|
| 324 |
+
prompt_embeds_mask=llama_attention_mask,
|
| 325 |
+
prompt_poolers=clip_l_pooler,
|
| 326 |
+
negative_prompt_embeds=llama_vec_n,
|
| 327 |
+
negative_prompt_embeds_mask=llama_attention_mask_n,
|
| 328 |
+
negative_prompt_poolers=clip_l_pooler_n,
|
| 329 |
+
device=device,
|
| 330 |
+
dtype=torch.bfloat16,
|
| 331 |
+
image_embeddings=image_encoder_last_hidden_state,
|
| 332 |
+
latent_indices=latent_indices,
|
| 333 |
+
clean_latents=clean_latents,
|
| 334 |
+
clean_latent_indices=clean_latent_indices,
|
| 335 |
+
clean_latents_2x=clean_latents_2x,
|
| 336 |
+
clean_latent_2x_indices=clean_latent_2x_indices,
|
| 337 |
+
clean_latents_4x=clean_latents_4x,
|
| 338 |
+
clean_latent_4x_indices=clean_latent_4x_indices,
|
| 339 |
+
)
|
| 340 |
+
|
| 341 |
+
total_generated_latent_frames += int(generated_latents.shape[2])
|
| 342 |
+
if not f1_mode:
|
| 343 |
+
if is_last_section:
|
| 344 |
+
generated_latents = torch.cat([start_latent.to(generated_latents), generated_latents], dim=2)
|
| 345 |
+
total_generated_latent_frames += 1
|
| 346 |
+
history_latents = torch.cat([generated_latents.to(history_latents), history_latents], dim=2)
|
| 347 |
+
real_history_latents = history_latents[:, :, :total_generated_latent_frames, :, :]
|
| 348 |
+
else:
|
| 349 |
+
history_latents = torch.cat([history_latents, generated_latents.to(history_latents)], dim=2)
|
| 350 |
+
real_history_latents = history_latents[:, :, -total_generated_latent_frames:, :, :]
|
| 351 |
+
|
| 352 |
+
logger.info(f"Generated. Latent shape {real_history_latents.shape}")
|
| 353 |
+
else:
|
| 354 |
+
# one frame mode
|
| 355 |
+
sample_num_frames = 1
|
| 356 |
+
latent_indices = torch.zeros((1, 1), dtype=torch.int64) # 1x1 latent index for target image
|
| 357 |
+
latent_indices[:, 0] = latent_window_size # last of latent_window
|
| 358 |
+
|
| 359 |
+
def get_latent_mask(mask_image: Image.Image):
|
| 360 |
+
mask_image = mask_image.resize((width // 8, height // 8), Image.LANCZOS)
|
| 361 |
+
mask_image = np.array(mask_image) # PIL to numpy, HWC
|
| 362 |
+
mask_image = torch.from_numpy(mask_image).float() / 255.0 # 0 to 1.0, HWC
|
| 363 |
+
mask_image = mask_image.squeeze(-1) # HWC -> HW
|
| 364 |
+
mask_image = mask_image.unsqueeze(0).unsqueeze(0).unsqueeze(0) # HW -> 111HW (B, C, F, H, W)
|
| 365 |
+
mask_image = mask_image.to(torch.float32)
|
| 366 |
+
return mask_image
|
| 367 |
+
|
| 368 |
+
if control_latents is None or len(control_latents) == 0:
|
| 369 |
+
logger.info("No control images provided for one frame inference. Use zero latents for control images.")
|
| 370 |
+
control_latents = [torch.zeros(1, 16, 1, height // 8, width // 8, dtype=torch.float32)]
|
| 371 |
+
|
| 372 |
+
if "no_post" not in one_frame_inference:
|
| 373 |
+
# add zero latents as clean latents post
|
| 374 |
+
control_latents.append(torch.zeros((1, 16, 1, height // 8, width // 8), dtype=torch.float32))
|
| 375 |
+
logger.info("Add zero latents as clean latents post for one frame inference.")
|
| 376 |
+
|
| 377 |
+
# kisekaeichi and 1f-mc: both are using control images, but indices are different
|
| 378 |
+
clean_latents = torch.cat(control_latents, dim=2) # (1, 16, num_control_images, H//8, W//8)
|
| 379 |
+
clean_latent_indices = torch.zeros((1, len(control_latents)), dtype=torch.int64)
|
| 380 |
+
if "no_post" not in one_frame_inference:
|
| 381 |
+
clean_latent_indices[:, -1] = 1 + latent_window_size # default index for clean latents post
|
| 382 |
+
|
| 383 |
+
# apply mask for control latents (clean latents)
|
| 384 |
+
for i in range(len(control_alphas)):
|
| 385 |
+
control_alpha = control_alphas[i]
|
| 386 |
+
if control_alpha is not None:
|
| 387 |
+
latent_mask = get_latent_mask(control_alpha)
|
| 388 |
+
logger.info(f"Apply mask for clean latents 1x for {i + 1}: shape: {latent_mask.shape}")
|
| 389 |
+
clean_latents[:, :, i : i + 1, :, :] = clean_latents[:, :, i : i + 1, :, :] * latent_mask
|
| 390 |
+
|
| 391 |
+
for one_frame_param in one_frame_inference:
|
| 392 |
+
if one_frame_param.startswith("target_index="):
|
| 393 |
+
target_index = int(one_frame_param.split("=")[1])
|
| 394 |
+
latent_indices[:, 0] = target_index
|
| 395 |
+
logger.info(f"Set index for target: {target_index}")
|
| 396 |
+
elif one_frame_param.startswith("control_index="):
|
| 397 |
+
control_indices = one_frame_param.split("=")[1].split(";")
|
| 398 |
+
i = 0
|
| 399 |
+
while i < len(control_indices) and i < clean_latent_indices.shape[1]:
|
| 400 |
+
control_index = int(control_indices[i])
|
| 401 |
+
clean_latent_indices[:, i] = control_index
|
| 402 |
+
i += 1
|
| 403 |
+
logger.info(f"Set index for clean latent 1x: {control_indices}")
|
| 404 |
+
|
| 405 |
+
if "no_2x" in one_frame_inference:
|
| 406 |
+
clean_latents_2x = None
|
| 407 |
+
clean_latent_2x_indices = None
|
| 408 |
+
logger.info("No clean_latents_2x")
|
| 409 |
+
else:
|
| 410 |
+
clean_latents_2x = torch.zeros((1, 16, 2, height // 8, width // 8), dtype=torch.float32)
|
| 411 |
+
index = 1 + latent_window_size + 1
|
| 412 |
+
clean_latent_2x_indices = torch.arange(index, index + 2).unsqueeze(0) # 2
|
| 413 |
+
|
| 414 |
+
if "no_4x" in one_frame_inference:
|
| 415 |
+
clean_latents_4x = None
|
| 416 |
+
clean_latent_4x_indices = None
|
| 417 |
+
logger.info("No clean_latents_4x")
|
| 418 |
+
else:
|
| 419 |
+
clean_latents_4x = torch.zeros((1, 16, 16, height // 8, width // 8), dtype=torch.float32)
|
| 420 |
+
index = 1 + latent_window_size + 1 + 2
|
| 421 |
+
clean_latent_4x_indices = torch.arange(index, index + 16).unsqueeze(0) # 16
|
| 422 |
+
|
| 423 |
+
logger.info(
|
| 424 |
+
f"One frame inference. clean_latent: {clean_latents.shape} latent_indices: {latent_indices}, clean_latent_indices: {clean_latent_indices}, num_frames: {sample_num_frames}"
|
| 425 |
+
)
|
| 426 |
+
|
| 427 |
+
# prepare conditioning inputs
|
| 428 |
+
llama_vec = sample_parameter["llama_vec"].to(device, dtype=torch.bfloat16)
|
| 429 |
+
llama_attention_mask = sample_parameter["llama_attention_mask"].to(device)
|
| 430 |
+
clip_l_pooler = sample_parameter["clip_l_pooler"].to(device, dtype=torch.bfloat16)
|
| 431 |
+
if cfg_scale == 1.0:
|
| 432 |
+
llama_vec_n, clip_l_pooler_n = torch.zeros_like(llama_vec), torch.zeros_like(clip_l_pooler)
|
| 433 |
+
llama_vec_n, llama_attention_mask_n = crop_or_pad_yield_mask(llama_vec_n, length=512)
|
| 434 |
+
else:
|
| 435 |
+
llama_vec_n = sample_parameter["negative_llama_vec"].to(device, dtype=torch.bfloat16)
|
| 436 |
+
llama_attention_mask_n = sample_parameter["negative_llama_attention_mask"].to(device)
|
| 437 |
+
clip_l_pooler_n = sample_parameter["negative_clip_l_pooler"].to(device, dtype=torch.bfloat16)
|
| 438 |
+
image_encoder_last_hidden_state = sample_parameter["image_encoder_last_hidden_state"].to(device, dtype=torch.bfloat16)
|
| 439 |
+
|
| 440 |
+
generated_latents = sample_hunyuan(
|
| 441 |
+
transformer=model,
|
| 442 |
+
sampler=args.sample_solver,
|
| 443 |
+
width=width,
|
| 444 |
+
height=height,
|
| 445 |
+
frames=1,
|
| 446 |
+
real_guidance_scale=cfg_scale,
|
| 447 |
+
distilled_guidance_scale=guidance_scale,
|
| 448 |
+
guidance_rescale=0.0,
|
| 449 |
+
# shift=3.0,
|
| 450 |
+
num_inference_steps=sample_steps,
|
| 451 |
+
generator=generator,
|
| 452 |
+
prompt_embeds=llama_vec,
|
| 453 |
+
prompt_embeds_mask=llama_attention_mask,
|
| 454 |
+
prompt_poolers=clip_l_pooler,
|
| 455 |
+
negative_prompt_embeds=llama_vec_n,
|
| 456 |
+
negative_prompt_embeds_mask=llama_attention_mask_n,
|
| 457 |
+
negative_prompt_poolers=clip_l_pooler_n,
|
| 458 |
+
device=device,
|
| 459 |
+
dtype=torch.bfloat16,
|
| 460 |
+
image_embeddings=image_encoder_last_hidden_state,
|
| 461 |
+
latent_indices=latent_indices,
|
| 462 |
+
clean_latents=clean_latents,
|
| 463 |
+
clean_latent_indices=clean_latent_indices,
|
| 464 |
+
clean_latents_2x=clean_latents_2x,
|
| 465 |
+
clean_latent_2x_indices=clean_latent_2x_indices,
|
| 466 |
+
clean_latents_4x=clean_latents_4x,
|
| 467 |
+
clean_latent_4x_indices=clean_latent_4x_indices,
|
| 468 |
+
)
|
| 469 |
+
|
| 470 |
+
real_history_latents = generated_latents.to(clean_latents)
|
| 471 |
+
|
| 472 |
+
# wait for 5 seconds until block swap is done
|
| 473 |
+
logger.info("Waiting for 5 seconds to finish block swap")
|
| 474 |
+
time.sleep(5)
|
| 475 |
+
|
| 476 |
+
gc.collect()
|
| 477 |
+
clean_memory_on_device(device)
|
| 478 |
+
|
| 479 |
+
video = decode_latent(
|
| 480 |
+
latent_window_size, total_latent_sections, args.bulk_decode, vae, real_history_latents, device, one_frame_mode
|
| 481 |
+
)
|
| 482 |
+
video = video.to("cpu", dtype=torch.float32).unsqueeze(0) # add batch dimension
|
| 483 |
+
video = (video / 2 + 0.5).clamp(0, 1) # -1 to 1 -> 0 to 1
|
| 484 |
+
clean_memory_on_device(device)
|
| 485 |
+
|
| 486 |
+
return video
|
| 487 |
+
|
| 488 |
+
def load_vae(self, args: argparse.Namespace, vae_dtype: torch.dtype, vae_path: str):
|
| 489 |
+
vae_path = args.vae
|
| 490 |
+
logger.info(f"Loading VAE model from {vae_path}")
|
| 491 |
+
vae = load_framepack_vae(args.vae, args.vae_chunk_size, args.vae_spatial_tile_sample_min_size, args.vae_tiling, "cpu")
|
| 492 |
+
return vae
|
| 493 |
+
|
| 494 |
+
def load_transformer(
|
| 495 |
+
self,
|
| 496 |
+
accelerator: Accelerator,
|
| 497 |
+
args: argparse.Namespace,
|
| 498 |
+
dit_path: str,
|
| 499 |
+
attn_mode: str,
|
| 500 |
+
split_attn: bool,
|
| 501 |
+
loading_device: str,
|
| 502 |
+
dit_weight_dtype: Optional[torch.dtype],
|
| 503 |
+
):
|
| 504 |
+
logger.info(f"Loading DiT model from {dit_path}")
|
| 505 |
+
device = accelerator.device
|
| 506 |
+
model = load_packed_model(
|
| 507 |
+
device, dit_path, attn_mode, loading_device, args.fp8_scaled, split_attn, disable_numpy_memmap=args.disable_numpy_memmap
|
| 508 |
+
)
|
| 509 |
+
return model
|
| 510 |
+
|
| 511 |
+
def compile_transformer(self, args, transformer):
|
| 512 |
+
transformer: HunyuanVideoTransformer3DModelPacked = transformer
|
| 513 |
+
return model_utils.compile_transformer(
|
| 514 |
+
args,
|
| 515 |
+
transformer,
|
| 516 |
+
[transformer.transformer_blocks, transformer.single_transformer_blocks],
|
| 517 |
+
disable_linear=self.blocks_to_swap > 0,
|
| 518 |
+
)
|
| 519 |
+
|
| 520 |
+
def scale_shift_latents(self, latents):
|
| 521 |
+
# FramePack VAE includes scaling
|
| 522 |
+
return latents
|
| 523 |
+
|
| 524 |
+
def call_dit(
|
| 525 |
+
self,
|
| 526 |
+
args: argparse.Namespace,
|
| 527 |
+
accelerator: Accelerator,
|
| 528 |
+
transformer,
|
| 529 |
+
latents: torch.Tensor,
|
| 530 |
+
batch: dict[str, torch.Tensor],
|
| 531 |
+
noise: torch.Tensor,
|
| 532 |
+
noisy_model_input: torch.Tensor,
|
| 533 |
+
timesteps: torch.Tensor,
|
| 534 |
+
network_dtype: torch.dtype,
|
| 535 |
+
):
|
| 536 |
+
model: HunyuanVideoTransformer3DModelPacked = transformer
|
| 537 |
+
device = accelerator.device
|
| 538 |
+
batch_size = latents.shape[0]
|
| 539 |
+
|
| 540 |
+
# maybe model.dtype is better than network_dtype...
|
| 541 |
+
distilled_guidance = torch.tensor([args.guidance_scale * 1000.0] * batch_size).to(device=device, dtype=network_dtype)
|
| 542 |
+
latents = latents.to(device=accelerator.device, dtype=network_dtype)
|
| 543 |
+
noisy_model_input = noisy_model_input.to(device=accelerator.device, dtype=network_dtype)
|
| 544 |
+
# for k, v in batch.items():
|
| 545 |
+
# if isinstance(v, torch.Tensor):
|
| 546 |
+
# print(f"{k}: {v.shape} {v.dtype} {v.device}")
|
| 547 |
+
with accelerator.autocast():
|
| 548 |
+
clean_latent_2x_indices = batch["clean_latent_2x_indices"] if "clean_latent_2x_indices" in batch else None
|
| 549 |
+
if clean_latent_2x_indices is not None:
|
| 550 |
+
clean_latent_2x = batch["latents_clean_2x"] if "latents_clean_2x" in batch else None
|
| 551 |
+
if clean_latent_2x is None:
|
| 552 |
+
clean_latent_2x = torch.zeros(
|
| 553 |
+
(batch_size, 16, 2, latents.shape[3], latents.shape[4]), dtype=latents.dtype, device=latents.device
|
| 554 |
+
)
|
| 555 |
+
else:
|
| 556 |
+
clean_latent_2x = None
|
| 557 |
+
|
| 558 |
+
clean_latent_4x_indices = batch["clean_latent_4x_indices"] if "clean_latent_4x_indices" in batch else None
|
| 559 |
+
if clean_latent_4x_indices is not None:
|
| 560 |
+
clean_latent_4x = batch["latents_clean_4x"] if "latents_clean_4x" in batch else None
|
| 561 |
+
if clean_latent_4x is None:
|
| 562 |
+
clean_latent_4x = torch.zeros(
|
| 563 |
+
(batch_size, 16, 16, latents.shape[3], latents.shape[4]), dtype=latents.dtype, device=latents.device
|
| 564 |
+
)
|
| 565 |
+
else:
|
| 566 |
+
clean_latent_4x = None
|
| 567 |
+
|
| 568 |
+
model_pred = model(
|
| 569 |
+
hidden_states=noisy_model_input,
|
| 570 |
+
timestep=timesteps,
|
| 571 |
+
encoder_hidden_states=batch["llama_vec"],
|
| 572 |
+
encoder_attention_mask=batch["llama_attention_mask"],
|
| 573 |
+
pooled_projections=batch["clip_l_pooler"],
|
| 574 |
+
guidance=distilled_guidance,
|
| 575 |
+
latent_indices=batch["latent_indices"],
|
| 576 |
+
clean_latents=batch["latents_clean"],
|
| 577 |
+
clean_latent_indices=batch["clean_latent_indices"],
|
| 578 |
+
clean_latents_2x=clean_latent_2x,
|
| 579 |
+
clean_latent_2x_indices=clean_latent_2x_indices,
|
| 580 |
+
clean_latents_4x=clean_latent_4x,
|
| 581 |
+
clean_latent_4x_indices=clean_latent_4x_indices,
|
| 582 |
+
image_embeddings=batch["image_embeddings"],
|
| 583 |
+
return_dict=False,
|
| 584 |
+
)
|
| 585 |
+
model_pred = model_pred[0] # returns tuple (model_pred, )
|
| 586 |
+
|
| 587 |
+
# flow matching loss
|
| 588 |
+
target = noise - latents
|
| 589 |
+
|
| 590 |
+
return model_pred, target
|
| 591 |
+
|
| 592 |
+
# endregion model specific
|
| 593 |
+
|
| 594 |
+
|
| 595 |
+
def framepack_setup_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
    """Register FramePack-specific command line options on *parser* and return it."""
    add = parser.add_argument

    # precision options
    add("--fp8_scaled", action="store_true", help="use scaled fp8 for DiT / DiTにスケーリングされたfp8を使う")
    add("--fp8_llm", action="store_true", help="use fp8 for LLM / LLMにfp8を使う")

    # model component paths
    add("--text_encoder1", type=str, help="Text Encoder 1 directory / テキストエンコーダ1のディレクトリ")
    add("--text_encoder2", type=str, help="Text Encoder 2 directory / テキストエンコーダ2のディレクトリ")

    # VAE behavior
    add(
        "--vae_tiling",
        action="store_true",
        help="enable spatial tiling for VAE, default is False. If vae_spatial_tile_sample_min_size is set, this is automatically enabled",
    )
    add("--vae_chunk_size", type=int, default=None, help="chunk size for CausalConv3d in VAE")
    add("--vae_spatial_tile_sample_min_size", type=int, default=None, help="spatial tile sample min size for VAE, default 256")

    # sampling / generation options
    add("--image_encoder", type=str, default=None, help="Image encoder (CLIP) checkpoint path or directory")
    add("--latent_window_size", type=int, default=9, help="FramePack latent window size (default 9)")
    add("--bulk_decode", action="store_true", help="decode all frames at once in sample generation")
    add("--f1", action="store_true", help="Use F1 sampling method for sample generation")
    add("--one_frame", action="store_true", help="Use one frame sampling method for sample generation")
    return parser
|
| 616 |
+
|
| 617 |
+
|
| 618 |
+
def main():
    """CLI entry point: parse arguments, pin FramePack-fixed settings, and train."""
    parser = framepack_setup_parser(setup_parser_common())

    args = read_config_from_file(parser.parse_args(), parser)

    assert args.vae_dtype is None or args.vae_dtype == "float16", (
        "VAE dtype must be float16 / VAEのdtypeはfloat16でなければなりません"
    )
    # FramePack runs with fixed dtypes; sample generation always uses UniPC.
    args.vae_dtype = "float16"  # fixed
    args.dit_dtype = "bfloat16"  # fixed
    args.sample_solver = "unipc"  # for sample generation, fixed to unipc

    FramePackNetworkTrainer().train(args)
|
| 634 |
+
|
| 635 |
+
|
| 636 |
+
# Allow running this module directly as a training script.
if __name__ == "__main__":
    main()
|