Add files using upload-large-folder tool
Browse files- .gitignore +17 -0
- .python-version +1 -0
- CONTRIBUTING.ja.md +319 -0
- CONTRIBUTING.md +318 -0
- README.ja.md +314 -0
- README.md +326 -0
- cache_latents.py +4 -0
- cache_text_encoder_outputs.py +4 -0
- caption_images_by_qwen_vl.py +4 -0
- convert_lora.py +4 -0
- flux_2_cache_latents.py +4 -0
- flux_2_cache_text_encoder_outputs.py +4 -0
- flux_2_generate_image.py +4 -0
- flux_2_train_network.py +4 -0
- flux_kontext_cache_latents.py +4 -0
- flux_kontext_cache_text_encoder_outputs.py +4 -0
- flux_kontext_generate_image.py +4 -0
- flux_kontext_train_network.py +4 -0
- fpack_cache_latents.py +4 -0
- fpack_cache_text_encoder_outputs.py +4 -0
- fpack_generate_video.py +4 -0
- fpack_train_network.py +4 -0
- hv_1_5_cache_latents.py +4 -0
- hv_1_5_cache_text_encoder_outputs.py +4 -0
- hv_1_5_generate_video.py +4 -0
- hv_1_5_train_network.py +4 -0
- hv_generate_video.py +4 -0
- hv_train.py +4 -0
- hv_train_network.py +4 -0
- kandinsky5_cache_latents.py +4 -0
- kandinsky5_cache_text_encoder_outputs.py +4 -0
- kandinsky5_generate_video.py +4 -0
- kandinsky5_train_network.py +4 -0
- lora_post_hoc_ema.py +4 -0
- ltx2_cache_dino_features.py +4 -0
- ltx2_cache_latents.py +4 -0
- ltx2_cache_text_encoder_outputs.py +4 -0
- ltx2_generate_video.py +4 -0
- ltx2_merge_lora.py +5 -0
- ltx2_train_network.py +4 -0
- ltx2_train_slider.py +6 -0
- pyproject.toml +191 -0
- qwen_extract_lora.py +4 -0
- qwen_image_cache_latents.py +4 -0
- qwen_image_cache_text_encoder_outputs.py +4 -0
- qwen_image_generate_image.py +4 -0
- wan_cache_latents.py +4 -0
- wan_cache_text_encoder_outputs.py +4 -0
- wan_train_network.py +4 -0
- zimage_train.py +4 -0
.gitignore
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__pycache__/
|
| 2 |
+
.venv
|
| 3 |
+
venv/
|
| 4 |
+
logs/
|
| 5 |
+
uv.lock
|
| 6 |
+
main.exp
|
| 7 |
+
main.lib
|
| 8 |
+
main.obj
|
| 9 |
+
CLAUDE.md
|
| 10 |
+
GEMINI.md
|
| 11 |
+
.claude/
|
| 12 |
+
.gemini/
|
| 13 |
+
AGENTS.md
|
| 14 |
+
.vscode/settings.json
|
| 15 |
+
.mcp.json
|
| 16 |
+
references/
|
| 17 |
+
projects/
|
.python-version
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
3.10
|
CONTRIBUTING.ja.md
ADDED
|
@@ -0,0 +1,319 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Musubi Tuner へのコントリビューション
|
| 2 |
+
|
| 3 |
+
Musubi Tuner 開発へのご支援、ご協力に感謝いたします。コミュニティからの開発への手助けは、このプロジェクトにとって不可欠です。このドキュメントでは、コントリビューションの方法やプロジェクトへの参加方法について説明します。
|
| 4 |
+
|
| 5 |
+
## 目次
|
| 6 |
+
|
| 7 |
+
- [はじめに](#はじめに)
|
| 8 |
+
- [ご協力いただく前に](#ご協力いただく前に)
|
| 9 |
+
- [プロジェクトへの協力の方法](#プロジェクトへの協力の方法)
|
| 10 |
+
- [問題の報告](#問題の報告)
|
| 11 |
+
- [機能の提案](#機能の提案)
|
| 12 |
+
- [コードのコントリビューション](#コードのコントリビューション)
|
| 13 |
+
- [開発環境のセットアップ](#開発環境のセットアップ)
|
| 14 |
+
- [コードスタイルとガイドライン](#コードスタイルとガイドライン)
|
| 15 |
+
- [テスト](#テスト)
|
| 16 |
+
- [プルリクエストのプロセス](#プルリクエストのプロセス)
|
| 17 |
+
- [ライセンスと帰属(Attribution)](#ライセンスと帰属attribution)
|
| 18 |
+
- [コミュニティとサポート](#コミュニティとサポート)
|
| 19 |
+
|
| 20 |
+
## はじめに
|
| 21 |
+
|
| 22 |
+
開発にご協力いただく前に以下をお願いします:
|
| 23 |
+
|
| 24 |
+
1. このドキュメントを読む
|
| 25 |
+
2. [README.md](README.md) でプロジェクトを理解する
|
| 26 |
+
3. [既存の Issue](https://github.com/kohya-ss/musubi-tuner/issues) と [ディスカッション](https://github.com/kohya-ss/musubi-tuner/discussions) を確認する
|
| 27 |
+
4. 開発環境をセットアップする
|
| 28 |
+
|
| 29 |
+
## ご協力いただく前に
|
| 30 |
+
|
| 31 |
+
### ご留意いただきたい点
|
| 32 |
+
|
| 33 |
+
- このプロジェクトのメンテナンスは限られた時間とリソースで行われています
|
| 34 |
+
- PRのレビューとマージには時間がかかる場合があります
|
| 35 |
+
- プロジェクトが成長する過程で破壊的変更が発生する可能性があります
|
| 36 |
+
- 質問や一般的な議論には [GitHub Discussions](https://github.com/kohya-ss/musubi-tuner/discussions) をご利用ください
|
| 37 |
+
- バグ報告や機能要求には [GitHub Issues](https://github.com/kohya-ss/musubi-tuner/issues) をご利用ください
|
| 38 |
+
|
| 39 |
+
### ご協力いただける例
|
| 40 |
+
|
| 41 |
+
- バグ修正
|
| 42 |
+
- パフォーマンスの改善
|
| 43 |
+
- ドキュメントの改善
|
| 44 |
+
- 新機能の追加(事前にディスカッションを行うことを推奨)
|
| 45 |
+
- コード品質の改善
|
| 46 |
+
|
| 47 |
+
## プロジェクトへの協力の方法
|
| 48 |
+
|
| 49 |
+
### 問題の報告
|
| 50 |
+
|
| 51 |
+
新しい Issue を作成する前に:
|
| 52 |
+
|
| 53 |
+
1. **既存の Issue を検索**して重複を避けてください
|
| 54 |
+
2. **ディスカッションを確認**して、質問が既に回答されていないか確認してください
|
| 55 |
+
|
| 56 |
+
バグ報告を作成する際は以下を含めてください:
|
| 57 |
+
|
| 58 |
+
- **明確で内容を適切に要約したタイトル**
|
| 59 |
+
- **問題の詳細な説明**
|
| 60 |
+
- **問題を再現する手順**
|
| 61 |
+
- **環境の詳細**:
|
| 62 |
+
- オペレーティングシステム
|
| 63 |
+
- GPU モデルと VRAM
|
| 64 |
+
- Python バージョン
|
| 65 |
+
- PyTorch バージョン
|
| 66 |
+
- CUDA バージョン
|
| 67 |
+
- **エラーメッセージやログ**
|
| 68 |
+
- **期待される動作と実際の動作**
|
| 69 |
+
- **スクリーンショットや動画**(必要な場合)
|
| 70 |
+
|
| 71 |
+
### 機能の提案
|
| 72 |
+
|
| 73 |
+
機能要求の場合:
|
| 74 |
+
|
| 75 |
+
1. **まず Issue を開いて**機能について議論してください
|
| 76 |
+
2. **機能が解決する問題を説明**します
|
| 77 |
+
3. **提案された解決策を説明**します
|
| 78 |
+
4. **代替案とそのトレードオフ**を検討してください
|
| 79 |
+
5. **実装前にフィードバックを待つ**ことをお願いします(PRがマージされない可能性は常にあります)
|
| 80 |
+
|
| 81 |
+
重要な機能については、まず [GitHub Discussions](https://github.com/kohya-ss/musubi-tuner/discussions) にコミュニティの意見を求めることを検討してください。
|
| 82 |
+
|
| 83 |
+
### コードのコントリビューション
|
| 84 |
+
|
| 85 |
+
1. **Issue を開いて**提案する変更について議論をお願いします(些細な修正ではない場合)
|
| 86 |
+
2. **重要な変更の作業を開始する前に承認を待つ**ことをお願いします
|
| 87 |
+
3. **リポジトリをフォーク**して機能ブランチを作成する
|
| 88 |
+
4. **コードスタイルガイドライン**に従って変更を行う
|
| 89 |
+
5. **変更を徹底的にテスト**する
|
| 90 |
+
6. **プルリクエストを提出**する
|
| 91 |
+
|
| 92 |
+
## 開発環境のセットアップ
|
| 93 |
+
|
| 94 |
+
### 前提条件
|
| 95 |
+
|
| 96 |
+
- Python 3.10 以上
|
| 97 |
+
- Git
|
| 98 |
+
- CUDA 対応 GPU(GPU 機能のテスト用)
|
| 99 |
+
- 12GB 以上の VRAM 推奨
|
| 100 |
+
|
| 101 |
+
### インストール
|
| 102 |
+
|
| 103 |
+
1. **リポジトリをフォークしてクローン**:
|
| 104 |
+
```shell
|
| 105 |
+
git clone https://github.com/your-username/musubi-tuner.git
|
| 106 |
+
cd musubi-tuner
|
| 107 |
+
```
|
| 108 |
+
|
| 109 |
+
2. **開発環境をセットアップ**:
|
| 110 |
+
|
| 111 |
+
**オプション A: pip を使用**
|
| 112 |
+
```shell
|
| 113 |
+
# 仮想環境を作成
|
| 114 |
+
python -m venv .venv
|
| 115 |
+
|
| 116 |
+
# 仮想環境をアクティベート
|
| 117 |
+
# Windows の場合:
|
| 118 |
+
.venv/Scripts/activate
|
| 119 |
+
# Linux/Mac の場合:
|
| 120 |
+
source .venv/bin/activate
|
| 121 |
+
|
| 122 |
+
# PyTorch をインストール(CUDA バージョンに合わせて調整)
|
| 123 |
+
pip install torch torchvision --index-url https://download.pytorch.org/whl/cu128
|
| 124 |
+
|
| 125 |
+
# パッケージを開発モードでインストール
|
| 126 |
+
pip install -e .
|
| 127 |
+
|
| 128 |
+
# 開発依存関係をインストール
|
| 129 |
+
pip install --group dev
|
| 130 |
+
```
|
| 131 |
+
|
| 132 |
+
**オプション B: uv を使用**
|
| 133 |
+
```shell
|
| 134 |
+
# uv がインストールされていない場合はインストール
|
| 135 |
+
curl -LsSf https://astral.sh/uv/install.sh | sh # Linux/Mac
|
| 136 |
+
# または
|
| 137 |
+
powershell -c "irm https://astral.sh/uv/install.ps1 | iex" # Windows
|
| 138 |
+
|
| 139 |
+
# 依存関係をインストール
|
| 140 |
+
uv sync --extra cu128 # または CUDA バージョンにより cu124
|
| 141 |
+
```
|
| 142 |
+
|
| 143 |
+
3. **Accelerate を設定**:
|
| 144 |
+
```shell
|
| 145 |
+
accelerate config
|
| 146 |
+
```
|
| 147 |
+
|
| 148 |
+
## コードスタイルとガイドライン
|
| 149 |
+
|
| 150 |
+
### Python コードスタイル
|
| 151 |
+
|
| 152 |
+
このプロジェクトは **Ruff** をコード解析(リンティング)とコード整形に使用しています:
|
| 153 |
+
|
| 154 |
+
- **行の長さ**: 132 文字
|
| 155 |
+
- **インデント**: 4 スペース
|
| 156 |
+
- **クォートスタイル**: ダブルクォート
|
| 157 |
+
- **対象 Python バージョン**: 3.10
|
| 158 |
+
|
| 159 |
+
### IDE のセットアップ
|
| 160 |
+
|
| 161 |
+
https://docs.astral.sh/ruff/editors/setup/
|
| 162 |
+
|
| 163 |
+
### コード解析、整形の実行
|
| 164 |
+
|
| 165 |
+
```shell
|
| 166 |
+
# コードスタイルと潜在的な問題をチェック
|
| 167 |
+
ruff check
|
| 168 |
+
|
| 169 |
+
# 可能な場合は自動修正
|
| 170 |
+
ruff check --fix
|
| 171 |
+
|
| 172 |
+
# コードをフォーマット(注: フォーマットには black ではなく ruff を使用してください)
|
| 173 |
+
ruff format src
|
| 174 |
+
```
|
| 175 |
+
|
| 176 |
+
### コードガイドライン
|
| 177 |
+
|
| 178 |
+
- **コードベースの既存パターン**に従う
|
| 179 |
+
- **明確で説明的な変数名**を書く
|
| 180 |
+
- **適切な場所で型ヒント**を追加する
|
| 181 |
+
- **関数を単一の機能で適度なサイズ**に保つ
|
| 182 |
+
- **パブリック関数とクラスにドキュメント文字列**を追加する
|
| 183 |
+
- **エラーを適切に処理**: 回復不可能なエラーはそのままエラーとし、適切な対応が可能なエラーのみをキャッチして処理する
|
| 184 |
+
|
| 185 |
+
### インポートの整理
|
| 186 |
+
|
| 187 |
+
- 標準ライブラリのインポートを最初に
|
| 188 |
+
- サードパーティのインポートを次に
|
| 189 |
+
- ローカルインポートを最後に
|
| 190 |
+
- 可能な限り絶対インポートを使用
|
| 191 |
+
|
| 192 |
+
### コード修正ガイドライン
|
| 193 |
+
|
| 194 |
+
既存のコードを扱う場合:
|
| 195 |
+
|
| 196 |
+
- **既存のインターフェースとの互換性を維持**する
|
| 197 |
+
- **既存のモジュール構造に従う**
|
| 198 |
+
- **`docs/` ディレクトリの関連ドキュメントを更新**する
|
| 199 |
+
- **変更が複数のシステムに影響する場合、能力があれば異なるアーキテクチャでテスト**する
|
| 200 |
+
|
| 201 |
+
アーキテクチャ固有のコード(HunyuanVideo、Wan2.1/2.2、FramePack、FLUX.1 Kontext、Qwen-Image)を扱う場合:
|
| 202 |
+
|
| 203 |
+
- **命名規則に従う**: 新しいアーキテクチャを追加する場合は `{arch}_train_network.py` と `{arch}_generate_{type}.py` の命名パターンに従う
|
| 204 |
+
- **アーキテクチャ間の影響を考慮する**: アーキテクチャ間で共有されるコードを変更する場合
|
| 205 |
+
- **他のアーキテクチャでのテスト**: 変更が他のアーキテクチャに影響する場合、可能であればそれらでもテストする
|
| 206 |
+
|
| 207 |
+
## テスト
|
| 208 |
+
|
| 209 |
+
### テストの実行
|
| 210 |
+
|
| 211 |
+
```shell
|
| 212 |
+
# コード品質チェックを実行
|
| 213 |
+
ruff check
|
| 214 |
+
|
| 215 |
+
# コードをフォーマット
|
| 216 |
+
ruff format src
|
| 217 |
+
|
| 218 |
+
# 関連するスクリプトで変更を手動テスト
|
| 219 |
+
```
|
| 220 |
+
|
| 221 |
+
### 手動テストガイドライン
|
| 222 |
+
|
| 223 |
+
このプロジェクトは機械学習モデルを扱っています。そのため:
|
| 224 |
+
|
| 225 |
+
1. **まず小さなデータセット**で始めてください
|
| 226 |
+
2. **メモリ使用量が期待する範囲内**であることを確認してください
|
| 227 |
+
3. **可能であれば異なる GPU 構成**でテストをお願いします
|
| 228 |
+
4. **生成/訓練機能の出力品質**を検証してください
|
| 229 |
+
|
| 230 |
+
## プルリクエストのプロセス
|
| 231 |
+
|
| 232 |
+
### 提出前
|
| 233 |
+
|
| 234 |
+
1. **当該ブランチが最新のメインブランチと同期**していることを確認してください
|
| 235 |
+
2. **コード品質ツールを実行**:
|
| 236 |
+
```shell
|
| 237 |
+
ruff check --fix
|
| 238 |
+
ruff format src
|
| 239 |
+
```
|
| 240 |
+
3. **変更を徹底的にテスト**します
|
| 241 |
+
4. **必要に応じてドキュメントを更新**してください
|
| 242 |
+
5. **明確なコミットメッセージ**を書いてください
|
| 243 |
+
|
| 244 |
+
### プルリクエストテンプレート
|
| 245 |
+
|
| 246 |
+
PR を作成する際は以下を含めてください:
|
| 247 |
+
|
| 248 |
+
- **変更を説明する明確なタイトル**
|
| 249 |
+
- **何が変更されたか、なぜかの説明**
|
| 250 |
+
- **Issue への参照**(例:「Closes #123」)
|
| 251 |
+
- **実行されたテスト**
|
| 252 |
+
- **破壊的変更**(もしある場合)
|
| 253 |
+
- **ドキュメントの更新**(もしある場合)
|
| 254 |
+
|
| 255 |
+
### レビュープロセス
|
| 256 |
+
|
| 257 |
+
- メンテナーは時間があるときに PR をレビューします
|
| 258 |
+
- 限られたリソースのためレビューに時間がかかる場合がありますが、ご了承ください
|
| 259 |
+
- フィードバックに建設的に対処してください
|
| 260 |
+
- 議論を集中的かつ専門的に保つようお願いします
|
| 261 |
+
|
| 262 |
+
## ライセンスと帰属(Attribution)
|
| 263 |
+
|
| 264 |
+
### 帰属の要件
|
| 265 |
+
|
| 266 |
+
他のプロジェクトから派生または着想を得たコードを追加する場合:
|
| 267 |
+
|
| 268 |
+
1. **新しいファイルに適切なライセンスヘッダー**を追加する
|
| 269 |
+
2. **コピー/修正されたコードに帰属コメント**を含める
|
| 270 |
+
3. 新しいアーキテクチャのために**新しいライセンス要求を導入する場合は README.md の LICENSE セクションを更新**する
|
| 271 |
+
4. **プルリクエストの説明で参照元を文書化**する
|
| 272 |
+
|
| 273 |
+
### サードパーティのコード
|
| 274 |
+
|
| 275 |
+
あなたのコントリビューションにサードパーティのコードが含まれる場合:
|
| 276 |
+
|
| 277 |
+
1. **プロジェクトとのライセンス互換性を確保**する
|
| 278 |
+
2. **元のライセンスファイルまたはヘッダーを含める**
|
| 279 |
+
3. **ソースとライセンスを明確に文書化**する。プルリクエストの説明にも記載してください
|
| 280 |
+
4. **ソースライセンスからのすべての義務を履行**する
|
| 281 |
+
|
| 282 |
+
## コミュニティとサポート
|
| 283 |
+
|
| 284 |
+
### 情報交換の手段
|
| 285 |
+
|
| 286 |
+
- **GitHub Discussions**: 一般的な質問、アイデア、コミュニティの交流
|
| 287 |
+
- **GitHub Issues**: バグ報告と機能要求
|
| 288 |
+
- **Pull Requests**: コード貢献とレビュー
|
| 289 |
+
|
| 290 |
+
### 何かわからないことがあれば
|
| 291 |
+
|
| 292 |
+
内容に応じて以下の方法で質問してください:
|
| 293 |
+
|
| 294 |
+
- **ソフトウェアの使用法など**: [GitHub Discussions](https://github.com/kohya-ss/musubi-tuner/discussions) をチェックしてください
|
| 295 |
+
- **開発環境のセットアップ**: 「question」ラベルで Issue を作成するか、ディスカッションで質問してください
|
| 296 |
+
- **コントリビューションのプロセス**: このガイドを参照するか、ディスカッションで質問してください
|
| 297 |
+
|
| 298 |
+
### ご協力いただいた方への謝辞など
|
| 299 |
+
|
| 300 |
+
ご協力いただいた方は以下の方法等でご紹介させていただきます:
|
| 301 |
+
|
| 302 |
+
- **Git コミット履歴**
|
| 303 |
+
- **重要なコントリビューションのリリースノート**
|
| 304 |
+
- **主要な機能への README での謝辞**
|
| 305 |
+
|
| 306 |
+
---
|
| 307 |
+
|
| 308 |
+
## 最後に
|
| 309 |
+
|
| 310 |
+
Musubi Tuner へのご協力に興味をお持ちいただき、ありがとうございます。このプロジェクトはコミュニティのご支援、ご協力で成り立っています。
|
| 311 |
+
|
| 312 |
+
以下についてご留意いただければ幸いです:
|
| 313 |
+
|
| 314 |
+
- **最初のコントリビューションは小さく**始めることをお勧めします
|
| 315 |
+
- **何か不明な点があれば質問**してください
|
| 316 |
+
- **レビュープロセスは辛抱強く**お待ちいただければ幸いです
|
| 317 |
+
- **ツールの構築と改善に一緒に取り組めればと**考えています
|
| 318 |
+
|
| 319 |
+
皆様のMusubi Tuner へのご協力に重ねて感謝申し上げます。
|
CONTRIBUTING.md
ADDED
|
@@ -0,0 +1,318 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Contributing to Musubi Tuner
|
| 2 |
+
|
| 3 |
+
Thank you for your interest in contributing to Musubi Tuner! We welcome contributions from the community and are excited to work with you to make this project even better.
|
| 4 |
+
|
| 5 |
+
## Table of Contents
|
| 6 |
+
|
| 7 |
+
- [Getting Started](#getting-started)
|
| 8 |
+
- [Before You Contribute](#before-you-contribute)
|
| 9 |
+
- [How to Contribute](#how-to-contribute)
|
| 10 |
+
- [Reporting Issues](#reporting-issues)
|
| 11 |
+
- [Suggesting Features](#suggesting-features)
|
| 12 |
+
- [Contributing Code](#contributing-code)
|
| 13 |
+
- [Development Setup](#development-setup)
|
| 14 |
+
- [Code Style and Guidelines](#code-style-and-guidelines)
|
| 15 |
+
- [Testing](#testing)
|
| 16 |
+
- [Pull Request Process](#pull-request-process)
|
| 17 |
+
- [Licensing and Attribution](#licensing-and-attribution)
|
| 18 |
+
- [Community and Support](#community-and-support)
|
| 19 |
+
|
| 20 |
+
## Getting Started
|
| 21 |
+
|
| 22 |
+
Before contributing, please:
|
| 23 |
+
|
| 24 |
+
1. Read through this contributing guide
|
| 25 |
+
2. Review the [README.md](README.md) to understand the project
|
| 26 |
+
3. Check the [existing issues](https://github.com/kohya-ss/musubi-tuner/issues) and [discussions](https://github.com/kohya-ss/musubi-tuner/discussions)
|
| 27 |
+
4. Set up your development environment
|
| 28 |
+
|
| 29 |
+
## Before You Contribute
|
| 30 |
+
|
| 31 |
+
### Important Notes
|
| 32 |
+
|
| 33 |
+
- This project is under active development with limited maintainer resources
|
| 34 |
+
- PR reviews and merges may take time
|
| 35 |
+
- Breaking changes may occur as the project evolves
|
| 36 |
+
- For questions and general discussion, use [GitHub Discussions](https://github.com/kohya-ss/musubi-tuner/discussions)
|
| 37 |
+
- For bug reports and feature requests, use [GitHub Issues](https://github.com/kohya-ss/musubi-tuner/issues)
|
| 38 |
+
|
| 39 |
+
### Types of Contributions We Welcome
|
| 40 |
+
|
| 41 |
+
- Bug fixes
|
| 42 |
+
- Performance improvements
|
| 43 |
+
- Documentation improvements
|
| 44 |
+
- New features (with prior discussion)
|
| 45 |
+
- Code quality improvements
|
| 46 |
+
|
| 47 |
+
## How to Contribute
|
| 48 |
+
|
| 49 |
+
### Reporting Issues
|
| 50 |
+
|
| 51 |
+
Before creating a new issue:
|
| 52 |
+
|
| 53 |
+
1. **Search existing issues** to avoid duplicates
|
| 54 |
+
2. **Check discussions** as your question might already be answered
|
| 55 |
+
|
| 56 |
+
When creating a bug report, include:
|
| 57 |
+
|
| 58 |
+
- **Clear, descriptive title**
|
| 59 |
+
- **Detailed description** of the problem
|
| 60 |
+
- **Steps to reproduce** the issue
|
| 61 |
+
- **Environment details**:
|
| 62 |
+
- Operating System
|
| 63 |
+
- GPU model and VRAM
|
| 64 |
+
- Python version
|
| 65 |
+
- PyTorch version
|
| 66 |
+
- CUDA version
|
| 67 |
+
- **Error messages or logs**
|
| 68 |
+
- **Expected vs actual behavior**
|
| 69 |
+
- **Screenshots or videos** (if applicable)
|
| 70 |
+
|
| 71 |
+
### Suggesting Features
|
| 72 |
+
|
| 73 |
+
For feature requests:
|
| 74 |
+
|
| 75 |
+
1. **Open an issue first** to discuss the feature
|
| 76 |
+
2. **Explain the problem** your feature would solve
|
| 77 |
+
3. **Describe the proposed solution**
|
| 78 |
+
4. **Consider alternatives** and their trade-offs
|
| 79 |
+
5. **Wait for feedback** before starting implementation (there's always a chance the PR won't be merged)
|
| 80 |
+
|
| 81 |
+
For significant features, consider posting in [GitHub Discussions](https://github.com/kohya-ss/musubi-tuner/discussions) first to gather community input.
|
| 82 |
+
|
| 83 |
+
### Contributing Code
|
| 84 |
+
|
| 85 |
+
1. **Open an issue** to discuss your proposed changes (unless it's a trivial fix)
|
| 86 |
+
2. **Wait for approval** before starting work on significant changes
|
| 87 |
+
3. **Fork the repository** and create a feature branch
|
| 88 |
+
4. **Make your changes** following our code style guidelines
|
| 89 |
+
5. **Test your changes** thoroughly
|
| 90 |
+
6. **Submit a pull request**
|
| 91 |
+
|
| 92 |
+
## Development Setup
|
| 93 |
+
|
| 94 |
+
### Prerequisites
|
| 95 |
+
|
| 96 |
+
- Python 3.10 or later
|
| 97 |
+
- Git
|
| 98 |
+
- CUDA-compatible GPU (for testing GPU features)
|
| 99 |
+
- 12GB+ VRAM recommended
|
| 100 |
+
|
| 101 |
+
### Installation
|
| 102 |
+
|
| 103 |
+
1. **Fork and clone the repository**:
|
| 104 |
+
```shell
|
| 105 |
+
git clone https://github.com/your-username/musubi-tuner.git
|
| 106 |
+
cd musubi-tuner
|
| 107 |
+
```
|
| 108 |
+
|
| 109 |
+
2. **Set up the development environment**:
|
| 110 |
+
|
| 111 |
+
**Option A: Using pip**
|
| 112 |
+
```shell
|
| 113 |
+
# Create virtual environment
|
| 114 |
+
python -m venv .venv
|
| 115 |
+
|
| 116 |
+
# Activate virtual environment
|
| 117 |
+
# On Windows:
|
| 118 |
+
.venv/Scripts/activate
|
| 119 |
+
# On Linux/Mac:
|
| 120 |
+
source .venv/bin/activate
|
| 121 |
+
|
| 122 |
+
# Install PyTorch (adjust for your CUDA version)
|
| 123 |
+
pip install torch torchvision --index-url https://download.pytorch.org/whl/cu128
|
| 124 |
+
|
| 125 |
+
# Install the package in development mode
|
| 126 |
+
pip install -e .
|
| 127 |
+
|
| 128 |
+
# Install development dependencies
|
| 129 |
+
pip install --group dev
|
| 130 |
+
```
|
| 131 |
+
|
| 132 |
+
**Option B: Using uv**
|
| 133 |
+
```shell
|
| 134 |
+
# Install uv if not present
|
| 135 |
+
curl -LsSf https://astral.sh/uv/install.sh | sh # Linux/Mac
|
| 136 |
+
# or
|
| 137 |
+
powershell -c "irm https://astral.sh/uv/install.ps1 | iex" # Windows
|
| 138 |
+
|
| 139 |
+
# Install dependencies
|
| 140 |
+
uv sync --extra cu128 # or cu124 based on your CUDA version
|
| 141 |
+
```
|
| 142 |
+
|
| 143 |
+
3. **Configure Accelerate**:
|
| 144 |
+
```shell
|
| 145 |
+
accelerate config
|
| 146 |
+
```
|
| 147 |
+
|
| 148 |
+
## Code Style and Guidelines
|
| 149 |
+
|
| 150 |
+
### Python Code Style
|
| 151 |
+
|
| 152 |
+
This project uses **Ruff** for code linting and code formatting:
|
| 153 |
+
|
| 154 |
+
- **Line length**: 132 characters
|
| 155 |
+
- **Indentation**: 4 spaces
|
| 156 |
+
- **Quote style**: Double quotes
|
| 157 |
+
- **Target Python version**: 3.10
|
| 158 |
+
|
| 159 |
+
### IDE
|
| 160 |
+
|
| 161 |
+
https://docs.astral.sh/ruff/editors/setup/
|
| 162 |
+
|
| 163 |
+
### Running Code Quality Tools
|
| 164 |
+
|
| 165 |
+
```shell
|
| 166 |
+
# Check code style and potential issues
|
| 167 |
+
ruff check
|
| 168 |
+
|
| 169 |
+
# Auto-fix issues where possible
|
| 170 |
+
ruff check --fix
|
| 171 |
+
|
| 172 |
+
# Format code (note: use ruff for formatting, not black)
|
| 173 |
+
ruff format src
|
| 174 |
+
```
|
| 175 |
+
|
| 176 |
+
### Code Guidelines
|
| 177 |
+
|
| 178 |
+
- **Follow existing patterns** in the codebase
|
| 179 |
+
- **Write clear, descriptive variable names**
|
| 180 |
+
- **Add type hints** where appropriate
|
| 181 |
+
- **Keep functions focused** and reasonably sized
|
| 182 |
+
- **Add docstrings** for public functions and classes
|
| 183 |
+
- **Handle errors appropriately** - Let unrecoverable errors fail fast; only catch and handle errors you can meaningfully recover from
|
| 184 |
+
|
| 185 |
+
### Import Organization
|
| 186 |
+
|
| 187 |
+
- Standard library imports first
|
| 188 |
+
- Third-party imports second
|
| 189 |
+
- Local imports last
|
| 190 |
+
- Use absolute imports when possible
|
| 191 |
+
|
| 192 |
+
### Code Modification Guidelines
|
| 193 |
+
|
| 194 |
+
When working with existing code:
|
| 195 |
+
|
| 196 |
+
- **Maintain compatibility** with existing interfaces
|
| 197 |
+
- **Follow the existing module structure**
|
| 198 |
+
- **Update relevant documentation** in the `docs/` directory
|
| 199 |
+
- **Test across different architectures** if your changes affect multiple architectures and you have the capability to do so
|
| 200 |
+
|
| 201 |
+
When working with architecture-specific code (HunyuanVideo, Wan2.1/2.2, FramePack, FLUX.1 Kontext, Qwen-Image):
|
| 202 |
+
|
| 203 |
+
- **Follow naming conventions**: When adding a new architecture, follow the `{arch}_train_network.py` and `{arch}_generate_{type}.py` naming pattern
|
| 204 |
+
- **Consider cross-architecture impact** when making changes within shared modules
|
| 205 |
+
- **Test with representative models** if possible
|
| 206 |
+
|
| 207 |
+
## Testing
|
| 208 |
+
|
| 209 |
+
### Running Tests
|
| 210 |
+
|
| 211 |
+
```shell
|
| 212 |
+
# Run code quality checks
|
| 213 |
+
ruff check
|
| 214 |
+
|
| 215 |
+
# Format code
|
| 216 |
+
ruff format src
|
| 217 |
+
|
| 218 |
+
# Test your changes manually with the relevant scripts
|
| 219 |
+
```
|
| 220 |
+
|
| 221 |
+
### Manual Testing Guidelines
|
| 222 |
+
|
| 223 |
+
Since this project deals with machine learning models:
|
| 224 |
+
|
| 225 |
+
1. **Test with small datasets** first
|
| 226 |
+
2. **Verify memory usage** is within expected boundaries
|
| 227 |
+
3. **Test on different GPU configurations** if possible
|
| 228 |
+
4. **Validate output quality** for generation/training features
|
| 229 |
+
|
| 230 |
+
## Pull Request Process
|
| 231 |
+
|
| 232 |
+
### Before Submitting
|
| 233 |
+
|
| 234 |
+
1. **Ensure your branch is up to date** with the main branch
|
| 235 |
+
2. **Run code quality tools**:
|
| 236 |
+
```shell
|
| 237 |
+
ruff check --fix
|
| 238 |
+
ruff format src
|
| 239 |
+
```
|
| 240 |
+
3. **Test your changes** thoroughly
|
| 241 |
+
4. **Update documentation** if needed
|
| 242 |
+
5. **Write clear commit messages**
|
| 243 |
+
|
| 244 |
+
### Pull Request Template
|
| 245 |
+
|
| 246 |
+
When creating a PR, include:
|
| 247 |
+
|
| 248 |
+
- **Clear title** describing the change
|
| 249 |
+
- **Description** of what changed and why
|
| 250 |
+
- **Issue reference** (e.g., "Closes #123")
|
| 251 |
+
- **Testing performed**
|
| 252 |
+
- **Breaking changes** (if any)
|
| 253 |
+
- **Documentation updates** (if any)
|
| 254 |
+
|
| 255 |
+
### Review Process
|
| 256 |
+
|
| 257 |
+
- Maintainers will review PRs when time permits
|
| 258 |
+
- Be patient as reviews may take time due to limited resources
|
| 259 |
+
- Address feedback constructively
|
| 260 |
+
- Keep discussions focused and professional
|
| 261 |
+
|
| 262 |
+
## Licensing and Attribution
|
| 263 |
+
|
| 264 |
+
### Attribution Requirements
|
| 265 |
+
|
| 266 |
+
When contributing code derived from or inspired by other projects:
|
| 267 |
+
|
| 268 |
+
1. **Add appropriate license headers** to new files
|
| 269 |
+
2. **Include attribution comments** for copied/modified code
|
| 270 |
+
3. **Update the LICENSE section on README.md** if introducing new license requirements for new architectures
|
| 271 |
+
4. **Document the source** in your pull request description
|
| 272 |
+
|
| 273 |
+
### Third-Party Code
|
| 274 |
+
|
| 275 |
+
If your contribution includes third-party code:
|
| 276 |
+
|
| 277 |
+
1. **Ensure license compatibility** with the project
|
| 278 |
+
2. **Include the original license file** or header
|
| 279 |
+
3. **Document the source and license** clearly. Incorporate this in your pull request description as well
|
| 280 |
+
4. **Fulfill all obligations** from the source license
|
| 281 |
+
|
| 282 |
+
## Community and Support
|
| 283 |
+
|
| 284 |
+
### Communication Channels
|
| 285 |
+
|
| 286 |
+
- **GitHub Discussions**: General questions, ideas, and community interaction
|
| 287 |
+
- **GitHub Issues**: Bug reports and feature requests
|
| 288 |
+
- **Pull Requests**: Code contributions and reviews
|
| 289 |
+
|
| 290 |
+
### Getting Help
|
| 291 |
+
|
| 292 |
+
If you need help with:
|
| 293 |
+
|
| 294 |
+
- **Using the software**: Check [GitHub Discussions](https://github.com/kohya-ss/musubi-tuner/discussions)
|
| 295 |
+
- **Development setup**: Create an issue with the "question" label or ask in discussions
|
| 296 |
+
- **Contributing process**: Reference this guide or ask in discussions
|
| 297 |
+
|
| 298 |
+
### Recognition
|
| 299 |
+
|
| 300 |
+
Contributors are recognized through:
|
| 301 |
+
|
| 302 |
+
- **Git commit history**
|
| 303 |
+
- **Release notes** for significant contributions
|
| 304 |
+
- **README acknowledgments** for major features
|
| 305 |
+
|
| 306 |
+
---
|
| 307 |
+
|
| 308 |
+
## Final Notes
|
| 309 |
+
|
| 310 |
+
We appreciate your interest in contributing to Musubi Tuner! This project benefits greatly from community contributions, and we're grateful for your time and effort.
|
| 311 |
+
|
| 312 |
+
Remember:
|
| 313 |
+
- **Start small** with your first contribution
|
| 314 |
+
- **Ask questions** if anything is unclear
|
| 315 |
+
- **Be patient** with the review process
|
| 316 |
+
- **Have fun** building amazing tools!
|
| 317 |
+
|
| 318 |
+
Thank you for helping make Musubi Tuner better for everyone!
|
README.ja.md
ADDED
|
@@ -0,0 +1,314 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Musubi Tuner
|
| 2 |
+
|
| 3 |
+
[English](./README.md) | [日本語](./README.ja.md)
|
| 4 |
+
|
| 5 |
+
## 目次
|
| 6 |
+
|
| 7 |
+
<details>
|
| 8 |
+
<summary>クリックすると展開します</summary>
|
| 9 |
+
|
| 10 |
+
- [はじめに](#はじめに)
|
| 11 |
+
- [スポンサー](#スポンサー)
|
| 12 |
+
- [スポンサー募集のお知らせ](#スポンサー募集のお知らせ)
|
| 13 |
+
- [最近の更新](#最近の更新)
|
| 14 |
+
- [リリースについて](#リリースについて)
|
| 15 |
+
- [AIコーディングエージェントを使用する開発者の方へ](#AIコーディングエージェントを使用する開発者の方へ)
|
| 16 |
+
- [概要](#概要)
|
| 17 |
+
- [ハードウェア要件](#ハードウェア要件)
|
| 18 |
+
- [特徴](#特徴)
|
| 19 |
+
- [ドキュメント](#ドキュメント)
|
| 20 |
+
- [インストール](#インストール)
|
| 21 |
+
- [pipによるインストール](#pipによるインストール)
|
| 22 |
+
- [uvによるインストール](#uvによるインストール)
|
| 23 |
+
- [Linux/MacOS](#linuxmacos)
|
| 24 |
+
- [Windows](#windows)
|
| 25 |
+
- [モデルのダウンロード](#モデルのダウンロード)
|
| 26 |
+
- [使い方](#使い方)
|
| 27 |
+
- [データセット設定](#データセット設定)
|
| 28 |
+
- [事前キャッシュと学習](#事前キャッシュと学習)
|
| 29 |
+
- [Accelerateの設定](#Accelerateの設定)
|
| 30 |
+
- [学習と推論](#学習と推論)
|
| 31 |
+
- [その他](#その他)
|
| 32 |
+
- [SageAttentionのインストール方法](#SageAttentionのインストール方法)
|
| 33 |
+
- [PyTorchのバージョンについて](#PyTorchのバージョンについて)
|
| 34 |
+
- [免責事項](#免責事項)
|
| 35 |
+
- [コントリビューションについて](#コントリビューションについて)
|
| 36 |
+
- [ライセンス](#ライセンス)
|
| 37 |
+
</details>
|
| 38 |
+
|
| 39 |
+
## はじめに
|
| 40 |
+
|
| 41 |
+
このリポジトリは、HunyuanVideo、Wan2.1/2.2、FramePack、FLUX.1 Kontext、FLUX.2 dev/klein、Qwen-Image、Z-Image、および [LTX-2](./docs/ltx_2.md)のLoRA学習用のコマンドラインツールです。このリポジトリは非公式であり、それらの公式リポジトリとは関係ありません。
|
| 42 |
+
|
| 43 |
+
*リポジトリは開発中です。*
|
| 44 |
+
|
| 45 |
+
### スポンサー
|
| 46 |
+
|
| 47 |
+
このプロジェクトを支援してくださる企業・団体の皆様に深く感謝いたします。
|
| 48 |
+
|
| 49 |
+
<a href="https://aihub.co.jp/">
|
| 50 |
+
<img src="./images/logo_aihub.png" alt="AiHUB株式会社" title="AiHUB株式会社" height="100px">
|
| 51 |
+
</a>
|
| 52 |
+
|
| 53 |
+
### スポンサー募集のお知らせ
|
| 54 |
+
|
| 55 |
+
このプロジェクトがお役に立ったなら、ご支援いただけると嬉しく思います。 [GitHub Sponsors](https://github.com/sponsors/kohya-ss/)で受け付けています。
|
| 56 |
+
|
| 57 |
+
### 最近の更新
|
| 58 |
+
|
| 59 |
+
GitHub Discussionsを有効にしました。コミュニティのQ&A、知識共有、技術情報の交換などにご利用ください。バグ報告や機能リクエストにはIssuesを、質問や経験の共有にはDiscussionsをご利用ください。[Discussionはこちら](https://github.com/kohya-ss/musubi-tuner/discussions)
|
| 60 |
+
|
| 61 |
+
- 2026/02/15
|
| 62 |
+
- LoHa/LoKrの学習に対応しました。[PR #900](https://github.com/kohya-ss/musubi-tuner/pull/900)
|
| 63 |
+
- LyCORISのLoHa/LoKrアルゴリズムに基づいて実装されています。LyCORISプロジェクトのKohakuBlueleaf氏に深く感謝します。
|
| 64 |
+
- 詳細は[ドキュメント](./docs/loha_lokr.md)を参照してください。
|
| 65 |
+
- Z-Imageのfine-tuningで、blocks_to_swapを使用している場合に、一部のオプティマイザを使用可能にする`--block_swap_optimizer_patch_params`オプションを追加しました。[PR #899](https://github.com/kohya-ss/musubi-tuner/pull/899)
|
| 66 |
+
- 詳細は[ドキュメント](./docs/zimage.md#finetuning)を参照してください。
|
| 67 |
+
|
| 68 |
+
- 2026/01/29
|
| 69 |
+
- Z-Image-Baseのリリースに伴いLoRA、finetuningの動作確認を行い、共に動作することを確認しました。
|
| 70 |
+
- Z-Imageの[関連ドキュメント](./docs/zimage.md)を修正しました。
|
| 71 |
+
- またZ-ImageのLoRA学習、finetuningでサンプル画像生成が正しく動作しなかったのを修正しました。以上は[PR #861](https://github.com/kohya-ss/musubi-tuner/pull/861)
|
| 72 |
+
|
| 73 |
+
- 2026/01/24
|
| 74 |
+
- FLUX.2 [klein]のLoRA学習が動かなかったのを修正しました。またFLUX.2に関する各種の不具合修正、機能追加を行いました。[PR #858](https://github.com/kohya-ss/musubi-tuner/pull/858)
|
| 75 |
+
- `--model_version`の指定は`flux.2-dev`や`flux.2-klein-4b`等から、`dev`や`klein-4b`等に変更されました。
|
| 76 |
+
- fp8最適化なども動作します。詳細は[ドキュメント](./docs/flux_2.md)を参照してください。
|
| 77 |
+
- klein 9B、devモデル、および複数枚の制御画像を用いた学習は十分にテストされていないため、不具合があればIssueで報告してください。
|
| 78 |
+
|
| 79 |
+
- 2026/01/21
|
| 80 |
+
- FLUX.2 [dev]/[klein]のLoRA学習に対応しました。[PR #841](https://github.com/kohya-ss/musubi-tuner/pull/841) https://www.scenario.com のchristopher5106氏に深く感謝します。
|
| 81 |
+
    - 詳細は[ドキュメント](./docs/flux_2.md)を参照してください。
|
| 82 |
+
|
| 83 |
+
- 2026/01/17
|
| 84 |
+
- Z-ImageのComfyUI向けのLoRA変換について、互換性向上のため `convert_lora.py` を使用するように変更しました。[PR #851](https://github.com/kohya-ss/musubi-tuner/pull/851)
|
| 85 |
+
- 以前の `convert_z_image_lora_to_comfy.py` も引き続き使用可能ですが、nunchakuで正しく動作しない可能性があります。
|
| 86 |
+
- 詳細は[ドキュメント](./docs/zimage.md#converting-lora-weights-to-diffusers-format-for-comfyui--lora重みをcomfyuiで使用可能なdiffusers形式に変換する)を参照してください。
|
| 87 |
+
- [Issue #847](https://github.com/kohya-ss/musubi-tuner/issues/847) で解決策を提供してくださったfai-9氏に感謝します。
|
| 88 |
+
- Qwen-Image-LayeredのLoRA学習で、元画像を学習対象から除外するオプション `--remove_first_image_from_target` を追加しました。[PR #852](https://github.com/kohya-ss/musubi-tuner/pull/852)
|
| 89 |
+
- 詳細は[ドキュメント](./docs/qwen_image.md#lora-training--lora学習)を参照してください。
|
| 90 |
+
|
| 91 |
+
- 2026/01/11
|
| 92 |
+
- Qwen-Image-LayeredのLoRA学習に対応しました。[PR #816](https://github.com/kohya-ss/musubi-tuner/pull/816)
|
| 93 |
+
- 詳細は[ドキュメント](./docs/qwen_image.md)を参照してください。
|
| 94 |
+
- キャッシュ作成、学習、推論の各スクリプトで、`--model_version` オプションに `layered` を指定してください。
|
| 95 |
+
|
| 96 |
+
### リリースについて
|
| 97 |
+
|
| 98 |
+
Musubi Tunerの解説記事執筆や、関連ツールの開発に取り組んでくださる方々に感謝いたします。このプロジェクトは開発中のため、互換性のない変更や機能追加が起きる可能性があります。想定外の互換性問題を避けるため、参照用として[リリース](https://github.com/kohya-ss/musubi-tuner/releases)をお使いください。
|
| 99 |
+
|
| 100 |
+
最新のリリースとバージョン履歴は[リリースページ](https://github.com/kohya-ss/musubi-tuner/releases)で確認できます。
|
| 101 |
+
|
| 102 |
+
### AIコーディングエージェントを使用する開発者の方へ
|
| 103 |
+
|
| 104 |
+
このリポジトリでは、ClaudeやGeminiのようなAIエージェントが、プロジェクトの概要や構造を理解しやすくするためのエージェント向け文書(プロンプト)を用意しています。
|
| 105 |
+
|
| 106 |
+
これらを使用するためには、プロジェクトのルートディレクトリに各エージェント向けの設定ファイルを作成し、明示的に読み込む必要があります。
|
| 107 |
+
|
| 108 |
+
**セットアップ手順:**
|
| 109 |
+
|
| 110 |
+
1. プロジェクトのルートに `CLAUDE.md` や `GEMINI.md`、`AGENTS.md` ファイルを作成します。
|
| 111 |
+
2. `CLAUDE.md` 等に以下の行を追加して、リポジトリが推奨するプロンプトをインポートします(現在、両者はほぼ同じ内容です):
|
| 112 |
+
|
| 113 |
+
```markdown
|
| 114 |
+
@./.ai/claude.prompt.md
|
| 115 |
+
```
|
| 116 |
+
|
| 117 |
+
Geminiの場合はこちらです:
|
| 118 |
+
|
| 119 |
+
```markdown
|
| 120 |
+
@./.ai/gemini.prompt.md
|
| 121 |
+
```
|
| 122 |
+
|
| 123 |
+
他のエージェント向けの設定ファイルでもそれぞれの方法でインポートしてください。
|
| 124 |
+
|
| 125 |
+
3. インポートした行の後に、必要な指示を適宜追加してください(例:`Always respond in Japanese.`)。
|
| 126 |
+
|
| 127 |
+
このアプローチにより、共有されたプロジェクトのコンテキストを活用しつつ、エージェントに与える指示を各ユーザーが自由に制御できます。`CLAUDE.md`、`GEMINI.md` および `AGENTS.md` (またClaude用の `.mcp.json`)はすでに `.gitignore` に記載されているため、リポジトリにコミットされることはありません。
|
| 128 |
+
|
| 129 |
+
## 概要
|
| 130 |
+
|
| 131 |
+
### ハードウェア要件
|
| 132 |
+
|
| 133 |
+
- VRAM: 静止画での学習は12GB以上推奨、動画での学習は24GB以上推奨。
|
| 134 |
+
- *アーキテクチャ、解像度等の学習設定により異なります。*12GBでは解像度 960x544 以下とし、`--blocks_to_swap`、`--fp8_llm`等の省メモリオプションを使用してください。
|
| 135 |
+
- メインメモリ: 64GB以上を推奨、32GB+スワップで動作するかもしれませんが、未検証です。
|
| 136 |
+
|
| 137 |
+
### 特徴
|
| 138 |
+
|
| 139 |
+
- 省メモリに特化
|
| 140 |
+
- Windows対応(Linuxでの動作報告もあります)
|
| 141 |
+
- マルチGPU学習([Accelerate](https://huggingface.co/docs/accelerate/index)を使用)、ドキュメントは後日追加予定
|
| 142 |
+
|
| 143 |
+
### ドキュメント
|
| 144 |
+
|
| 145 |
+
各アーキテクチャの詳細、設定、高度な機能については、以下のドキュメントを参照してください。
|
| 146 |
+
|
| 147 |
+
**アーキテクチャ別:**
|
| 148 |
+
- [HunyuanVideo](./docs/hunyuan_video.md)
|
| 149 |
+
- [Wan2.1/2.2](./docs/wan.md)
|
| 150 |
+
- [Wan2.1/2.2 (1フレーム推論)](./docs/wan_1f.md)
|
| 151 |
+
- [FramePack](./docs/framepack.md)
|
| 152 |
+
- [FramePack (1フレーム推論)](./docs/framepack_1f.md)
|
| 153 |
+
- [FLUX.1 Kontext](./docs/flux_kontext.md)
|
| 154 |
+
- [Qwen-Image](./docs/qwen_image.md)
|
| 155 |
+
- [Z-Image](./docs/zimage.md)
|
| 156 |
+
- [HunyuanVideo 1.5](./docs/hunyuan_video_1_5.md)
|
| 157 |
+
- [Kandinsky 5](./docs/kandinsky5.md)
|
| 158 |
+
- [LTX-2](./docs/ltx_2.md)
|
| 159 |
+
- [FLUX.2](./docs/flux_2.md)
|
| 160 |
+
|
| 161 |
+
**共通設定・その他:**
|
| 162 |
+
- [データセット設定](./docs/dataset_config.md)
|
| 163 |
+
- [高度な設定](./docs/advanced_config.md)
|
| 164 |
+
- [学習中のサンプル生成](./docs/sampling_during_training.md)
|
| 165 |
+
- [ツールとユーティリティ](./docs/tools.md)
|
| 166 |
+
- [torch.compileの使用方法](./docs/torch_compile.md)
|
| 167 |
+
|
| 168 |
+
## インストール
|
| 169 |
+
|
| 170 |
+
### pipによるインストール
|
| 171 |
+
|
| 172 |
+
Python 3.10以上を使用してください(3.10で動作確認済み)。
|
| 173 |
+
|
| 174 |
+
適当な仮想環境を作成し、ご利用のCUDAバージョンに合わせたPyTorchとtorchvisionをインストールしてください。
|
| 175 |
+
|
| 176 |
+
PyTorchはバージョン2.5.1以上を使用してください([補足](#pytorchのバージョンについて))。
|
| 177 |
+
|
| 178 |
+
```bash
|
| 179 |
+
pip install torch torchvision --index-url https://download.pytorch.org/whl/cu124
|
| 180 |
+
```
|
| 181 |
+
|
| 182 |
+
以下のコマンドを使用して、必要な依存関係をインストールします。
|
| 183 |
+
|
| 184 |
+
```bash
|
| 185 |
+
pip install -e .
|
| 186 |
+
```
|
| 187 |
+
|
| 188 |
+
オプションとして、FlashAttention、SageAttention(**推論にのみ使用できます**、インストール方法は[こちら](#sageattentionのインストール方法)を参照)を使用できます。
|
| 189 |
+
|
| 190 |
+
また、`ascii-magic`(データセットの確認に使用)、`matplotlib`(timestepsの可視化に使用)、`tensorboard`(学習ログの記録に使用)、`prompt-toolkit`を必要に応じてインストールしてください。
|
| 191 |
+
|
| 192 |
+
`prompt-toolkit`をインストールするとWan2.1およびFramePackのinteractive modeでの編集に、自動的に使用されます。特にLinux環境でプロンプトの編集が容易になります。
|
| 193 |
+
|
| 194 |
+
```bash
|
| 195 |
+
pip install ascii-magic matplotlib tensorboard prompt-toolkit
|
| 196 |
+
```
|
| 197 |
+
|
| 198 |
+
### uvによるインストール
|
| 199 |
+
|
| 200 |
+
uvを使用してインストールすることもできますが、uvによるインストールは試験的なものです。フィードバックを歓迎します。
|
| 201 |
+
|
| 202 |
+
#### Linux/MacOS
|
| 203 |
+
|
| 204 |
+
```sh
|
| 205 |
+
curl -LsSf https://astral.sh/uv/install.sh | sh
|
| 206 |
+
```
|
| 207 |
+
|
| 208 |
+
表示される指示に従い、pathを設定してください。
|
| 209 |
+
|
| 210 |
+
#### Windows
|
| 211 |
+
|
| 212 |
+
```powershell
|
| 213 |
+
powershell -c "irm https://astral.sh/uv/install.ps1 | iex"
|
| 214 |
+
```
|
| 215 |
+
|
| 216 |
+
表示される指示に従い、PATHを設定するか、この時点でシステムを再起動してください。
|
| 217 |
+
|
| 218 |
+
## モデルのダウンロード
|
| 219 |
+
|
| 220 |
+
モデルのダウンロード手順はアーキテクチャによって異なります。詳細は[ドキュメント](#ドキュメント)セクションにある、各アーキテクチャのドキュメントを参照してください。
|
| 221 |
+
|
| 222 |
+
## 使い方
|
| 223 |
+
|
| 224 |
+
### データセット設定
|
| 225 |
+
|
| 226 |
+
[こちら](./docs/dataset_config.md)を参照してください。
|
| 227 |
+
|
| 228 |
+
### 事前キャッシュ
|
| 229 |
+
|
| 230 |
+
事前キャッシュの手順の詳細は、[ドキュメント](#ドキュメント)セクションにある各アーキテクチャのドキュメントを参照してください。
|
| 231 |
+
|
| 232 |
+
### Accelerateの設定
|
| 233 |
+
|
| 234 |
+
`accelerate config`を実行して、Accelerateの設定を行います。それぞれの質問に、環境に応じた適切な値を選択してください(値を直接入力するか、矢印キーとエンターで選択、大文字がデフォルトなので、デフォルト値でよい場合は何も入力せずエンター)。GPU 1台での学習の場合、以下のように答えてください。
|
| 235 |
+
|
| 236 |
+
```txt
|
| 237 |
+
- In which compute environment are you running?: This machine
|
| 238 |
+
- Which type of machine are you using?: No distributed training
|
| 239 |
+
- Do you want to run your training on CPU only (even if a GPU / Apple Silicon / Ascend NPU device is available)?[yes/NO]: NO
|
| 240 |
+
- Do you wish to optimize your script with torch dynamo?[yes/NO]: NO
|
| 241 |
+
- Do you want to use DeepSpeed? [yes/NO]: NO
|
| 242 |
+
- What GPU(s) (by id) should be used for training on this machine as a comma-seperated list? [all]: all
|
| 243 |
+
- Would you like to enable numa efficiency? (Currently only supported on NVIDIA hardware). [yes/NO]: NO
|
| 244 |
+
- Do you wish to use mixed precision?: bf16
|
| 245 |
+
```
|
| 246 |
+
|
| 247 |
+
※場合によって ``ValueError: fp16 mixed precision requires a GPU`` というエラーが出ることがあるようです。この場合、6番目の質問(
|
| 248 |
+
``What GPU(s) (by id) should be used for training on this machine as a comma-separated list? [all]:``)に「0」と答えてください。(id `0`、つまり1台目のGPUが使われます。)
|
| 249 |
+
|
| 250 |
+
### 学習と推論
|
| 251 |
+
|
| 252 |
+
学習と推論の手順はアーキテクチャによって大きく異なります。詳細な手順については、[ドキュメント](#ドキュメント)セクションにある対応するアーキテクチャのドキュメント、および各種の設定のドキュメントを参照してください。
|
| 253 |
+
|
| 254 |
+
## その他
|
| 255 |
+
|
| 256 |
+
### SageAttentionのインストール方法
|
| 257 |
+
|
| 258 |
+
sdbds氏によるWindows対応のSageAttentionのwheelが https://github.com/sdbds/SageAttention-for-windows で公開されています。triton をインストールし、Python、PyTorch、CUDAのバージョンが一致する場合は、[Releases](https://github.com/sdbds/SageAttention-for-windows/releases)からビルド済みwheelをダウンロードしてインストールすることが可能です。sdbds氏に感謝します。
|
| 259 |
+
|
| 260 |
+
参考までに、以下は、SageAttentionをビルドしインストールするための簡単な手順です。Microsoft Visual C++ 再頒布可能パッケージを最新にする必要があるかもしれません。
|
| 261 |
+
|
| 262 |
+
1. Pythonのバージョンに応じたtriton 3.1.0のwheelを[こちら](https://github.com/woct0rdho/triton-windows/releases/tag/v3.1.0-windows.post5)からダウンロードしてインストールします。
|
| 263 |
+
|
| 264 |
+
2. Microsoft Visual Studio 2022かBuild Tools for Visual Studio 2022を、C++のビルドができるよう設定し、インストールします。(上のRedditの投稿を参照してください)。
|
| 265 |
+
|
| 266 |
+
3. 任意のフォルダにSageAttentionのリポジトリをクローンします。
|
| 267 |
+
```shell
|
| 268 |
+
git clone https://github.com/thu-ml/SageAttention.git
|
| 269 |
+
```
|
| 270 |
+
|
| 271 |
+
4. スタートメニューから Visual Studio 2022 内の `x64 Native Tools Command Prompt for VS 2022` を選択してコマンドプロンプトを開きます。
|
| 272 |
+
|
| 273 |
+
5. venvを有効にし、SageAttentionのフォルダに移動して以下のコマンドを実行します。DISTUTILSが設定されていない、のようなエラーが出た場合は `set DISTUTILS_USE_SDK=1`としてから再度実行してください。
|
| 274 |
+
```shell
|
| 275 |
+
python setup.py install
|
| 276 |
+
```
|
| 277 |
+
|
| 278 |
+
以上でSageAttentionのインストールが完了です。
|
| 279 |
+
|
| 280 |
+
### PyTorchのバージョンについて
|
| 281 |
+
|
| 282 |
+
`--attn_mode`に`torch`を指定する場合、2.5.1以降のPyTorchを使用してください(それより前のバージョンでは生成される動画が真っ黒になるようです)。
|
| 283 |
+
|
| 284 |
+
古いバージョンを使う場合、xformersやSageAttentionを使用してください。
|
| 285 |
+
|
| 286 |
+
## 免責事項
|
| 287 |
+
|
| 288 |
+
このリポジトリは非公式であり、サポートされているアーキテクチャの公式リポジトリとは関係ありません。また、このリポジトリは開発中で、実験的なものです。テストおよびフィードバックを歓迎しますが、以下の点にご注意ください:
|
| 289 |
+
|
| 290 |
+
- 実際の稼働環境での動作を意図したものではありません
|
| 291 |
+
- 機能やAPIは予告なく変更されることがあります
|
| 292 |
+
- いくつもの機能が未検証です
|
| 293 |
+
- 動画学習機能はまだ開発中です
|
| 294 |
+
|
| 295 |
+
問題やバグについては、以下の情報とともにIssueを作成してください:
|
| 296 |
+
|
| 297 |
+
- 問題の詳細な説明
|
| 298 |
+
- 再現手順
|
| 299 |
+
- 環境の詳細(OS、GPU、VRAM、Pythonバージョンなど)
|
| 300 |
+
- 関連するエラーメッセージやログ
|
| 301 |
+
|
| 302 |
+
## コントリビューションについて
|
| 303 |
+
|
| 304 |
+
コントリビューションを歓迎します。 [CONTRIBUTING.md](./CONTRIBUTING.md)および[CONTRIBUTING.ja.md](./CONTRIBUTING.ja.md)をご覧ください。
|
| 305 |
+
|
| 306 |
+
## ライセンス
|
| 307 |
+
|
| 308 |
+
`hunyuan_model`ディレクトリ以下のコードは、[HunyuanVideo](https://github.com/Tencent/HunyuanVideo)のコードを一部改変して使用しているため、そちらのライセンスに従います。
|
| 309 |
+
|
| 310 |
+
`wan`ディレクトリ以下のコードは、[Wan2.1](https://github.com/Wan-Video/Wan2.1)のコードを一部改変して使用しています。ライセンスはApache License 2.0です。
|
| 311 |
+
|
| 312 |
+
`frame_pack`ディレクトリ以下のコードは、[frame_pack](https://github.com/lllyasviel/FramePack)のコードを一部改変して使用しています。ライセンスはApache License 2.0です。
|
| 313 |
+
|
| 314 |
+
他のコードはApache License 2.0に従います。一部Diffusersのコードをコピー、改変して使用しています。
|
README.md
ADDED
|
@@ -0,0 +1,326 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Musubi Tuner
|
| 2 |
+
|
| 3 |
+
[English](./README.md) | [日本語](./README.ja.md)
|
| 4 |
+
|
| 5 |
+
## Table of Contents
|
| 6 |
+
|
| 7 |
+
<details>
|
| 8 |
+
<summary>Click to expand</summary>
|
| 9 |
+
|
| 10 |
+
- [Musubi Tuner](#musubi-tuner)
|
| 11 |
+
- [Table of Contents](#table-of-contents)
|
| 12 |
+
- [Introduction](#introduction)
|
| 13 |
+
- [Sponsors](#sponsors)
|
| 14 |
+
- [Support the Project](#support-the-project)
|
| 15 |
+
- [Recent Updates](#recent-updates)
|
| 16 |
+
- [Releases](#releases)
|
| 17 |
+
- [For Developers Using AI Coding Agents](#for-developers-using-ai-coding-agents)
|
| 18 |
+
- [Overview](#overview)
|
| 19 |
+
- [Hardware Requirements](#hardware-requirements)
|
| 20 |
+
- [Features](#features)
|
| 21 |
+
- [Documentation](#documentation)
|
| 22 |
+
- [Installation](#installation)
|
| 23 |
+
- [pip based installation](#pip-based-installation)
|
| 24 |
+
- [uv based installation](#uv-based-installation-experimental)
|
| 25 |
+
- [Linux/MacOS](#linuxmacos)
|
| 26 |
+
- [Windows](#windows)
|
| 27 |
+
- [Model Download](#model-download)
|
| 28 |
+
- [Usage](#usage)
|
| 29 |
+
- [Dataset Configuration](#dataset-configuration)
|
| 30 |
+
- [Pre-caching and Training](#pre-caching-and-training)
|
| 31 |
+
- [Configuration of Accelerate](#configuration-of-accelerate)
|
| 32 |
+
- [Training and Inference](#training-and-inference)
|
| 33 |
+
- [Miscellaneous](#miscellaneous)
|
| 34 |
+
- [SageAttention Installation](#sageattention-installation)
|
| 35 |
+
- [PyTorch version](#pytorch-version)
|
| 36 |
+
- [Disclaimer](#disclaimer)
|
| 37 |
+
- [Contributing](#contributing)
|
| 38 |
+
- [License](#license)
|
| 39 |
+
|
| 40 |
+
</details>
|
| 41 |
+
|
| 42 |
+
## Introduction
|
| 43 |
+
|
| 44 |
+
This repository provides scripts for training LoRA (Low-Rank Adaptation) models with HunyuanVideo, Wan2.1/2.2, FramePack, FLUX.1 Kontext, FLUX.2 dev/klein, Qwen-Image, Z-Image, and [LTX-2](./docs/ltx_2.md) architectures.
|
| 45 |
+
|
| 46 |
+
This repository is unofficial and not affiliated with the official repositories of these architectures.
|
| 47 |
+
|
| 48 |
+
*This repository is under development.*
|
| 49 |
+
|
| 50 |
+
### Sponsors
|
| 51 |
+
|
| 52 |
+
We are grateful to the following companies for their generous sponsorship:
|
| 53 |
+
|
| 54 |
+
<a href="https://aihub.co.jp/top-en">
|
| 55 |
+
<img src="./images/logo_aihub.png" alt="AiHUB Inc." title="AiHUB Inc." height="100px">
|
| 56 |
+
</a>
|
| 57 |
+
|
| 58 |
+
### Support the Project
|
| 59 |
+
|
| 60 |
+
If you find this project helpful, please consider supporting its development via [GitHub Sponsors](https://github.com/sponsors/kohya-ss/). Your support is greatly appreciated!
|
| 61 |
+
|
| 62 |
+
### Recent Updates
|
| 63 |
+
|
| 64 |
+
GitHub Discussions Enabled: We've enabled GitHub Discussions for community Q&A, knowledge sharing, and technical information exchange. Please use Issues for bug reports and feature requests, and Discussions for questions and sharing experiences. [Join the conversation →](https://github.com/kohya-ss/musubi-tuner/discussions)
|
| 65 |
+
|
| 66 |
+
- February 15, 2026
|
| 67 |
+
- Added support for LoHa/LoKr training. See [PR #900](https://github.com/kohya-ss/musubi-tuner/pull/900)
|
| 68 |
+
- Implemented based on the LoHa/LoKr algorithms from LyCORIS. Special thanks to KohakuBlueleaf from the LyCORIS project.
|
| 69 |
+
- Please refer to the [documentation](./docs/loha_lokr.md) for details.
|
| 70 |
+
- Added `--block_swap_optimizer_patch_params` option to enable the use of some optimizers when using `blocks_to_swap` in Z-Image fine-tuning. See [PR #899](https://github.com/kohya-ss/musubi-tuner/pull/899)
|
| 71 |
+
- Please refer to the [documentation](./docs/zimage.md#finetuning) for details.
|
| 72 |
+
|
| 73 |
+
- January 29, 2026
|
| 74 |
+
- With the release of Z-Image-Base, we have verified that both LoRA and finetuning work correctly.
|
| 75 |
+
- Updated the [related documentation](./docs/zimage.md) for Z-Image.
|
| 76 |
+
- Fixed an issue where sample image generation did not work correctly in LoRA training and finetuning of Z-Image. See [PR #861](https://github.com/kohya-ss/musubi-tuner/pull/861).
|
| 77 |
+
|
| 78 |
+
- January 24, 2026
|
| 79 |
+
- Fixed an issue where LoRA training for FLUX.2 [klein] did not work. Also made various bug fixes and feature additions related to FLUX.2. See [PR #858](https://github.com/kohya-ss/musubi-tuner/pull/858).
|
| 80 |
+
- The `--model_version` specification has changed from `flux.2-dev` or `flux.2-klein-4b` to `dev` or `klein-4b`, etc.
|
| 81 |
+
- fp8 optimization and other features also work. Please refer to the [documentation](./docs/flux_2.md) for details.
|
| 82 |
+
- Since klein 9B, dev models, and training with multiple control images have not been sufficiently tested, please report any issues via Issue.
|
| 83 |
+
|
| 84 |
+
- January 21, 2026
|
| 85 |
+
- Added support for LoRA training of FLUX.2 [dev]/[klein]. See [PR #841](https://github.com/kohya-ss/musubi-tuner/pull/841). Many thanks to christopher5106 from https://www.scenario.com for this contribution.
|
| 86 |
+
- Please refer to the [documentation](./docs/flux_2.md) for details.
|
| 87 |
+
|
| 88 |
+
- January 17, 2026
|
| 89 |
+
- Changed to use `convert_lora.py` for converting Z-Image LoRA for ComfyUI to improve compatibility. See [PR #851](https://github.com/kohya-ss/musubi-tuner/pull/851).
|
| 90 |
+
- The previous `convert_z_image_lora_to_comfy.py` can still be used, but the converted weights may not work correctly with nunchaku.
|
| 91 |
+
    - Please refer to the [documentation](./docs/zimage.md#converting-lora-weights-to-diffusers-format-for-comfyui--lora重みをcomfyuiで使用可能なdiffusers形式に変換する) for details.
|
| 92 |
+
- Many thanks to fai-9 for providing the solution in [Issue #847](https://github.com/kohya-ss/musubi-tuner/issues/847).
|
| 93 |
+
- Added `--remove_first_image_from_target` option for LoRA training of Qwen-Image-Layered. See [PR #852](https://github.com/kohya-ss/musubi-tuner/pull/852).
|
| 94 |
+
- Please refer to the [documentation](./docs/qwen_image.md#lora-training--lora学習) for details.
|
| 95 |
+
|
| 96 |
+
- January 11, 2026
|
| 97 |
+
- Added support for LoRA training of Qwen-Image-Layered. See [PR #816](https://github.com/kohya-ss/musubi-tuner/pull/816).
|
| 98 |
+
- Please refer to the [documentation](./docs/qwen_image.md) for details.
|
| 99 |
+
- In the caching, training, and inference scripts, specify `--model_version` option as `layered`.
|
| 100 |
+
|
| 101 |
+
### Releases
|
| 102 |
+
|
| 103 |
+
We are grateful to everyone who has been contributing to the Musubi Tuner ecosystem through documentation and third-party tools. To support these valuable contributions, we recommend working with our [releases](https://github.com/kohya-ss/musubi-tuner/releases) as stable reference points, as this project is under active development and breaking changes may occur.
|
| 104 |
+
|
| 105 |
+
You can find the latest release and version history in our [releases page](https://github.com/kohya-ss/musubi-tuner/releases).
|
| 106 |
+
|
| 107 |
+
### For Developers Using AI Coding Agents
|
| 108 |
+
|
| 109 |
+
This repository provides recommended instructions to help AI agents like Claude and Gemini understand our project context and coding standards.
|
| 110 |
+
|
| 111 |
+
To use them, you need to opt-in by creating your own configuration file in the project root.
|
| 112 |
+
|
| 113 |
+
**Quick Setup:**
|
| 114 |
+
|
| 115 |
+
1. Create a `CLAUDE.md`, `GEMINI.md`, and/or `AGENTS.md` file in the project root.
|
| 116 |
+
2. Add the following line to your `CLAUDE.md` to import the repository's recommended prompt (currently they are the almost same):
|
| 117 |
+
|
| 118 |
+
```markdown
|
| 119 |
+
@./.ai/claude.prompt.md
|
| 120 |
+
```
|
| 121 |
+
|
| 122 |
+
or for Gemini:
|
| 123 |
+
|
| 124 |
+
```markdown
|
| 125 |
+
@./.ai/gemini.prompt.md
|
| 126 |
+
```
|
| 127 |
+
|
| 128 |
+
You may also import the prompt in other agents' custom configuration files, such as `AGENTS.md`, using each agent's own import mechanism.
|
| 129 |
+
|
| 130 |
+
3. You can now add your own personal instructions below the import line (e.g., `Always include a short summary of the change before diving into details.`).
|
| 131 |
+
|
| 132 |
+
This approach ensures that you have full control over the instructions given to your agent while benefiting from the shared project context. Your `CLAUDE.md`, `GEMINI.md` and `AGENTS.md` (as well as Claude's `.mcp.json`) are already listed in `.gitignore`, so they won't be committed to the repository.
|
| 133 |
+
|
| 134 |
+
## Overview
|
| 135 |
+
|
| 136 |
+
### Hardware Requirements
|
| 137 |
+
|
| 138 |
+
- VRAM: 12GB or more recommended for image training, 24GB or more for video training
|
| 139 |
+
- *Actual requirements depend on resolution and training settings.* For 12GB, use a resolution of 960x544 or lower and use memory-saving options such as `--blocks_to_swap`, `--fp8_llm`, etc.
|
| 140 |
+
- Main Memory: 64GB or more recommended, 32GB + swap may work
|
| 141 |
+
|
| 142 |
+
### Features
|
| 143 |
+
|
| 144 |
+
- Memory-efficient implementation
|
| 145 |
+
- Windows compatibility confirmed (Linux compatibility confirmed by community)
|
| 146 |
+
- Multi-GPU training (using [Accelerate](https://huggingface.co/docs/accelerate/index)), documentation will be added later
|
| 147 |
+
|
| 148 |
+
### Documentation
|
| 149 |
+
|
| 150 |
+
For detailed information on specific architectures, configurations, and advanced features, please refer to the documentation below.
|
| 151 |
+
|
| 152 |
+
**Architecture-specific:**
|
| 153 |
+
- [HunyuanVideo](./docs/hunyuan_video.md)
|
| 154 |
+
- [Wan2.1/2.2](./docs/wan.md)
|
| 155 |
+
- [Wan2.1/2.2 (Single Frame)](./docs/wan_1f.md)
|
| 156 |
+
- [FramePack](./docs/framepack.md)
|
| 157 |
+
- [FramePack (Single Frame)](./docs/framepack_1f.md)
|
| 158 |
+
- [FLUX.1 Kontext](./docs/flux_kontext.md)
|
| 159 |
+
- [Qwen-Image](./docs/qwen_image.md)
|
| 160 |
+
- [Z-Image](./docs/zimage.md)
|
| 161 |
+
- [HunyuanVideo 1.5](./docs/hunyuan_video_1_5.md)
|
| 162 |
+
- [Kandinsky 5](./docs/kandinsky5.md)
|
| 163 |
+
- [LTX-2](./docs/ltx_2.md)
|
| 164 |
+
- [FLUX.2](./docs/flux_2.md)
|
| 165 |
+
|
| 166 |
+
**Common Configuration & Usage:**
|
| 167 |
+
- [Dataset Configuration](./docs/dataset_config.md)
|
| 168 |
+
- [Advanced Configuration](./docs/advanced_config.md)
|
| 169 |
+
- [Sampling during Training](./docs/sampling_during_training.md)
|
| 170 |
+
- [Tools and Utilities](./docs/tools.md)
|
| 171 |
+
- [Using torch.compile](./docs/torch_compile.md)
|
| 172 |
+
|
| 173 |
+
## Installation
|
| 174 |
+
|
| 175 |
+
### pip based installation
|
| 176 |
+
|
| 177 |
+
Python 3.10 or later is required (verified with 3.10).
|
| 178 |
+
|
| 179 |
+
Create a virtual environment and install PyTorch and torchvision matching your CUDA version.
|
| 180 |
+
|
| 181 |
+
PyTorch 2.5.1 or later is required (see [note](#pytorch-version)).
|
| 182 |
+
|
| 183 |
+
```bash
|
| 184 |
+
pip install torch torchvision --index-url https://download.pytorch.org/whl/cu124
|
| 185 |
+
```
|
| 186 |
+
|
| 187 |
+
Install the required dependencies using the following command.
|
| 188 |
+
|
| 189 |
+
```bash
|
| 190 |
+
pip install -e .
|
| 191 |
+
```
|
| 192 |
+
|
| 193 |
+
Optionally, you can use FlashAttention and SageAttention (**for inference only**; see [SageAttention Installation](#sageattention-installation) for installation instructions).
|
| 194 |
+
|
| 195 |
+
Optional dependencies for additional features:
|
| 196 |
+
- `ascii-magic`: Used for dataset verification
|
| 197 |
+
- `matplotlib`: Used for timestep visualization
|
| 198 |
+
- `tensorboard`: Used for logging training progress
|
| 199 |
+
- `prompt-toolkit`: Used for interactive prompt editing in Wan2.1 and FramePack inference scripts. If installed, it will be automatically used in interactive mode. Especially useful in Linux environments for easier prompt editing.
|
| 200 |
+
|
| 201 |
+
```bash
|
| 202 |
+
pip install ascii-magic matplotlib tensorboard prompt-toolkit
|
| 203 |
+
```
|
| 204 |
+
|
| 205 |
+
### uv based installation (experimental)
|
| 206 |
+
|
| 207 |
+
You can also install using uv, but installation with uv is experimental. Feedback is welcome.
|
| 208 |
+
|
| 209 |
+
1. Install uv (if not already present on your OS).
|
| 210 |
+
|
| 211 |
+
#### Linux/MacOS
|
| 212 |
+
|
| 213 |
+
```sh
|
| 214 |
+
curl -LsSf https://astral.sh/uv/install.sh | sh
|
| 215 |
+
```
|
| 216 |
+
|
| 217 |
+
Follow the instructions to add the uv path manually until you restart your session...
|
| 218 |
+
|
| 219 |
+
#### Windows
|
| 220 |
+
|
| 221 |
+
```powershell
|
| 222 |
+
powershell -c "irm https://astral.sh/uv/install.ps1 | iex"
|
| 223 |
+
```
|
| 224 |
+
|
| 225 |
+
Follow the instructions to add the uv path manually until you reboot your system... or just reboot your system at this point.
|
| 226 |
+
|
| 227 |
+
## Model Download
|
| 228 |
+
|
| 229 |
+
Model download procedures vary by architecture. Please refer to the architecture-specific documents in the [Documentation](#documentation) section for instructions.
|
| 230 |
+
|
| 231 |
+
## Usage
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
### Dataset Configuration
|
| 235 |
+
|
| 236 |
+
Please refer to [here](./docs/dataset_config.md).
|
| 237 |
+
|
| 238 |
+
### Pre-caching
|
| 239 |
+
|
| 240 |
+
Pre-caching procedures vary by architecture. Please refer to the architecture-specific documents in the [Documentation](#documentation) section for instructions.
|
| 241 |
+
|
| 242 |
+
### Configuration of Accelerate
|
| 243 |
+
|
| 244 |
+
Run `accelerate config` to configure Accelerate. Choose appropriate values for each question based on your environment (either input values directly or use arrow keys and enter to select; uppercase is default, so if the default value is fine, just press enter without inputting anything). For training with a single GPU, answer the questions as follows:
|
| 245 |
+
|
| 246 |
+
```txt
|
| 247 |
+
- In which compute environment are you running?: This machine
|
| 248 |
+
- Which type of machine are you using?: No distributed training
|
| 249 |
+
- Do you want to run your training on CPU only (even if a GPU / Apple Silicon / Ascend NPU device is available)?[yes/NO]: NO
|
| 250 |
+
- Do you wish to optimize your script with torch dynamo?[yes/NO]: NO
|
| 251 |
+
- Do you want to use DeepSpeed? [yes/NO]: NO
|
| 252 |
+
- What GPU(s) (by id) should be used for training on this machine as a comma-seperated list? [all]: all
|
| 253 |
+
- Would you like to enable numa efficiency? (Currently only supported on NVIDIA hardware). [yes/NO]: NO
|
| 254 |
+
- Do you wish to use mixed precision?: bf16
|
| 255 |
+
```
|
| 256 |
+
|
| 257 |
+
*Note*: In some cases, you may encounter the error `ValueError: fp16 mixed precision requires a GPU`. If this happens, answer "0" to the sixth question (`What GPU(s) (by id) should be used for training on this machine as a comma-separated list? [all]:`). This means that only the first GPU (id `0`) will be used.
|
| 258 |
+
|
| 259 |
+
### Training and Inference
|
| 260 |
+
|
| 261 |
+
Training and inference procedures vary significantly by architecture. Please refer to the architecture-specific documents in the [Documentation](#documentation) section and the various configuration documents for detailed instructions.
|
| 262 |
+
|
| 263 |
+
## Miscellaneous
|
| 264 |
+
|
| 265 |
+
### SageAttention Installation
|
| 266 |
+
|
| 267 |
+
sdbds has provided a Windows-compatible SageAttention implementation and pre-built wheels here: https://github.com/sdbds/SageAttention-for-windows. After installing triton, if your Python, PyTorch, and CUDA versions match, you can download and install the pre-built wheel from the [Releases](https://github.com/sdbds/SageAttention-for-windows/releases) page. Thanks to sdbds for this contribution.
|
| 268 |
+
|
| 269 |
+
For reference, the build and installation instructions are as follows. You may need to update Microsoft Visual C++ Redistributable to the latest version.
|
| 270 |
+
|
| 271 |
+
1. Download and install triton 3.1.0 wheel matching your Python version from [here](https://github.com/woct0rdho/triton-windows/releases/tag/v3.1.0-windows.post5).
|
| 272 |
+
|
| 273 |
+
2. Install Microsoft Visual Studio 2022 or Build Tools for Visual Studio 2022, configured for C++ builds.
|
| 274 |
+
|
| 275 |
+
3. Clone the SageAttention repository in your preferred directory:
|
| 276 |
+
```shell
|
| 277 |
+
git clone https://github.com/thu-ml/SageAttention.git
|
| 278 |
+
```
|
| 279 |
+
|
| 280 |
+
4. Open `x64 Native Tools Command Prompt for VS 2022` from the Start menu under Visual Studio 2022.
|
| 281 |
+
|
| 282 |
+
5. Activate your venv, navigate to the SageAttention folder, and run the following command. If you get a DISTUTILS not configured error, set `set DISTUTILS_USE_SDK=1` and try again:
|
| 283 |
+
```shell
|
| 284 |
+
python setup.py install
|
| 285 |
+
```
|
| 286 |
+
|
| 287 |
+
This completes the SageAttention installation.
|
| 288 |
+
|
| 289 |
+
### PyTorch version
|
| 290 |
+
|
| 291 |
+
If you specify `torch` for `--attn_mode`, use PyTorch 2.5.1 or later (earlier versions may result in black videos).
|
| 292 |
+
|
| 293 |
+
If you use an earlier version, use xformers or SageAttention.
|
| 294 |
+
|
| 295 |
+
## Disclaimer
|
| 296 |
+
|
| 297 |
+
This repository is unofficial and not affiliated with the official repositories of the supported architectures.
|
| 298 |
+
|
| 299 |
+
This repository is experimental and under active development. While we welcome community usage and feedback, please note:
|
| 300 |
+
|
| 301 |
+
- This is not intended for production use
|
| 302 |
+
- Features and APIs may change without notice
|
| 303 |
+
- Some functionalities are still experimental and may not work as expected
|
| 304 |
+
- Video training features are still under development
|
| 305 |
+
|
| 306 |
+
If you encounter any issues or bugs, please create an Issue in this repository with:
|
| 307 |
+
- A detailed description of the problem
|
| 308 |
+
- Steps to reproduce
|
| 309 |
+
- Your environment details (OS, GPU, VRAM, Python version, etc.)
|
| 310 |
+
- Any relevant error messages or logs
|
| 311 |
+
|
| 312 |
+
## Contributing
|
| 313 |
+
|
| 314 |
+
We welcome contributions! Please see [CONTRIBUTING.md](./CONTRIBUTING.md) for details.
|
| 315 |
+
|
| 316 |
+
## License
|
| 317 |
+
|
| 318 |
+
Code under the `hunyuan_model` directory is modified from [HunyuanVideo](https://github.com/Tencent/HunyuanVideo) and follows their license.
|
| 319 |
+
|
| 320 |
+
Code under the `hunyuan_video_1_5` directory is modified from [HunyuanVideo 1.5](https://github.com/Tencent-Hunyuan/HunyuanVideo-1.5) and follows their license.
|
| 321 |
+
|
| 322 |
+
Code under the `wan` directory is modified from [Wan2.1](https://github.com/Wan-Video/Wan2.1). The license is under the Apache License 2.0.
|
| 323 |
+
|
| 324 |
+
Code under the `frame_pack` directory is modified from [FramePack](https://github.com/lllyasviel/FramePack). The license is under the Apache License 2.0.
|
| 325 |
+
|
| 326 |
+
Other code is under the Apache License 2.0. Some code is copied and modified from Diffusers.
|
cache_latents.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.cache_latents import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
cache_text_encoder_outputs.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.cache_text_encoder_outputs import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
caption_images_by_qwen_vl.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.caption_images_by_qwen_vl import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
convert_lora.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.convert_lora import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
flux_2_cache_latents.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.flux_2_cache_latents import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
flux_2_cache_text_encoder_outputs.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.flux_2_cache_text_encoder_outputs import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
flux_2_generate_image.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.flux_2_generate_image import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
flux_2_train_network.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.flux_2_train_network import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
flux_kontext_cache_latents.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.flux_kontext_cache_latents import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
flux_kontext_cache_text_encoder_outputs.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.flux_kontext_cache_text_encoder_outputs import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
flux_kontext_generate_image.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.flux_kontext_generate_image import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
flux_kontext_train_network.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.flux_kontext_train_network import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
fpack_cache_latents.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.fpack_cache_latents import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
fpack_cache_text_encoder_outputs.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.fpack_cache_text_encoder_outputs import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
fpack_generate_video.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.fpack_generate_video import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
fpack_train_network.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.fpack_train_network import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
hv_1_5_cache_latents.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.hv_1_5_cache_latents import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
hv_1_5_cache_text_encoder_outputs.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.hv_1_5_cache_text_encoder_outputs import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
hv_1_5_generate_video.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.hv_1_5_generate_video import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
hv_1_5_train_network.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.hv_1_5_train_network import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
hv_generate_video.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.hv_generate_video import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
hv_train.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.hv_train import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
hv_train_network.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.hv_train_network import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
kandinsky5_cache_latents.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.kandinsky5_cache_latents import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
kandinsky5_cache_text_encoder_outputs.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.kandinsky5_cache_text_encoder_outputs import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
kandinsky5_generate_video.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.kandinsky5_generate_video import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
kandinsky5_train_network.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.kandinsky5_train_network import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
lora_post_hoc_ema.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.lora_post_hoc_ema import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
ltx2_cache_dino_features.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.ltx2_cache_dino_features import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
ltx2_cache_latents.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.ltx2_cache_latents import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
ltx2_cache_text_encoder_outputs.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.ltx2_cache_text_encoder_outputs import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
ltx2_generate_video.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.ltx2_generate_video import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
ltx2_merge_lora.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.ltx2_merge_lora import main
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
if __name__ == "__main__":
|
| 5 |
+
main()
|
ltx2_train_network.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.ltx2_train_network import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
ltx2_train_slider.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
from musubi_tuner.ltx2_train_slider import main
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
if __name__ == "__main__":
|
| 6 |
+
main()
|
pyproject.toml
ADDED
|
@@ -0,0 +1,191 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[project]
|
| 2 |
+
name = "musubi-tuner"
|
| 3 |
+
version = "0.2.15"
|
| 4 |
+
description = "Musubi Tuner by kohya_ss"
|
| 5 |
+
readme = "README.md"
|
| 6 |
+
requires-python = ">=3.10"
|
| 7 |
+
dependencies = [
|
| 8 |
+
"accelerate==1.6.0",
|
| 9 |
+
"av==14.0.1",
|
| 10 |
+
"bitsandbytes",
|
| 11 |
+
"diffusers==0.32.1",
|
| 12 |
+
"einops==0.7.0",
|
| 13 |
+
"huggingface-hub==0.34.3",
|
| 14 |
+
"opencv-python==4.10.0.84",
|
| 15 |
+
"pillow>=11.3.0",
|
| 16 |
+
"safetensors>=0.4.5",
|
| 17 |
+
# "sageattention>=1.0.6",
|
| 18 |
+
"toml==0.10.2",
|
| 19 |
+
"tqdm==4.67.1",
|
| 20 |
+
"transformers==4.56.1",
|
| 21 |
+
"voluptuous==0.15.2",
|
| 22 |
+
# Wan2.1
|
| 23 |
+
"ftfy==6.3.1",
|
| 24 |
+
"easydict==1.13",
|
| 25 |
+
# FLUX.1 Kontext
|
| 26 |
+
"sentencepiece==0.2.1",
|
| 27 |
+
]
|
| 28 |
+
|
| 29 |
+
[project.optional-dependencies]
|
| 30 |
+
cu124 = [
|
| 31 |
+
"torch>=2.5.1",
|
| 32 |
+
"torchvision>=0.20.1",
|
| 33 |
+
]
|
| 34 |
+
cu128 = [
|
| 35 |
+
"torch>=2.7.1",
|
| 36 |
+
"torchvision>=0.22.1",
|
| 37 |
+
]
|
| 38 |
+
cu130 = [
|
| 39 |
+
"torch>=2.9.1",
|
| 40 |
+
"torchvision>=0.24.1",
|
| 41 |
+
]
|
| 42 |
+
gui = [
|
| 43 |
+
"gradio>=4.0.0, <6.0.0",
|
| 44 |
+
]
|
| 45 |
+
dashboard = [
|
| 46 |
+
"fastapi>=0.115.0",
|
| 47 |
+
"uvicorn[standard]>=0.32.0",
|
| 48 |
+
"sse-starlette>=2.0.0",
|
| 49 |
+
"pyarrow>=17.0.0",
|
| 50 |
+
]
|
| 51 |
+
|
| 52 |
+
[tool.uv]
|
| 53 |
+
conflicts = [
|
| 54 |
+
[
|
| 55 |
+
{ extra = "cu124" },
|
| 56 |
+
{ extra = "cu128" },
|
| 57 |
+
{ extra = "cu130" },
|
| 58 |
+
],
|
| 59 |
+
]
|
| 60 |
+
|
| 61 |
+
[dependency-groups]
|
| 62 |
+
dev = [
|
| 63 |
+
"ascii-magic==2.3.0",
|
| 64 |
+
"matplotlib==3.10.0",
|
| 65 |
+
"tensorboard",
|
| 66 |
+
"prompt-toolkit==3.0.51",
|
| 67 |
+
"ruff>=0.12.10",
|
| 68 |
+
]
|
| 69 |
+
|
| 70 |
+
[build-system]
|
| 71 |
+
requires = ["hatchling"]
|
| 72 |
+
build-backend = "hatchling.build"
|
| 73 |
+
|
| 74 |
+
[tool.uv.sources]
|
| 75 |
+
torch = [
|
| 76 |
+
{ index = "pytorch-cu124", extra = "cu124" },
|
| 77 |
+
{ index = "pytorch-cu128", extra = "cu128" },
|
| 78 |
+
{ index = "pytorch-cu130", extra = "cu130" },
|
| 79 |
+
]
|
| 80 |
+
torchvision = [
|
| 81 |
+
{ index = "pytorch-cu124", extra = "cu124" },
|
| 82 |
+
{ index = "pytorch-cu128", extra = "cu128" },
|
| 83 |
+
{ index = "pytorch-cu130", extra = "cu130" },
|
| 84 |
+
]
|
| 85 |
+
|
| 86 |
+
[[tool.uv.index]]
|
| 87 |
+
name = "pytorch-cu124"
|
| 88 |
+
url = "https://download.pytorch.org/whl/cu124"
|
| 89 |
+
explicit = true
|
| 90 |
+
|
| 91 |
+
[[tool.uv.index]]
|
| 92 |
+
name = "pytorch-cu128"
|
| 93 |
+
url = "https://download.pytorch.org/whl/cu128"
|
| 94 |
+
explicit = true
|
| 95 |
+
|
| 96 |
+
[[tool.uv.index]]
|
| 97 |
+
name = "pytorch-cu130"
|
| 98 |
+
url = "https://download.pytorch.org/whl/cu130"
|
| 99 |
+
explicit = true
|
| 100 |
+
|
| 101 |
+
[tool.ruff]
|
| 102 |
+
line-length = 132
|
| 103 |
+
indent-width = 4
|
| 104 |
+
target-version = "py39"
|
| 105 |
+
|
| 106 |
+
lint.ignore = [
|
| 107 |
+
"E402", # module-import-not-at-top-of-file
|
| 108 |
+
"E712", # true-false-comparison
|
| 109 |
+
"E721", # type-comparison
|
| 110 |
+
"E722", # bare-except
|
| 111 |
+
"E731", # lambda-assignment
|
| 112 |
+
"E741", # ambiguous-variable-name
|
| 113 |
+
"F601", # multi-value-repeated-key-literal
|
| 114 |
+
"F841", # unused-variable
|
| 115 |
+
]
|
| 116 |
+
|
| 117 |
+
# Whether to enforce exclude and extend-exclude patterns, even for paths that are passed to Ruff explicitly
|
| 118 |
+
# This is used to ensure that certain files are always excluded via IDE formatters or external tool calls (CI, AI, etc.)
|
| 119 |
+
force-exclude = true
|
| 120 |
+
|
| 121 |
+
# Single-path or Relative patterns
|
| 122 |
+
# Extends the default exclude patterns documented at https://docs.astral.sh/ruff/settings/#exclude
|
| 123 |
+
extend-exclude = [
|
| 124 |
+
"src/musubi_tuner/flux/flux_models.py",
|
| 125 |
+
"src/musubi_tuner/frame_pack/hunyuan.py",
|
| 126 |
+
"src/musubi_tuner/frame_pack/hunyuan_video_packed.py",
|
| 127 |
+
"src/musubi_tuner/frame_pack/k_diffusion_hunyuan.py",
|
| 128 |
+
"src/musubi_tuner/hunyuan_model/autoencoder_kl_causal_3d.py",
|
| 129 |
+
"src/musubi_tuner/hunyuan_model/pipeline_hunyuan_video.py",
|
| 130 |
+
"src/musubi_tuner/modules/scheduling_flow_match_discrete.py",
|
| 131 |
+
"src/musubi_tuner/modules/unet_causal_3d_blocks.py",
|
| 132 |
+
"src/musubi_tuner/wan/configs/__init__.py",
|
| 133 |
+
"src/musubi_tuner/wan/configs/shared_config.py",
|
| 134 |
+
"src/musubi_tuner/wan/configs/wan_i2v_14B.py",
|
| 135 |
+
"src/musubi_tuner/wan/configs/wan_i2v_A14B.py",
|
| 136 |
+
"src/musubi_tuner/wan/configs/wan_t2v_14B.py",
|
| 137 |
+
"src/musubi_tuner/wan/configs/wan_t2v_1_3B.py",
|
| 138 |
+
"src/musubi_tuner/wan/configs/wan_t2v_A14B.py",
|
| 139 |
+
"src/musubi_tuner/wan/modules/attention.py",
|
| 140 |
+
"src/musubi_tuner/wan/modules/clip.py",
|
| 141 |
+
"src/musubi_tuner/wan/modules/model.py",
|
| 142 |
+
"src/musubi_tuner/wan/modules/t5.py",
|
| 143 |
+
"src/musubi_tuner/wan/modules/tokenizers.py",
|
| 144 |
+
"src/musubi_tuner/wan/modules/vae.py",
|
| 145 |
+
"src/musubi_tuner/wan/modules/xlm_roberta.py",
|
| 146 |
+
"src/musubi_tuner/wan/utils/fm_solvers.py",
|
| 147 |
+
"src/musubi_tuner/wan/utils/fm_solvers_unipc.py",
|
| 148 |
+
"src/musubi_tuner/wan/utils/utils.py",
|
| 149 |
+
]
|
| 150 |
+
|
| 151 |
+
[tool.ruff.format]
|
| 152 |
+
quote-style = "double"
|
| 153 |
+
indent-style = "space"
|
| 154 |
+
skip-magic-trailing-comma = false
|
| 155 |
+
line-ending = "auto"
|
| 156 |
+
docstring-code-format = false
|
| 157 |
+
docstring-code-line-length = "dynamic"
|
| 158 |
+
|
| 159 |
+
[tool.ruff.lint.per-file-ignores]
|
| 160 |
+
# Ignore all rules for files with copyright headers from external sources
|
| 161 |
+
# https://docs.astral.sh/ruff/settings/#lint_per-file-ignores
|
| 162 |
+
# # Ignore `E402` (import violations) in all `__init__.py` files, and in `path/to/file.py`.
|
| 163 |
+
# "__init__.py" = ["E402"]
|
| 164 |
+
# "path/to/file.py" = ["E402"]
|
| 165 |
+
# # Ignore `D` rules everywhere except for the `src/` directory.
|
| 166 |
+
# "!src/**.py" = ["D"]
|
| 167 |
+
"src/musubi_tuner/flux/flux_models.py" = ["ALL"]
|
| 168 |
+
"src/musubi_tuner/frame_pack/hunyuan.py" = ["ALL"]
|
| 169 |
+
"src/musubi_tuner/frame_pack/hunyuan_video_packed.py" = ["ALL"]
|
| 170 |
+
"src/musubi_tuner/frame_pack/k_diffusion_hunyuan.py" = ["ALL"]
|
| 171 |
+
"src/musubi_tuner/hunyuan_model/autoencoder_kl_causal_3d.py" = ["ALL"]
|
| 172 |
+
"src/musubi_tuner/hunyuan_model/pipeline_hunyuan_video.py" = ["ALL"]
|
| 173 |
+
"src/musubi_tuner/modules/scheduling_flow_match_discrete.py" = ["ALL"]
|
| 174 |
+
"src/musubi_tuner/modules/unet_causal_3d_blocks.py" = ["ALL"]
|
| 175 |
+
"src/musubi_tuner/wan/configs/__init__.py" = ["ALL"]
|
| 176 |
+
"src/musubi_tuner/wan/configs/shared_config.py" = ["ALL"]
|
| 177 |
+
"src/musubi_tuner/wan/configs/wan_i2v_14B.py" = ["ALL"]
|
| 178 |
+
"src/musubi_tuner/wan/configs/wan_i2v_A14B.py" = ["ALL"]
|
| 179 |
+
"src/musubi_tuner/wan/configs/wan_t2v_14B.py" = ["ALL"]
|
| 180 |
+
"src/musubi_tuner/wan/configs/wan_t2v_1_3B.py" = ["ALL"]
|
| 181 |
+
"src/musubi_tuner/wan/configs/wan_t2v_A14B.py" = ["ALL"]
|
| 182 |
+
"src/musubi_tuner/wan/modules/attention.py" = ["ALL"]
|
| 183 |
+
"src/musubi_tuner/wan/modules/clip.py" = ["ALL"]
|
| 184 |
+
"src/musubi_tuner/wan/modules/model.py" = ["ALL"]
|
| 185 |
+
"src/musubi_tuner/wan/modules/t5.py" = ["ALL"]
|
| 186 |
+
"src/musubi_tuner/wan/modules/tokenizers.py" = ["ALL"]
|
| 187 |
+
"src/musubi_tuner/wan/modules/vae.py" = ["ALL"]
|
| 188 |
+
"src/musubi_tuner/wan/modules/xlm_roberta.py" = ["ALL"]
|
| 189 |
+
"src/musubi_tuner/wan/utils/fm_solvers.py" = ["ALL"]
|
| 190 |
+
"src/musubi_tuner/wan/utils/fm_solvers_unipc.py" = ["ALL"]
|
| 191 |
+
"src/musubi_tuner/wan/utils/utils.py" = ["ALL"]
|
qwen_extract_lora.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.qwen_extract_lora import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
qwen_image_cache_latents.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.qwen_image_cache_latents import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
qwen_image_cache_text_encoder_outputs.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.qwen_image_cache_text_encoder_outputs import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
qwen_image_generate_image.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.qwen_image_generate_image import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
wan_cache_latents.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.wan_cache_latents import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
wan_cache_text_encoder_outputs.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.wan_cache_text_encoder_outputs import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
wan_train_network.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.wan_train_network import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|
zimage_train.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.zimage_train import main
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
main()
|