WensongSong committed
Commit abd08dc · 1 Parent(s): 5c3ac12

init UniGeo demo

This view is limited to 50 files because it contains too many changes.

Files changed (50)
  1. DiffSynth-Studio/diffsynth/__init__.py +1 -0
  2. DiffSynth-Studio/diffsynth/__pycache__/__init__.cpython-39.pyc +0 -0
  3. DiffSynth-Studio/diffsynth/configs/__init__.py +2 -0
  4. DiffSynth-Studio/diffsynth/configs/__pycache__/__init__.cpython-39.pyc +0 -0
  5. DiffSynth-Studio/diffsynth/configs/__pycache__/model_configs.cpython-39.pyc +0 -0
  6. DiffSynth-Studio/diffsynth/configs/__pycache__/vram_management_module_maps.cpython-39.pyc +0 -0
  7. DiffSynth-Studio/diffsynth/configs/model_configs.py +594 -0
  8. DiffSynth-Studio/diffsynth/configs/vram_management_module_maps.py +213 -0
  9. DiffSynth-Studio/diffsynth/core/__init__.py +6 -0
  10. DiffSynth-Studio/diffsynth/core/__pycache__/__init__.cpython-39.pyc +0 -0
  11. DiffSynth-Studio/diffsynth/core/attention/__init__.py +1 -0
  12. DiffSynth-Studio/diffsynth/core/attention/__pycache__/__init__.cpython-39.pyc +0 -0
  13. DiffSynth-Studio/diffsynth/core/attention/__pycache__/attention.cpython-39.pyc +0 -0
  14. DiffSynth-Studio/diffsynth/core/attention/attention.py +121 -0
  15. DiffSynth-Studio/diffsynth/core/data/__init__.py +1 -0
  16. DiffSynth-Studio/diffsynth/core/data/__pycache__/__init__.cpython-39.pyc +0 -0
  17. DiffSynth-Studio/diffsynth/core/data/__pycache__/operators.cpython-39.pyc +0 -0
  18. DiffSynth-Studio/diffsynth/core/data/__pycache__/unified_dataset.cpython-39.pyc +0 -0
  19. DiffSynth-Studio/diffsynth/core/data/operators.py +238 -0
  20. DiffSynth-Studio/diffsynth/core/data/unified_dataset.py +105 -0
  21. DiffSynth-Studio/diffsynth/core/data/unified_dataset_old.py +139 -0
  22. DiffSynth-Studio/diffsynth/core/device/__init__.py +2 -0
  23. DiffSynth-Studio/diffsynth/core/device/__pycache__/__init__.cpython-39.pyc +0 -0
  24. DiffSynth-Studio/diffsynth/core/device/__pycache__/npu_compatible_device.cpython-39.pyc +0 -0
  25. DiffSynth-Studio/diffsynth/core/device/npu_compatible_device.py +107 -0
  26. DiffSynth-Studio/diffsynth/core/gradient/__init__.py +1 -0
  27. DiffSynth-Studio/diffsynth/core/gradient/__pycache__/__init__.cpython-39.pyc +0 -0
  28. DiffSynth-Studio/diffsynth/core/gradient/__pycache__/gradient_checkpoint.cpython-39.pyc +0 -0
  29. DiffSynth-Studio/diffsynth/core/gradient/gradient_checkpoint.py +34 -0
  30. DiffSynth-Studio/diffsynth/core/loader/__init__.py +3 -0
  31. DiffSynth-Studio/diffsynth/core/loader/__pycache__/__init__.cpython-39.pyc +0 -0
  32. DiffSynth-Studio/diffsynth/core/loader/__pycache__/config.cpython-39.pyc +0 -0
  33. DiffSynth-Studio/diffsynth/core/loader/__pycache__/file.cpython-39.pyc +0 -0
  34. DiffSynth-Studio/diffsynth/core/loader/__pycache__/model.cpython-39.pyc +0 -0
  35. DiffSynth-Studio/diffsynth/core/loader/config.py +122 -0
  36. DiffSynth-Studio/diffsynth/core/loader/file.py +121 -0
  37. DiffSynth-Studio/diffsynth/core/loader/model.py +83 -0
  38. DiffSynth-Studio/diffsynth/core/vram/__init__.py +2 -0
  39. DiffSynth-Studio/diffsynth/core/vram/__pycache__/__init__.cpython-39.pyc +0 -0
  40. DiffSynth-Studio/diffsynth/core/vram/__pycache__/disk_map.cpython-39.pyc +0 -0
  41. DiffSynth-Studio/diffsynth/core/vram/__pycache__/initialization.cpython-39.pyc +0 -0
  42. DiffSynth-Studio/diffsynth/core/vram/__pycache__/layers.cpython-39.pyc +0 -0
  43. DiffSynth-Studio/diffsynth/core/vram/disk_map.py +93 -0
  44. DiffSynth-Studio/diffsynth/core/vram/initialization.py +21 -0
  45. DiffSynth-Studio/diffsynth/core/vram/layers.py +479 -0
  46. DiffSynth-Studio/diffsynth/diffusion/__init__.py +6 -0
  47. DiffSynth-Studio/diffsynth/diffusion/__pycache__/__init__.cpython-39.pyc +0 -0
  48. DiffSynth-Studio/diffsynth/diffusion/__pycache__/base_pipeline.cpython-39.pyc +0 -0
  49. DiffSynth-Studio/diffsynth/diffusion/__pycache__/flow_match.cpython-39.pyc +0 -0
  50. DiffSynth-Studio/diffsynth/diffusion/__pycache__/logger.cpython-39.pyc +0 -0
DiffSynth-Studio/diffsynth/__init__.py ADDED
@@ -0,0 +1 @@
+ from .core import *
DiffSynth-Studio/diffsynth/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (167 Bytes).
 
DiffSynth-Studio/diffsynth/configs/__init__.py ADDED
@@ -0,0 +1,2 @@
+ from .model_configs import MODEL_CONFIGS
+ from .vram_management_module_maps import VRAM_MANAGEMENT_MODULE_MAPS
DiffSynth-Studio/diffsynth/configs/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (284 Bytes).
 
DiffSynth-Studio/diffsynth/configs/__pycache__/model_configs.cpython-39.pyc ADDED
Binary file (12.8 kB).
 
DiffSynth-Studio/diffsynth/configs/__pycache__/vram_management_module_maps.cpython-39.pyc ADDED
Binary file (6.28 kB).
 
DiffSynth-Studio/diffsynth/configs/model_configs.py ADDED
@@ -0,0 +1,594 @@
+ qwen_image_series = [
+     {
+         # Example: ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors")
+         "model_hash": "0319a1cb19835fb510907dd3367c95ff",
+         "model_name": "qwen_image_dit",
+         "model_class": "diffsynth.models.qwen_image_dit.QwenImageDiT",
+     },
+     {
+         # Example: ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors")
+         "model_hash": "8004730443f55db63092006dd9f7110e",
+         "model_name": "qwen_image_text_encoder",
+         "model_class": "diffsynth.models.qwen_image_text_encoder.QwenImageTextEncoder",
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.qwen_image_text_encoder.QwenImageTextEncoderStateDictConverter",
+     },
+     {
+         # Example: ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors")
+         "model_hash": "ed4ea5824d55ec3107b09815e318123a",
+         "model_name": "qwen_image_vae",
+         "model_class": "diffsynth.models.qwen_image_vae.QwenImageVAE",
+     },
+     {
+         # Example: ModelConfig(model_id="DiffSynth-Studio/Qwen-Image-Blockwise-ControlNet-Depth", origin_file_pattern="model.safetensors")
+         "model_hash": "073bce9cf969e317e5662cd570c3e79c",
+         "model_name": "qwen_image_blockwise_controlnet",
+         "model_class": "diffsynth.models.qwen_image_controlnet.QwenImageBlockWiseControlNet",
+     },
+     {
+         # Example: ModelConfig(model_id="DiffSynth-Studio/Qwen-Image-Blockwise-ControlNet-Inpaint", origin_file_pattern="model.safetensors")
+         "model_hash": "a9e54e480a628f0b956a688a81c33bab",
+         "model_name": "qwen_image_blockwise_controlnet",
+         "model_class": "diffsynth.models.qwen_image_controlnet.QwenImageBlockWiseControlNet",
+         "extra_kwargs": {"additional_in_dim": 4},
+     },
+     {
+         # Example: ModelConfig(model_id="DiffSynth-Studio/General-Image-Encoders", origin_file_pattern="SigLIP2-G384/model.safetensors")
+         "model_hash": "469c78b61e3e31bc9eec0d0af3d3f2f8",
+         "model_name": "siglip2_image_encoder",
+         "model_class": "diffsynth.models.siglip2_image_encoder.Siglip2ImageEncoder",
+     },
+     {
+         # Example: ModelConfig(model_id="DiffSynth-Studio/General-Image-Encoders", origin_file_pattern="DINOv3-7B/model.safetensors")
+         "model_hash": "5722b5c873720009de96422993b15682",
+         "model_name": "dinov3_image_encoder",
+         "model_class": "diffsynth.models.dinov3_image_encoder.DINOv3ImageEncoder",
+     },
+     {
+         # Example:
+         "model_hash": "a166c33455cdbd89c0888a3645ca5c0f",
+         "model_name": "qwen_image_image2lora_coarse",
+         "model_class": "diffsynth.models.qwen_image_image2lora.QwenImageImage2LoRAModel",
+     },
+     {
+         # Example:
+         "model_hash": "a5476e691767a4da6d3a6634a10f7408",
+         "model_name": "qwen_image_image2lora_fine",
+         "model_class": "diffsynth.models.qwen_image_image2lora.QwenImageImage2LoRAModel",
+         "extra_kwargs": {"residual_length": 37*37+7, "residual_mid_dim": 64}
+     },
+     {
+         # Example:
+         "model_hash": "0aad514690602ecaff932c701cb4b0bb",
+         "model_name": "qwen_image_image2lora_style",
+         "model_class": "diffsynth.models.qwen_image_image2lora.QwenImageImage2LoRAModel",
+         "extra_kwargs": {"compress_dim": 64, "use_residual": False}
+     },
+     {
+         # Example: ModelConfig(model_id="Qwen/Qwen-Image-Layered", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors")
+         "model_hash": "8dc8cda05de16c73afa755e2c1ce2839",
+         "model_name": "qwen_image_dit",
+         "model_class": "diffsynth.models.qwen_image_dit.QwenImageDiT",
+         "extra_kwargs": {"use_layer3d_rope": True, "use_additional_t_cond": True}
+     },
+     {
+         # Example: ModelConfig(model_id="Qwen/Qwen-Image-Layered", origin_file_pattern="vae/diffusion_pytorch_model.safetensors")
+         "model_hash": "44b39ddc499e027cfb24f7878d7416b9",
+         "model_name": "qwen_image_vae",
+         "model_class": "diffsynth.models.qwen_image_vae.QwenImageVAE",
+         "extra_kwargs": {"image_channels": 4}
+     },
+ ]
+
+ wan_series = [
+     {
+         # Example: ModelConfig(model_id="krea/krea-realtime-video", origin_file_pattern="krea-realtime-video-14b.safetensors")
+         "model_hash": "5ec04e02b42d2580483ad69f4e76346a",
+         "model_name": "wan_video_dit",
+         "model_class": "diffsynth.models.wan_video_dit.WanModel",
+         "extra_kwargs": {'has_image_input': False, 'patch_size': [1, 2, 2], 'in_dim': 16, 'dim': 5120, 'ffn_dim': 13824, 'freq_dim': 256, 'text_dim': 4096, 'out_dim': 16, 'num_heads': 40, 'num_layers': 40, 'eps': 1e-06},
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.wan_video_dit.WanVideoDiTStateDictConverter",
+     },
+     {
+         # Example: ModelConfig(model_id="Wan-AI/Wan2.1-T2V-14B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth")
+         "model_hash": "9c8818c2cbea55eca56c7b447df170da",
+         "model_name": "wan_video_text_encoder",
+         "model_class": "diffsynth.models.wan_video_text_encoder.WanTextEncoder",
+     },
+     {
+         # Example: ModelConfig(model_id="Wan-AI/Wan2.1-T2V-14B", origin_file_pattern="Wan2.1_VAE.pth")
+         "model_hash": "ccc42284ea13e1ad04693284c7a09be6",
+         "model_name": "wan_video_vae",
+         "model_class": "diffsynth.models.wan_video_vae.WanVideoVAE",
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.wan_video_vae.WanVideoVAEStateDictConverter",
+     },
+     {
+         # Example: ModelConfig(model_id="meituan-longcat/LongCat-Video", origin_file_pattern="dit/diffusion_pytorch_model*.safetensors")
+         "model_hash": "8b27900f680d7251ce44e2dc8ae1ffef",
+         "model_name": "wan_video_dit",
+         "model_class": "diffsynth.models.longcat_video_dit.LongCatVideoTransformer3DModel",
+     },
+     {
+         # Example: ModelConfig(model_id="ByteDance/Video-As-Prompt-Wan2.1-14B", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors")
+         "model_hash": "5f90e66a0672219f12d9a626c8c21f61",
+         "model_name": "wan_video_dit",
+         "model_class": "diffsynth.models.wan_video_dit.WanModel",
+         "extra_kwargs": {'has_image_input': True, 'patch_size': [1, 2, 2], 'in_dim': 36, 'dim': 5120, 'ffn_dim': 13824, 'freq_dim': 256, 'text_dim': 4096, 'out_dim': 16, 'num_heads': 40, 'num_layers': 40, 'eps': 1e-06},
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.wan_video_dit.WanVideoDiTFromDiffusers"
+     },
+     {
+         # Example: ModelConfig(model_id="ByteDance/Video-As-Prompt-Wan2.1-14B", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors")
+         "model_hash": "5f90e66a0672219f12d9a626c8c21f61",
+         "model_name": "wan_video_vap",
+         "model_class": "diffsynth.models.wan_video_mot.MotWanModel",
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.wan_video_mot.WanVideoMotStateDictConverter"
+     },
+     {
+         # Example: ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-480P", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth")
+         "model_hash": "5941c53e207d62f20f9025686193c40b",
+         "model_name": "wan_video_image_encoder",
+         "model_class": "diffsynth.models.wan_video_image_encoder.WanImageEncoder",
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.wan_video_image_encoder.WanImageEncoderStateDictConverter"
+     },
+     {
+         # Example: ModelConfig(model_id="DiffSynth-Studio/Wan2.1-1.3b-speedcontrol-v1", origin_file_pattern="model.safetensors")
+         "model_hash": "dbd5ec76bbf977983f972c151d545389",
+         "model_name": "wan_video_motion_controller",
+         "model_class": "diffsynth.models.wan_video_motion_controller.WanMotionControllerModel",
+     },
+     {
+         # Example: ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="diffusion_pytorch_model*.safetensors")
+         "model_hash": "9269f8db9040a9d860eaca435be61814",
+         "model_name": "wan_video_dit",
+         "model_class": "diffsynth.models.wan_video_dit.WanModel",
+         "extra_kwargs": {'has_image_input': False, 'patch_size': [1, 2, 2], 'in_dim': 16, 'dim': 1536, 'ffn_dim': 8960, 'freq_dim': 256, 'text_dim': 4096, 'out_dim': 16, 'num_heads': 12, 'num_layers': 30, 'eps': 1e-06}
+     },
+     {
+         # Example: ModelConfig(model_id="Wan-AI/Wan2.1-FLF2V-14B-720P", origin_file_pattern="diffusion_pytorch_model*.safetensors")
+         "model_hash": "3ef3b1f8e1dab83d5b71fd7b617f859f",
+         "model_name": "wan_video_dit",
+         "model_class": "diffsynth.models.wan_video_dit.WanModel",
+         "extra_kwargs": {'has_image_input': True, 'patch_size': [1, 2, 2], 'in_dim': 36, 'dim': 5120, 'ffn_dim': 13824, 'freq_dim': 256, 'text_dim': 4096, 'out_dim': 16, 'num_heads': 40, 'num_layers': 40, 'eps': 1e-06, 'has_image_pos_emb': True}
+     },
+     {
+         # Example: ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-Control", origin_file_pattern="diffusion_pytorch_model*.safetensors")
+         "model_hash": "349723183fc063b2bfc10bb2835cf677",
+         "model_name": "wan_video_dit",
+         "model_class": "diffsynth.models.wan_video_dit.WanModel",
+         "extra_kwargs": {'has_image_input': True, 'patch_size': [1, 2, 2], 'in_dim': 48, 'dim': 1536, 'ffn_dim': 8960, 'freq_dim': 256, 'text_dim': 4096, 'out_dim': 16, 'num_heads': 12, 'num_layers': 30, 'eps': 1e-06}
+     },
+     {
+         # Example: ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-InP", origin_file_pattern="diffusion_pytorch_model*.safetensors")
+         "model_hash": "6d6ccde6845b95ad9114ab993d917893",
+         "model_name": "wan_video_dit",
+         "model_class": "diffsynth.models.wan_video_dit.WanModel",
+         "extra_kwargs": {'has_image_input': True, 'patch_size': [1, 2, 2], 'in_dim': 36, 'dim': 1536, 'ffn_dim': 8960, 'freq_dim': 256, 'text_dim': 4096, 'out_dim': 16, 'num_heads': 12, 'num_layers': 30, 'eps': 1e-06}
+     },
+     {
+         # Example: ModelConfig(model_id="PAI/Wan2.1-Fun-14B-Control", origin_file_pattern="diffusion_pytorch_model*.safetensors")
+         "model_hash": "efa44cddf936c70abd0ea28b6cbe946c",
+         "model_name": "wan_video_dit",
+         "model_class": "diffsynth.models.wan_video_dit.WanModel",
+         "extra_kwargs": {'has_image_input': True, 'patch_size': [1, 2, 2], 'in_dim': 48, 'dim': 5120, 'ffn_dim': 13824, 'freq_dim': 256, 'text_dim': 4096, 'out_dim': 16, 'num_heads': 40, 'num_layers': 40, 'eps': 1e-06}
+     },
+     {
+         # Example: ModelConfig(model_id="PAI/Wan2.1-Fun-14B-InP", origin_file_pattern="diffusion_pytorch_model*.safetensors")
+         "model_hash": "6bfcfb3b342cb286ce886889d519a77e",
+         "model_name": "wan_video_dit",
+         "model_class": "diffsynth.models.wan_video_dit.WanModel",
+         "extra_kwargs": {'has_image_input': True, 'patch_size': [1, 2, 2], 'in_dim': 36, 'dim': 5120, 'ffn_dim': 13824, 'freq_dim': 256, 'text_dim': 4096, 'out_dim': 16, 'num_heads': 40, 'num_layers': 40, 'eps': 1e-06}
+     },
+     {
+         # Example: ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-Control-Camera", origin_file_pattern="diffusion_pytorch_model*.safetensors")
+         "model_hash": "ac6a5aa74f4a0aab6f64eb9a72f19901",
+         "model_name": "wan_video_dit",
+         "model_class": "diffsynth.models.wan_video_dit.WanModel",
+         "extra_kwargs": {'has_image_input': True, 'patch_size': [1, 2, 2], 'in_dim': 32, 'dim': 1536, 'ffn_dim': 8960, 'freq_dim': 256, 'text_dim': 4096, 'out_dim': 16, 'num_heads': 12, 'num_layers': 30, 'eps': 1e-06, 'has_ref_conv': False, 'add_control_adapter': True, 'in_dim_control_adapter': 24}
+     },
+     {
+         # Example: ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-Control", origin_file_pattern="diffusion_pytorch_model*.safetensors")
+         "model_hash": "70ddad9d3a133785da5ea371aae09504",
+         "model_name": "wan_video_dit",
+         "model_class": "diffsynth.models.wan_video_dit.WanModel",
+         "extra_kwargs": {'has_image_input': True, 'patch_size': [1, 2, 2], 'in_dim': 48, 'dim': 1536, 'ffn_dim': 8960, 'freq_dim': 256, 'text_dim': 4096, 'out_dim': 16, 'num_heads': 12, 'num_layers': 30, 'eps': 1e-06, 'has_ref_conv': True}
+     },
+     {
+         # Example: ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control-Camera", origin_file_pattern="diffusion_pytorch_model*.safetensors")
+         "model_hash": "b61c605c2adbd23124d152ed28e049ae",
+         "model_name": "wan_video_dit",
+         "model_class": "diffsynth.models.wan_video_dit.WanModel",
+         "extra_kwargs": {'has_image_input': True, 'patch_size': [1, 2, 2], 'in_dim': 32, 'dim': 5120, 'ffn_dim': 13824, 'freq_dim': 256, 'text_dim': 4096, 'out_dim': 16, 'num_heads': 40, 'num_layers': 40, 'eps': 1e-06, 'has_ref_conv': False, 'add_control_adapter': True, 'in_dim_control_adapter': 24}
+     },
+     {
+         # Example: ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control", origin_file_pattern="diffusion_pytorch_model*.safetensors")
+         "model_hash": "26bde73488a92e64cc20b0a7485b9e5b",
+         "model_name": "wan_video_dit",
+         "model_class": "diffsynth.models.wan_video_dit.WanModel",
+         "extra_kwargs": {'has_image_input': True, 'patch_size': [1, 2, 2], 'in_dim': 48, 'dim': 5120, 'ffn_dim': 13824, 'freq_dim': 256, 'text_dim': 4096, 'out_dim': 16, 'num_heads': 40, 'num_layers': 40, 'eps': 1e-06, 'has_ref_conv': True}
+     },
+     {
+         # Example: ModelConfig(model_id="Wan-AI/Wan2.1-T2V-14B", origin_file_pattern="diffusion_pytorch_model*.safetensors")
+         "model_hash": "aafcfd9672c3a2456dc46e1cb6e52c70",
+         "model_name": "wan_video_dit",
+         "model_class": "diffsynth.models.wan_video_dit.WanModel",
+         "extra_kwargs": {'has_image_input': False, 'patch_size': [1, 2, 2], 'in_dim': 16, 'dim': 5120, 'ffn_dim': 13824, 'freq_dim': 256, 'text_dim': 4096, 'out_dim': 16, 'num_heads': 40, 'num_layers': 40, 'eps': 1e-06}
+     },
+     {
+         # Example: ModelConfig(model_id="iic/VACE-Wan2.1-1.3B-Preview", origin_file_pattern="diffusion_pytorch_model*.safetensors")
+         "model_hash": "a61453409b67cd3246cf0c3bebad47ba",
+         "model_name": "wan_video_dit",
+         "model_class": "diffsynth.models.wan_video_dit.WanModel",
+         "extra_kwargs": {'has_image_input': False, 'patch_size': [1, 2, 2], 'in_dim': 16, 'dim': 1536, 'ffn_dim': 8960, 'freq_dim': 256, 'text_dim': 4096, 'out_dim': 16, 'num_heads': 12, 'num_layers': 30, 'eps': 1e-06},
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.wan_video_dit.WanVideoDiTStateDictConverter",
+     },
+     {
+         # Example: ModelConfig(model_id="iic/VACE-Wan2.1-1.3B-Preview", origin_file_pattern="diffusion_pytorch_model*.safetensors")
+         "model_hash": "a61453409b67cd3246cf0c3bebad47ba",
+         "model_name": "wan_video_vace",
+         "model_class": "diffsynth.models.wan_video_vace.VaceWanModel",
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.wan_video_vace.VaceWanModelDictConverter"
+     },
+     {
+         # Example: ModelConfig(model_id="Wan-AI/Wan2.1-VACE-14B", origin_file_pattern="diffusion_pytorch_model*.safetensors")
+         "model_hash": "7a513e1f257a861512b1afd387a8ecd9",
+         "model_name": "wan_video_dit",
+         "model_class": "diffsynth.models.wan_video_dit.WanModel",
+         "extra_kwargs": {'has_image_input': False, 'patch_size': [1, 2, 2], 'in_dim': 16, 'dim': 5120, 'ffn_dim': 13824, 'freq_dim': 256, 'text_dim': 4096, 'out_dim': 16, 'num_heads': 40, 'num_layers': 40, 'eps': 1e-06},
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.wan_video_dit.WanVideoDiTStateDictConverter",
+     },
+     {
+         # Example: ModelConfig(model_id="Wan-AI/Wan2.1-VACE-14B", origin_file_pattern="diffusion_pytorch_model*.safetensors")
+         "model_hash": "7a513e1f257a861512b1afd387a8ecd9",
+         "model_name": "wan_video_vace",
+         "model_class": "diffsynth.models.wan_video_vace.VaceWanModel",
+         "extra_kwargs": {'vace_layers': (0, 5, 10, 15, 20, 25, 30, 35), 'vace_in_dim': 96, 'patch_size': (1, 2, 2), 'has_image_input': False, 'dim': 5120, 'num_heads': 40, 'ffn_dim': 13824, 'eps': 1e-06},
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.wan_video_vace.VaceWanModelDictConverter"
+     },
+     {
+         # Example: ModelConfig(model_id="Wan-AI/Wan2.2-Animate-14B", origin_file_pattern="diffusion_pytorch_model*.safetensors")
+         "model_hash": "31fa352acb8a1b1d33cd8764273d80a2",
+         "model_name": "wan_video_dit",
+         "model_class": "diffsynth.models.wan_video_dit.WanModel",
+         "extra_kwargs": {'has_image_input': True, 'patch_size': [1, 2, 2], 'in_dim': 36, 'dim': 5120, 'ffn_dim': 13824, 'freq_dim': 256, 'text_dim': 4096, 'out_dim': 16, 'num_heads': 40, 'num_layers': 40, 'eps': 1e-06},
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.wan_video_dit.WanVideoDiTStateDictConverter"
+     },
+     {
+         # Example: ModelConfig(model_id="Wan-AI/Wan2.2-Animate-14B", origin_file_pattern="diffusion_pytorch_model*.safetensors")
+         "model_hash": "31fa352acb8a1b1d33cd8764273d80a2",
+         "model_name": "wan_video_animate_adapter",
+         "model_class": "diffsynth.models.wan_video_animate_adapter.WanAnimateAdapter",
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.wan_video_animate_adapter.WanAnimateAdapterStateDictConverter"
+     },
+     {
+         # Example: ModelConfig(model_id="PAI/Wan2.2-Fun-A14B-Control-Camera", origin_file_pattern="high_noise_model/diffusion_pytorch_model*.safetensors")
+         "model_hash": "47dbeab5e560db3180adf51dc0232fb1",
+         "model_name": "wan_video_dit",
+         "model_class": "diffsynth.models.wan_video_dit.WanModel",
+         "extra_kwargs": {'has_image_input': False, 'patch_size': [1, 2, 2], 'in_dim': 36, 'dim': 5120, 'ffn_dim': 13824, 'freq_dim': 256, 'text_dim': 4096, 'out_dim': 16, 'num_heads': 40, 'num_layers': 40, 'eps': 1e-06, 'has_ref_conv': False, 'add_control_adapter': True, 'in_dim_control_adapter': 24, 'require_clip_embedding': False}
+     },
+     {
+         # Example: ModelConfig(model_id="PAI/Wan2.2-Fun-A14B-Control", origin_file_pattern="high_noise_model/diffusion_pytorch_model*.safetensors")
+         "model_hash": "2267d489f0ceb9f21836532952852ee5",
+         "model_name": "wan_video_dit",
+         "model_class": "diffsynth.models.wan_video_dit.WanModel",
+         "extra_kwargs": {'has_image_input': False, 'patch_size': [1, 2, 2], 'in_dim': 52, 'dim': 5120, 'ffn_dim': 13824, 'freq_dim': 256, 'text_dim': 4096, 'out_dim': 16, 'num_heads': 40, 'num_layers': 40, 'eps': 1e-06, 'has_ref_conv': True, 'require_clip_embedding': False},
+     },
+     {
+         # Example: ModelConfig(model_id="Wan-AI/Wan2.2-I2V-A14B", origin_file_pattern="high_noise_model/diffusion_pytorch_model*.safetensors")
+         "model_hash": "5b013604280dd715f8457c6ed6d6a626",
+         "model_name": "wan_video_dit",
+         "model_class": "diffsynth.models.wan_video_dit.WanModel",
+         "extra_kwargs": {'has_image_input': False, 'patch_size': [1, 2, 2], 'in_dim': 36, 'dim': 5120, 'ffn_dim': 13824, 'freq_dim': 256, 'text_dim': 4096, 'out_dim': 16, 'num_heads': 40, 'num_layers': 40, 'eps': 1e-06, 'require_clip_embedding': False}
+     },
+     {
+         # Example: ModelConfig(model_id="Wan-AI/Wan2.2-S2V-14B", origin_file_pattern="diffusion_pytorch_model*.safetensors")
+         "model_hash": "966cffdcc52f9c46c391768b27637614",
+         "model_name": "wan_video_dit",
+         "model_class": "diffsynth.models.wan_video_dit_s2v.WanS2VModel",
+         "extra_kwargs": {'dim': 5120, 'in_dim': 16, 'ffn_dim': 13824, 'out_dim': 16, 'text_dim': 4096, 'freq_dim': 256, 'eps': 1e-06, 'patch_size': (1, 2, 2), 'num_heads': 40, 'num_layers': 40, 'cond_dim': 16, 'audio_dim': 1024, 'num_audio_token': 4}
+     },
+     {
+         # Example: ModelConfig(model_id="Wan-AI/Wan2.2-TI2V-5B", origin_file_pattern="diffusion_pytorch_model*.safetensors")
+         "model_hash": "1f5ab7703c6fc803fdded85ff040c316",
+         "model_name": "wan_video_dit",
+         "model_class": "diffsynth.models.wan_video_dit.WanModel",
+         "extra_kwargs": {'has_image_input': False, 'patch_size': [1, 2, 2], 'in_dim': 48, 'dim': 3072, 'ffn_dim': 14336, 'freq_dim': 256, 'text_dim': 4096, 'out_dim': 48, 'num_heads': 24, 'num_layers': 30, 'eps': 1e-06, 'seperated_timestep': True, 'require_clip_embedding': False, 'require_vae_embedding': False, 'fuse_vae_embedding_in_latents': True}
+     },
+     {
+         # Example: ModelConfig(model_id="Wan-AI/Wan2.2-TI2V-5B", origin_file_pattern="Wan2.2_VAE.pth")
+         "model_hash": "e1de6c02cdac79f8b739f4d3698cd216",
+         "model_name": "wan_video_vae",
+         "model_class": "diffsynth.models.wan_video_vae.WanVideoVAE38",
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.wan_video_vae.WanVideoVAEStateDictConverter",
+     },
+     {
+         # Example: ModelConfig(model_id="Wan-AI/Wan2.2-S2V-14B", origin_file_pattern="wav2vec2-large-xlsr-53-english/model.safetensors")
+         "model_hash": "06be60f3a4526586d8431cd038a71486",
+         "model_name": "wans2v_audio_encoder",
+         "model_class": "diffsynth.models.wav2vec.WanS2VAudioEncoder",
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.wans2v_audio_encoder.WanS2VAudioEncoderStateDictConverter",
+     },
+ ]
+
+ flux_series = [
+     {
+         # Example: ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="flux1-dev.safetensors")
+         "model_hash": "a29710fea6dddb0314663ee823598e50",
+         "model_name": "flux_dit",
+         "model_class": "diffsynth.models.flux_dit.FluxDiT",
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.flux_dit.FluxDiTStateDictConverter",
+     },
+     {
+         # Supported for historical reasons.
+         "model_hash": "605c56eab23e9e2af863ad8f0813a25d",
+         "model_name": "flux_dit",
+         "model_class": "diffsynth.models.flux_dit.FluxDiT",
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.flux_dit.FluxDiTStateDictConverterFromDiffusers",
+     },
+     {
+         # Example: ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors")
+         "model_hash": "94eefa3dac9cec93cb1ebaf1747d7b78",
+         "model_name": "flux_text_encoder_clip",
+         "model_class": "diffsynth.models.flux_text_encoder_clip.FluxTextEncoderClip",
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.flux_text_encoder_clip.FluxTextEncoderClipStateDictConverter",
+     },
+     {
+         # Example: ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/*.safetensors")
+         "model_hash": "22540b49eaedbc2f2784b2091a234c7c",
+         "model_name": "flux_text_encoder_t5",
+         "model_class": "diffsynth.models.flux_text_encoder_t5.FluxTextEncoderT5",
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.flux_text_encoder_t5.FluxTextEncoderT5StateDictConverter",
+     },
+     {
+         # Example: ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors")
+         "model_hash": "21ea55f476dfc4fd135587abb59dfe5d",
+         "model_name": "flux_vae_encoder",
+         "model_class": "diffsynth.models.flux_vae.FluxVAEEncoder",
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.flux_vae.FluxVAEEncoderStateDictConverter",
+     },
+     {
+         # Example: ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors")
+         "model_hash": "21ea55f476dfc4fd135587abb59dfe5d",
+         "model_name": "flux_vae_decoder",
+         "model_class": "diffsynth.models.flux_vae.FluxVAEDecoder",
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.flux_vae.FluxVAEDecoderStateDictConverter",
+     },
+     {
+         # Example: ModelConfig(model_id="ostris/Flex.2-preview", origin_file_pattern="Flex.2-preview.safetensors")
+         "model_hash": "d02f41c13549fa5093d3521f62a5570a",
+         "model_name": "flux_dit",
+         "model_class": "diffsynth.models.flux_dit.FluxDiT",
+         "extra_kwargs": {'input_dim': 196, 'num_blocks': 8},
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.flux_dit.FluxDiTStateDictConverter",
+     },
+     {
+         # Example: ModelConfig(model_id="DiffSynth-Studio/AttriCtrl-FLUX.1-Dev", origin_file_pattern="models/brightness.safetensors")
+         "model_hash": "0629116fce1472503a66992f96f3eb1a",
+         "model_name": "flux_value_controller",
+         "model_class": "diffsynth.models.flux_value_control.SingleValueEncoder",
+     },
+     {
+         # Example: ModelConfig(model_id="alimama-creative/FLUX.1-dev-Controlnet-Inpainting-Beta", origin_file_pattern="diffusion_pytorch_model.safetensors")
+         "model_hash": "52357cb26250681367488a8954c271e8",
+         "model_name": "flux_controlnet",
+         "model_class": "diffsynth.models.flux_controlnet.FluxControlNet",
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.flux_controlnet.FluxControlNetStateDictConverter",
+         "extra_kwargs": {"num_joint_blocks": 6, "num_single_blocks": 0, "additional_input_dim": 4},
+     },
+     {
+         # Example: ModelConfig(model_id="InstantX/FLUX.1-dev-Controlnet-Union-alpha", origin_file_pattern="diffusion_pytorch_model.safetensors")
+         "model_hash": "78d18b9101345ff695f312e7e62538c0",
+         "model_name": "flux_controlnet",
+         "model_class": "diffsynth.models.flux_controlnet.FluxControlNet",
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.flux_controlnet.FluxControlNetStateDictConverter",
+         "extra_kwargs": {"num_mode": 10, "mode_dict": {"canny": 0, "tile": 1, "depth": 2, "blur": 3, "pose": 4, "gray": 5, "lq": 6}},
+     },
+     {
+         # Example: ModelConfig(model_id="jasperai/Flux.1-dev-Controlnet-Upscaler", origin_file_pattern="diffusion_pytorch_model.safetensors")
+         "model_hash": "b001c89139b5f053c715fe772362dd2a",
+         "model_name": "flux_controlnet",
+         "model_class": "diffsynth.models.flux_controlnet.FluxControlNet",
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.flux_controlnet.FluxControlNetStateDictConverter",
+         "extra_kwargs": {"num_single_blocks": 0},
+     },
+     {
+         # Example: ModelConfig(model_id="ByteDance/InfiniteYou", origin_file_pattern="infu_flux_v1.0/aes_stage2/image_proj_model.bin")
+         "model_hash": "c07c0f04f5ff55e86b4e937c7a40d481",
+         "model_name": "infiniteyou_image_projector",
+         "model_class": "diffsynth.models.flux_infiniteyou.InfiniteYouImageProjector",
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.flux_infiniteyou.FluxInfiniteYouImageProjectorStateDictConverter",
+     },
+     {
+         # Example: ModelConfig(model_id="ByteDance/InfiniteYou", origin_file_pattern="infu_flux_v1.0/aes_stage2/InfuseNetModel/*.safetensors")
+         "model_hash": "7f9583eb8ba86642abb9a21a4b2c9e16",
+         "model_name": "flux_controlnet",
+         "model_class": "diffsynth.models.flux_controlnet.FluxControlNet",
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.flux_controlnet.FluxControlNetStateDictConverter",
+         "extra_kwargs": {"num_joint_blocks": 4, "num_single_blocks": 10},
+     },
+     {
+         # Example: ModelConfig(model_id="DiffSynth-Studio/LoRA-Encoder-FLUX.1-Dev", origin_file_pattern="model.safetensors")
+         "model_hash": "77c2e4dd2440269eb33bfaa0d004f6ab",
+         "model_name": "flux_lora_encoder",
+         "model_class": "diffsynth.models.flux_lora_encoder.FluxLoRAEncoder",
+     },
+     {
+         # Example: ModelConfig(model_id="DiffSynth-Studio/LoRAFusion-preview-FLUX.1-dev", origin_file_pattern="model.safetensors")
+         "model_hash": "30143afb2dea73d1ac580e0787628f8c",
+         "model_name": "flux_lora_patcher",
+         "model_class": "diffsynth.models.flux_lora_patcher.FluxLoraPatcher",
+     },
+     {
+         # Example: ModelConfig(model_id="DiffSynth-Studio/Nexus-GenV2", origin_file_pattern="model*.safetensors")
+         "model_hash": "2bd19e845116e4f875a0a048e27fc219",
+         "model_name": "nexus_gen_llm",
+         "model_class": "diffsynth.models.nexus_gen.NexusGenAutoregressiveModel",
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.nexus_gen.NexusGenAutoregressiveModelStateDictConverter",
+     },
+     {
+         # Example: ModelConfig(model_id="DiffSynth-Studio/Nexus-GenV2", origin_file_pattern="edit_decoder.bin")
+         "model_hash": "63c969fd37cce769a90aa781fbff5f81",
+         "model_name": "nexus_gen_editing_adapter",
+         "model_class": "diffsynth.models.nexus_gen_projector.NexusGenImageEmbeddingMerger",
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.nexus_gen_projector.NexusGenMergerStateDictConverter",
+     },
+     {
+         # Example: ModelConfig(model_id="DiffSynth-Studio/Nexus-GenV2", origin_file_pattern="edit_decoder.bin")
+         "model_hash": "63c969fd37cce769a90aa781fbff5f81",
+         "model_name": "flux_dit",
+         "model_class": "diffsynth.models.flux_dit.FluxDiT",
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.flux_dit.FluxDiTStateDictConverter",
+     },
+     {
+         # Example: ModelConfig(model_id="DiffSynth-Studio/Nexus-GenV2", origin_file_pattern="generation_decoder.bin")
+         "model_hash": "3e6c61b0f9471135fc9c6d6a98e98b6d",
+         "model_name": "nexus_gen_generation_adapter",
+         "model_class": "diffsynth.models.nexus_gen_projector.NexusGenAdapter",
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.nexus_gen_projector.NexusGenAdapterStateDictConverter",
+     },
+     {
+         # Example: ModelConfig(model_id="DiffSynth-Studio/Nexus-GenV2", origin_file_pattern="generation_decoder.bin")
+         "model_hash": "3e6c61b0f9471135fc9c6d6a98e98b6d",
+         "model_name": "flux_dit",
+         "model_class": "diffsynth.models.flux_dit.FluxDiT",
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.flux_dit.FluxDiTStateDictConverter",
+     },
+     {
+         # Example: ModelConfig(model_id="InstantX/FLUX.1-dev-IP-Adapter", origin_file_pattern="ip-adapter.bin")
+         "model_hash": "4daaa66cc656a8fe369908693dad0a35",
+         "model_name": "flux_ipadapter",
+         "model_class": "diffsynth.models.flux_ipadapter.FluxIpAdapter",
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.flux_ipadapter.FluxIpAdapterStateDictConverter",
+     },
+     {
+         # Example: ModelConfig(model_id="google/siglip-so400m-patch14-384", origin_file_pattern="model.safetensors")
+         "model_hash": "04d8c1e20a1f1b25f7434f111992a33f",
+         "model_name": "siglip_vision_model",
+         "model_class": "diffsynth.models.flux_ipadapter.SiglipVisionModelSO400M",
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.flux_ipadapter.SiglipStateDictConverter",
+     },
+     {
+         # Example: ModelConfig(model_id="stepfun-ai/Step1X-Edit", origin_file_pattern="step1x-edit-i1258.safetensors")
+         "model_hash": "d30fb9e02b1dbf4e509142f05cf7dd50",
+         "model_name": "step1x_connector",
+         "model_class": "diffsynth.models.step1x_connector.Qwen2Connector",
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.step1x_connector.Qwen2ConnectorStateDictConverter",
+     },
+     {
+         # Example: ModelConfig(model_id="stepfun-ai/Step1X-Edit", origin_file_pattern="step1x-edit-i1258.safetensors")
+         "model_hash": "d30fb9e02b1dbf4e509142f05cf7dd50",
+         "model_name": "flux_dit",
+         "model_class": "diffsynth.models.flux_dit.FluxDiT",
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.flux_dit.FluxDiTStateDictConverter",
+         "extra_kwargs": {"disable_guidance_embedder": True},
+     },
+     {
+         # Example: ModelConfig(model_id="MAILAND/majicflus_v1", origin_file_pattern="majicflus_v134.safetensors")
+         "model_hash": "3394f306c4cbf04334b712bf5aaed95f",
+         "model_name": "flux_dit",
+         "model_class": "diffsynth.models.flux_dit.FluxDiT",
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.flux_dit.FluxDiTStateDictConverter",
+     },
+ ]
+
+ flux2_series = [
+     {
+         # Example: ModelConfig(model_id="black-forest-labs/FLUX.2-dev", origin_file_pattern="text_encoder/*.safetensors")
+         "model_hash": "28fca3d8e5bf2a2d1271748a773f6757",
+         "model_name": "flux2_text_encoder",
+         "model_class": "diffsynth.models.flux2_text_encoder.Flux2TextEncoder",
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.flux2_text_encoder.Flux2TextEncoderStateDictConverter",
+     },
+     {
+         # Example: ModelConfig(model_id="black-forest-labs/FLUX.2-dev", origin_file_pattern="transformer/*.safetensors")
+         "model_hash": "d38e1d5c5aec3b0a11e79327ac6e3b0f",
+         "model_name": "flux2_dit",
+         "model_class": "diffsynth.models.flux2_dit.Flux2DiT",
+     },
+     {
+         # Example: ModelConfig(model_id="black-forest-labs/FLUX.2-dev", origin_file_pattern="vae/diffusion_pytorch_model.safetensors")
+         "model_hash": "c54288e3ee12ca215898840682337b95",
+         "model_name": "flux2_vae",
+         "model_class": "diffsynth.models.flux2_vae.Flux2VAE",
+     },
+     {
+         # Example: ModelConfig(model_id="black-forest-labs/FLUX.2-klein-4B", origin_file_pattern="transformer/*.safetensors")
+         "model_hash": "3bde7b817fec8143028b6825a63180df",
+         "model_name": "flux2_dit",
+         "model_class": "diffsynth.models.flux2_dit.Flux2DiT",
+         "extra_kwargs": {"guidance_embeds": False, "joint_attention_dim": 7680, "num_attention_heads": 24, "num_layers": 5, "num_single_layers": 20}
+     },
+     {
+         # Example: ModelConfig(model_id="black-forest-labs/FLUX.2-klein-9B", origin_file_pattern="text_encoder/*.safetensors")
+         "model_hash": "9195f3ea256fcd0ae6d929c203470754",
+         "model_name": "z_image_text_encoder",
+         "model_class": "diffsynth.models.z_image_text_encoder.ZImageTextEncoder",
+         "extra_kwargs": {"model_size": "8B"},
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.z_image_text_encoder.ZImageTextEncoderStateDictConverter",
+     },
+     {
+         # Example: ModelConfig(model_id="black-forest-labs/FLUX.2-klein-9B", origin_file_pattern="transformer/*.safetensors")
+         "model_hash": "39c6fc48f07bebecedbbaa971ff466c8",
+         "model_name": "flux2_dit",
+         "model_class": "diffsynth.models.flux2_dit.Flux2DiT",
+         "extra_kwargs": {"guidance_embeds": False, "joint_attention_dim": 12288, "num_attention_heads": 32, "num_layers": 8, "num_single_layers": 24}
+     },
+ ]
+
+ z_image_series = [
+     {
+         # Example: ModelConfig(model_id="Tongyi-MAI/Z-Image-Turbo", origin_file_pattern="transformer/*.safetensors")
+         "model_hash": "fc3a8a1247fe185ce116ccbe0e426c28",
+         "model_name": "z_image_dit",
+         "model_class": "diffsynth.models.z_image_dit.ZImageDiT",
+     },
+     {
+         # Example: ModelConfig(model_id="Tongyi-MAI/Z-Image-Turbo", origin_file_pattern="text_encoder/*.safetensors")
+         "model_hash": "0f050f62a88876fea6eae0a18dac5a2e",
+         "model_name": "z_image_text_encoder",
+         "model_class": "diffsynth.models.z_image_text_encoder.ZImageTextEncoder",
+     },
+     {
+         # Example: ModelConfig(model_id="Tongyi-MAI/Z-Image-Turbo", origin_file_pattern="vae/vae/diffusion_pytorch_model.safetensors")
+         "model_hash": "1aafa3cc91716fb6b300cc1cd51b85a3",
+         "model_name": "flux_vae_encoder",
+         "model_class": "diffsynth.models.flux_vae.FluxVAEEncoder",
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.flux_vae.FluxVAEEncoderStateDictConverterDiffusers",
+         "extra_kwargs": {"use_conv_attention": False},
+     },
+     {
+         # Example: ModelConfig(model_id="Tongyi-MAI/Z-Image-Turbo", origin_file_pattern="vae/vae/diffusion_pytorch_model.safetensors")
+         "model_hash": "1aafa3cc91716fb6b300cc1cd51b85a3",
+         "model_name": "flux_vae_decoder",
+         "model_class": "diffsynth.models.flux_vae.FluxVAEDecoder",
+         "state_dict_converter": "diffsynth.utils.state_dict_converters.flux_vae.FluxVAEDecoderStateDictConverterDiffusers",
+         "extra_kwargs": {"use_conv_attention": False},
+     },
+     {
+         # Example: ModelConfig(model_id="Tongyi-MAI/Z-Image-Omni-Base", origin_file_pattern="transformer/*.safetensors")
+         "model_hash": "aa3563718e5c3ecde3dfbb020ca61180",
+         "model_name": "z_image_dit",
+         "model_class": "diffsynth.models.z_image_dit.ZImageDiT",
+         "extra_kwargs": {"siglip_feat_dim": 1152},
+     },
+     {
+         # Example: ModelConfig(model_id="Tongyi-MAI/Z-Image-Omni-Base", origin_file_pattern="siglip/model.safetensors")
+         "model_hash": "89d48e420f45cff95115a9f3e698d44a",
+         "model_name": "siglip_vision_model_428m",
+         "model_class": "diffsynth.models.siglip2_image_encoder.Siglip2ImageEncoder428M",
+     },
+     {
+         # Example: ModelConfig(model_id="PAI/Z-Image-Turbo-Fun-Controlnet-Union-2.1", origin_file_pattern="Z-Image-Turbo-Fun-Controlnet-Union-2.1-8steps.safetensors")
+         "model_hash": "1677708d40029ab380a95f6c731a57d7",
+         "model_name": "z_image_controlnet",
+         "model_class": "diffsynth.models.z_image_controlnet.ZImageControlNet",
+     },
+     {
+         # Example: ???
+         "model_hash": "9510cb8cd1dd34ee0e4f111c24905510",
+         "model_name": "z_image_image2lora_style",
+         "model_class": "diffsynth.models.z_image_image2lora.ZImageImage2LoRAModel",
+         "extra_kwargs": {"compress_dim": 128},
+     },
+ ]
+
+ MODEL_CONFIGS = qwen_image_series + wan_series + flux_series + flux2_series + z_image_series
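
Note: each entry above keys an architecture by a hash of the checkpoint contents rather than by file name, and a duplicated hash intentionally resolves one file to several models (e.g. the VACE checkpoints load both a WanModel and a VaceWanModel). A minimal sketch of how such a registry might be queried follows; the hashing scheme and the converter interface are assumptions for illustration, not code from this commit.

import hashlib
import importlib

def find_model_configs(state_dict, model_configs):
    # Assumed scheme: hash the sorted tensor names; the real implementation may differ.
    keys = ",".join(sorted(state_dict.keys()))
    state_dict_hash = hashlib.md5(keys.encode("utf-8")).hexdigest()
    return [c for c in model_configs if c["model_hash"] == state_dict_hash]

def load_class(dotted_path):
    # Resolve e.g. "diffsynth.models.flux_dit.FluxDiT" to the class object.
    module_name, class_name = dotted_path.rsplit(".", 1)
    return getattr(importlib.import_module(module_name), class_name)

def build_models(state_dict, model_configs):
    models = []
    for config in find_model_configs(state_dict, model_configs):
        model = load_class(config["model_class"])(**config.get("extra_kwargs", {}))
        sd = state_dict
        if "state_dict_converter" in config:
            # Assumed interface: converters expose a convert() method.
            sd = load_class(config["state_dict_converter"])().convert(sd)
        model.load_state_dict(sd)
        models.append(model)
    return models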
DiffSynth-Studio/diffsynth/configs/vram_management_module_maps.py ADDED
@@ -0,0 +1,213 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ flux_general_vram_config = {
2
+ "torch.nn.Linear": "diffsynth.core.vram.layers.AutoWrappedLinear",
3
+ "torch.nn.Embedding": "diffsynth.core.vram.layers.AutoWrappedModule",
4
+ "torch.nn.LayerNorm": "diffsynth.core.vram.layers.AutoWrappedModule",
5
+ "torch.nn.Conv2d": "diffsynth.core.vram.layers.AutoWrappedModule",
6
+ "torch.nn.GroupNorm": "diffsynth.core.vram.layers.AutoWrappedModule",
7
+ "diffsynth.models.general_modules.RMSNorm": "diffsynth.core.vram.layers.AutoWrappedModule",
8
+ "diffsynth.models.flux_lora_encoder.LoRALayerBlock": "diffsynth.core.vram.layers.AutoWrappedModule",
9
+ "diffsynth.models.flux_lora_patcher.LoraMerger": "diffsynth.core.vram.layers.AutoWrappedModule",
10
+ }
11
+
12
+ VRAM_MANAGEMENT_MODULE_MAPS = {
13
+ "diffsynth.models.qwen_image_dit.QwenImageDiT": {
14
+ "diffsynth.models.qwen_image_dit.RMSNorm": "diffsynth.core.vram.layers.AutoWrappedModule",
15
+ "torch.nn.Linear": "diffsynth.core.vram.layers.AutoWrappedLinear",
16
+ "torch.nn.Embedding": "diffsynth.core.vram.layers.AutoWrappedModule",
17
+ },
18
+ "diffsynth.models.qwen_image_text_encoder.QwenImageTextEncoder": {
19
+ "torch.nn.Linear": "diffsynth.core.vram.layers.AutoWrappedLinear",
20
+ "torch.nn.Embedding": "diffsynth.core.vram.layers.AutoWrappedModule",
21
+ "transformers.models.qwen2_5_vl.modeling_qwen2_5_vl.Qwen2_5_VLRotaryEmbedding": "diffsynth.core.vram.layers.AutoWrappedModule",
22
+ "transformers.models.qwen2_5_vl.modeling_qwen2_5_vl.Qwen2RMSNorm": "diffsynth.core.vram.layers.AutoWrappedModule",
23
+ "transformers.models.qwen2_5_vl.modeling_qwen2_5_vl.Qwen2_5_VisionPatchEmbed": "diffsynth.core.vram.layers.AutoWrappedModule",
24
+ "transformers.models.qwen2_5_vl.modeling_qwen2_5_vl.Qwen2_5_VisionRotaryEmbedding": "diffsynth.core.vram.layers.AutoWrappedModule",
25
+ },
26
+ "diffsynth.models.qwen_image_vae.QwenImageVAE": {
27
+ "torch.nn.Linear": "diffsynth.core.vram.layers.AutoWrappedLinear",
28
+ "torch.nn.Conv3d": "diffsynth.core.vram.layers.AutoWrappedModule",
29
+ "torch.nn.Conv2d": "diffsynth.core.vram.layers.AutoWrappedModule",
30
+ "diffsynth.models.qwen_image_vae.QwenImageRMS_norm": "diffsynth.core.vram.layers.AutoWrappedModule",
31
+ },
32
+ "diffsynth.models.qwen_image_controlnet.BlockWiseControlBlock": {
33
+ "diffsynth.models.qwen_image_dit.RMSNorm": "diffsynth.core.vram.layers.AutoWrappedModule",
34
+ "torch.nn.Linear": "diffsynth.core.vram.layers.AutoWrappedLinear",
35
+ },
36
+ "diffsynth.models.siglip2_image_encoder.Siglip2ImageEncoder": {
37
+ "transformers.models.siglip.modeling_siglip.SiglipVisionEmbeddings": "diffsynth.core.vram.layers.AutoWrappedModule",
38
+ "transformers.models.siglip.modeling_siglip.SiglipMultiheadAttentionPoolingHead": "diffsynth.core.vram.layers.AutoWrappedModule",
39
+ "torch.nn.Conv2d": "diffsynth.core.vram.layers.AutoWrappedModule",
40
+ "torch.nn.Embedding": "diffsynth.core.vram.layers.AutoWrappedModule",
41
+ "torch.nn.LayerNorm": "diffsynth.core.vram.layers.AutoWrappedModule",
42
+ "torch.nn.Linear": "diffsynth.core.vram.layers.AutoWrappedLinear",
43
+ },
44
+ "diffsynth.models.dinov3_image_encoder.DINOv3ImageEncoder": {
45
+ "transformers.models.dinov3_vit.modeling_dinov3_vit.DINOv3ViTLayerScale": "diffsynth.core.vram.layers.AutoWrappedModule",
46
+ "transformers.models.dinov3_vit.modeling_dinov3_vit.DINOv3ViTRopePositionEmbedding": "diffsynth.core.vram.layers.AutoWrappedModule",
47
+ "transformers.models.dinov3_vit.modeling_dinov3_vit.DINOv3ViTEmbeddings": "diffsynth.core.vram.layers.AutoWrappedModule",
48
+ "torch.nn.Conv2d": "diffsynth.core.vram.layers.AutoWrappedModule",
49
+ "torch.nn.LayerNorm": "diffsynth.core.vram.layers.AutoWrappedModule",
50
+ "torch.nn.Linear": "diffsynth.core.vram.layers.AutoWrappedLinear",
51
+ },
52
+ "diffsynth.models.qwen_image_image2lora.QwenImageImage2LoRAModel": {
53
+ "torch.nn.Linear": "diffsynth.core.vram.layers.AutoWrappedLinear",
54
+ },
55
+ "diffsynth.models.wan_video_animate_adapter.WanAnimateAdapter": {
56
+ "diffsynth.models.wan_video_animate_adapter.FaceEncoder": "diffsynth.core.vram.layers.AutoWrappedModule",
57
+ "diffsynth.models.wan_video_animate_adapter.EqualLinear": "diffsynth.core.vram.layers.AutoWrappedModule",
58
+ "diffsynth.models.wan_video_animate_adapter.ConvLayer": "diffsynth.core.vram.layers.AutoWrappedModule",
59
+ "diffsynth.models.wan_video_animate_adapter.FusedLeakyReLU": "diffsynth.core.vram.layers.AutoWrappedModule",
60
+ "diffsynth.models.wan_video_animate_adapter.RMSNorm": "diffsynth.core.vram.layers.AutoWrappedModule",
61
+ "torch.nn.Linear": "diffsynth.core.vram.layers.AutoWrappedLinear",
62
+ "torch.nn.LayerNorm": "diffsynth.core.vram.layers.AutoWrappedModule",
63
+ "torch.nn.Conv1d": "diffsynth.core.vram.layers.AutoWrappedModule",
64
+ "torch.nn.Conv2d": "diffsynth.core.vram.layers.AutoWrappedModule",
65
+ "torch.nn.Conv3d": "diffsynth.core.vram.layers.AutoWrappedModule",
66
+ },
67
+ "diffsynth.models.wan_video_dit_s2v.WanS2VModel": {
68
+ "diffsynth.models.wan_video_dit.Head": "diffsynth.core.vram.layers.AutoWrappedModule",
69
+ "diffsynth.models.wan_video_dit_s2v.WanS2VDiTBlock": "diffsynth.core.vram.layers.AutoWrappedModule",
70
+ "diffsynth.models.wan_video_dit_s2v.CausalAudioEncoder": "diffsynth.core.vram.layers.AutoWrappedModule",
71
+ "torch.nn.Embedding": "diffsynth.core.vram.layers.AutoWrappedModule",
72
+ "torch.nn.Linear": "diffsynth.core.vram.layers.AutoWrappedLinear",
73
+ "torch.nn.Conv3d": "diffsynth.core.vram.layers.AutoWrappedModule",
74
+ "torch.nn.LayerNorm": "diffsynth.core.vram.layers.AutoWrappedModule",
75
+ "diffsynth.models.wan_video_dit.RMSNorm": "diffsynth.core.vram.layers.AutoWrappedModule",
76
+ "torch.nn.Conv2d": "diffsynth.core.vram.layers.AutoWrappedModule",
77
+ },
78
+ "diffsynth.models.wan_video_dit.WanModel": {
79
+ "diffsynth.models.wan_video_dit.MLP": "diffsynth.core.vram.layers.AutoWrappedModule",
80
+ "diffsynth.models.wan_video_dit.DiTBlock": "diffsynth.core.vram.layers.AutoWrappedNonRecurseModule",
81
+ "diffsynth.models.wan_video_dit.Head": "diffsynth.core.vram.layers.AutoWrappedModule",
82
+ "torch.nn.Linear": "diffsynth.core.vram.layers.AutoWrappedLinear",
83
+ "torch.nn.Conv3d": "diffsynth.core.vram.layers.AutoWrappedModule",
84
+ "torch.nn.LayerNorm": "diffsynth.core.vram.layers.AutoWrappedModule",
85
+ "diffsynth.models.wan_video_dit.RMSNorm": "diffsynth.core.vram.layers.AutoWrappedModule",
86
+ "torch.nn.Conv2d": "diffsynth.core.vram.layers.AutoWrappedModule",
87
+ },
88
+ "diffsynth.models.wan_video_image_encoder.WanImageEncoder": {
89
+ "diffsynth.models.wan_video_image_encoder.VisionTransformer": "diffsynth.core.vram.layers.AutoWrappedModule",
90
+ "torch.nn.Linear": "diffsynth.core.vram.layers.AutoWrappedLinear",
91
+ "torch.nn.Conv2d": "diffsynth.core.vram.layers.AutoWrappedModule",
92
+ "torch.nn.LayerNorm": "diffsynth.core.vram.layers.AutoWrappedModule",
93
+ },
94
+ "diffsynth.models.wan_video_mot.MotWanModel": {
95
+ "diffsynth.models.wan_video_mot.MotWanAttentionBlock": "diffsynth.core.vram.layers.AutoWrappedModule",
96
+ "torch.nn.Conv3d": "diffsynth.core.vram.layers.AutoWrappedModule",
97
+ "torch.nn.Linear": "diffsynth.core.vram.layers.AutoWrappedLinear",
98
+ "torch.nn.LayerNorm": "diffsynth.core.vram.layers.AutoWrappedModule",
99
+ },
100
+ "diffsynth.models.wan_video_motion_controller.WanMotionControllerModel": {
101
+ "torch.nn.Linear": "diffsynth.core.vram.layers.AutoWrappedLinear",
102
+ },
103
+ "diffsynth.models.wan_video_text_encoder.WanTextEncoder": {
104
+ "torch.nn.Linear": "diffsynth.core.vram.layers.AutoWrappedLinear",
105
+ "torch.nn.Embedding": "diffsynth.core.vram.layers.AutoWrappedModule",
106
+ "diffsynth.models.wan_video_text_encoder.T5RelativeEmbedding": "diffsynth.core.vram.layers.AutoWrappedModule",
107
+ "diffsynth.models.wan_video_text_encoder.T5LayerNorm": "diffsynth.core.vram.layers.AutoWrappedModule",
108
+ },
109
+ "diffsynth.models.wan_video_vace.VaceWanModel": {
110
+ "diffsynth.models.wan_video_dit.DiTBlock": "diffsynth.core.vram.layers.AutoWrappedModule",
111
+ "torch.nn.Linear": "diffsynth.core.vram.layers.AutoWrappedLinear",
112
+ "torch.nn.Conv3d": "diffsynth.core.vram.layers.AutoWrappedModule",
113
+ "torch.nn.LayerNorm": "diffsynth.core.vram.layers.AutoWrappedModule",
114
+ "diffsynth.models.wan_video_dit.RMSNorm": "diffsynth.core.vram.layers.AutoWrappedModule",
115
+ },
116
+ "diffsynth.models.wan_video_vae.WanVideoVAE": {
117
+ "torch.nn.Linear": "diffsynth.core.vram.layers.AutoWrappedLinear",
118
+ "torch.nn.Conv2d": "diffsynth.core.vram.layers.AutoWrappedModule",
119
+ "diffsynth.models.wan_video_vae.RMS_norm": "diffsynth.core.vram.layers.AutoWrappedModule",
120
+ "diffsynth.models.wan_video_vae.CausalConv3d": "diffsynth.core.vram.layers.AutoWrappedModule",
121
+ "diffsynth.models.wan_video_vae.Upsample": "diffsynth.core.vram.layers.AutoWrappedModule",
122
+ "torch.nn.SiLU": "diffsynth.core.vram.layers.AutoWrappedModule",
123
+ "torch.nn.Dropout": "diffsynth.core.vram.layers.AutoWrappedModule",
124
+ },
125
+ "diffsynth.models.wan_video_vae.WanVideoVAE38": {
126
+ "torch.nn.Linear": "diffsynth.core.vram.layers.AutoWrappedLinear",
127
+ "torch.nn.Conv2d": "diffsynth.core.vram.layers.AutoWrappedModule",
128
+ "diffsynth.models.wan_video_vae.RMS_norm": "diffsynth.core.vram.layers.AutoWrappedModule",
129
+ "diffsynth.models.wan_video_vae.CausalConv3d": "diffsynth.core.vram.layers.AutoWrappedModule",
130
+ "diffsynth.models.wan_video_vae.Upsample": "diffsynth.core.vram.layers.AutoWrappedModule",
131
+ "torch.nn.SiLU": "diffsynth.core.vram.layers.AutoWrappedModule",
132
+ "torch.nn.Dropout": "diffsynth.core.vram.layers.AutoWrappedModule",
133
+ },
134
+ "diffsynth.models.wav2vec.WanS2VAudioEncoder": {
135
+ "torch.nn.Linear": "diffsynth.core.vram.layers.AutoWrappedLinear",
136
+ "torch.nn.LayerNorm": "diffsynth.core.vram.layers.AutoWrappedModule",
137
+ "torch.nn.Conv1d": "diffsynth.core.vram.layers.AutoWrappedModule",
138
+ },
139
+ "diffsynth.models.longcat_video_dit.LongCatVideoTransformer3DModel": {
140
+ "torch.nn.Linear": "diffsynth.core.vram.layers.AutoWrappedLinear",
141
+ "torch.nn.Conv3d": "diffsynth.core.vram.layers.AutoWrappedModule",
142
+ "diffsynth.models.longcat_video_dit.RMSNorm_FP32": "diffsynth.core.vram.layers.AutoWrappedModule",
143
+ "diffsynth.models.longcat_video_dit.LayerNorm_FP32": "diffsynth.core.vram.layers.AutoWrappedModule",
144
+ },
145
+ "diffsynth.models.flux_dit.FluxDiT": {
146
+ "torch.nn.Linear": "diffsynth.core.vram.layers.AutoWrappedLinear",
147
+ "diffsynth.models.flux_dit.RMSNorm": "diffsynth.core.vram.layers.AutoWrappedModule",
148
+ },
149
+ "diffsynth.models.flux_text_encoder_clip.FluxTextEncoderClip": flux_general_vram_config,
150
+ "diffsynth.models.flux_vae.FluxVAEEncoder": flux_general_vram_config,
151
+ "diffsynth.models.flux_vae.FluxVAEDecoder": flux_general_vram_config,
152
+ "diffsynth.models.flux_controlnet.FluxControlNet": flux_general_vram_config,
153
+ "diffsynth.models.flux_infiniteyou.InfiniteYouImageProjector": flux_general_vram_config,
154
+ "diffsynth.models.flux_ipadapter.FluxIpAdapter": flux_general_vram_config,
155
+ "diffsynth.models.flux_lora_patcher.FluxLoraPatcher": flux_general_vram_config,
156
+ "diffsynth.models.step1x_connector.Qwen2Connector": flux_general_vram_config,
157
+ "diffsynth.models.flux_lora_encoder.FluxLoRAEncoder": flux_general_vram_config,
158
+ "diffsynth.models.flux_text_encoder_t5.FluxTextEncoderT5": {
159
+ "torch.nn.Linear": "diffsynth.core.vram.layers.AutoWrappedLinear",
160
+ "torch.nn.Embedding": "diffsynth.core.vram.layers.AutoWrappedModule",
161
+ "transformers.models.t5.modeling_t5.T5LayerNorm": "diffsynth.core.vram.layers.AutoWrappedModule",
162
+ "transformers.models.t5.modeling_t5.T5DenseActDense": "diffsynth.core.vram.layers.AutoWrappedModule",
163
+ "transformers.models.t5.modeling_t5.T5DenseGatedActDense": "diffsynth.core.vram.layers.AutoWrappedModule",
164
+ },
165
+ "diffsynth.models.flux_ipadapter.SiglipVisionModelSO400M": {
166
+ "transformers.models.siglip.modeling_siglip.SiglipVisionEmbeddings": "diffsynth.core.vram.layers.AutoWrappedModule",
167
+ "transformers.models.siglip.modeling_siglip.SiglipEncoder": "diffsynth.core.vram.layers.AutoWrappedModule",
168
+ "transformers.models.siglip.modeling_siglip.SiglipMultiheadAttentionPoolingHead": "diffsynth.core.vram.layers.AutoWrappedModule",
169
+ "torch.nn.MultiheadAttention": "diffsynth.core.vram.layers.AutoWrappedModule",
170
+ "torch.nn.Linear": "diffsynth.core.vram.layers.AutoWrappedLinear",
171
+ "torch.nn.LayerNorm": "diffsynth.core.vram.layers.AutoWrappedModule",
172
+ },
173
+ "diffsynth.models.flux2_dit.Flux2DiT": {
174
+ "torch.nn.Linear": "diffsynth.core.vram.layers.AutoWrappedLinear",
175
+ "torch.nn.LayerNorm": "diffsynth.core.vram.layers.AutoWrappedModule",
176
+ "torch.nn.RMSNorm": "diffsynth.core.vram.layers.AutoWrappedModule",
177
+ },
178
+ "diffsynth.models.flux2_text_encoder.Flux2TextEncoder": {
179
+ "torch.nn.Linear": "diffsynth.core.vram.layers.AutoWrappedLinear",
180
+ "torch.nn.Conv2d": "diffsynth.core.vram.layers.AutoWrappedModule",
181
+ "torch.nn.Embedding": "diffsynth.core.vram.layers.AutoWrappedModule",
182
+ "transformers.models.mistral.modeling_mistral.MistralRMSNorm": "diffsynth.core.vram.layers.AutoWrappedModule",
183
+ },
184
+ "diffsynth.models.flux2_vae.Flux2VAE": {
185
+ "torch.nn.Linear": "diffsynth.core.vram.layers.AutoWrappedLinear",
186
+ "torch.nn.Conv2d": "diffsynth.core.vram.layers.AutoWrappedModule",
187
+ "torch.nn.GroupNorm": "diffsynth.core.vram.layers.AutoWrappedModule",
188
+ },
189
+ "diffsynth.models.z_image_text_encoder.ZImageTextEncoder": {
190
+ "torch.nn.Linear": "diffsynth.core.vram.layers.AutoWrappedLinear",
191
+ "transformers.models.qwen3.modeling_qwen3.Qwen3RMSNorm": "diffsynth.core.vram.layers.AutoWrappedModule",
192
+ "torch.nn.Embedding": "diffsynth.core.vram.layers.AutoWrappedModule",
193
+ },
194
+ "diffsynth.models.z_image_dit.ZImageDiT": {
195
+ "torch.nn.Linear": "diffsynth.core.vram.layers.AutoWrappedLinear",
196
+ "diffsynth.models.z_image_dit.RMSNorm": "diffsynth.core.vram.layers.AutoWrappedModule",
197
+ },
198
+ "diffsynth.models.z_image_controlnet.ZImageControlNet": {
199
+ "torch.nn.Linear": "diffsynth.core.vram.layers.AutoWrappedLinear",
200
+ "diffsynth.models.z_image_dit.RMSNorm": "diffsynth.core.vram.layers.AutoWrappedModule",
201
+ },
202
+ "diffsynth.models.z_image_image2lora.ZImageImage2LoRAModel": {
203
+ "torch.nn.Linear": "diffsynth.core.vram.layers.AutoWrappedLinear",
204
+ },
205
+ "diffsynth.models.siglip2_image_encoder.Siglip2ImageEncoder428M": {
206
+ "transformers.models.siglip2.modeling_siglip2.Siglip2VisionEmbeddings": "diffsynth.core.vram.layers.AutoWrappedModule",
207
+ "transformers.models.siglip2.modeling_siglip2.Siglip2MultiheadAttentionPoolingHead": "diffsynth.core.vram.layers.AutoWrappedModule",
208
+ "torch.nn.Conv2d": "diffsynth.core.vram.layers.AutoWrappedModule",
209
+ "torch.nn.Embedding": "diffsynth.core.vram.layers.AutoWrappedModule",
210
+ "torch.nn.LayerNorm": "diffsynth.core.vram.layers.AutoWrappedModule",
211
+ "torch.nn.Linear": "diffsynth.core.vram.layers.AutoWrappedLinear",
212
+ },
213
+ }
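
For context, each entry above pairs a model class (by import path) with the wrapper class applied to each of its submodule types when VRAM management is enabled. A minimal sketch of registering a hypothetical new model, assuming the top-level dict is exposed under the name `vram_management_module_maps` (the model name and module path below are illustrative only):

    # Hypothetical entry: route Linear layers to the offload-aware Linear wrapper
    # and LayerNorm layers to the generic module wrapper.
    vram_management_module_maps["diffsynth.models.my_model.MyModel"] = {
        "torch.nn.Linear": "diffsynth.core.vram.layers.AutoWrappedLinear",
        "torch.nn.LayerNorm": "diffsynth.core.vram.layers.AutoWrappedModule",
    }
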
DiffSynth-Studio/diffsynth/core/__init__.py ADDED
@@ -0,0 +1,6 @@
+ from .attention import *
+ from .data import *
+ from .gradient import *
+ from .loader import *
+ from .vram import *
+ from .device import *
DiffSynth-Studio/diffsynth/core/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (265 Bytes).
 
DiffSynth-Studio/diffsynth/core/attention/__init__.py ADDED
@@ -0,0 +1 @@
+ from .attention import attention_forward
DiffSynth-Studio/diffsynth/core/attention/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (212 Bytes).

DiffSynth-Studio/diffsynth/core/attention/__pycache__/attention.cpython-39.pyc ADDED
Binary file (3.71 kB).
 
DiffSynth-Studio/diffsynth/core/attention/attention.py ADDED
@@ -0,0 +1,121 @@
+ import torch, os
+ from einops import rearrange
+
+
+ try:
+     import flash_attn_interface
+     FLASH_ATTN_3_AVAILABLE = True
+ except ModuleNotFoundError:
+     FLASH_ATTN_3_AVAILABLE = False
+
+ try:
+     import flash_attn
+     FLASH_ATTN_2_AVAILABLE = True
+ except ModuleNotFoundError:
+     FLASH_ATTN_2_AVAILABLE = False
+
+ try:
+     from sageattention import sageattn
+     SAGE_ATTN_AVAILABLE = True
+ except ModuleNotFoundError:
+     SAGE_ATTN_AVAILABLE = False
+
+ try:
+     import xformers.ops as xops
+     XFORMERS_AVAILABLE = True
+ except ModuleNotFoundError:
+     XFORMERS_AVAILABLE = False
+
+
+ def initialize_attention_priority():
+     if os.environ.get('DIFFSYNTH_ATTENTION_IMPLEMENTATION') is not None:
+         return os.environ.get('DIFFSYNTH_ATTENTION_IMPLEMENTATION').lower()
+     elif FLASH_ATTN_3_AVAILABLE:
+         return "flash_attention_3"
+     elif FLASH_ATTN_2_AVAILABLE:
+         return "flash_attention_2"
+     elif SAGE_ATTN_AVAILABLE:
+         return "sage_attention"
+     elif XFORMERS_AVAILABLE:
+         return "xformers"
+     else:
+         return "torch"
+
+
+ ATTENTION_IMPLEMENTATION = initialize_attention_priority()
+
+
+ def rearrange_qkv(q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, q_pattern="b n s d", k_pattern="b n s d", v_pattern="b n s d", required_in_pattern="b n s d", dims=None):
+     dims = {} if dims is None else dims
+     if q_pattern != required_in_pattern:
+         q = rearrange(q, f"{q_pattern} -> {required_in_pattern}", **dims)
+     if k_pattern != required_in_pattern:
+         k = rearrange(k, f"{k_pattern} -> {required_in_pattern}", **dims)
+     if v_pattern != required_in_pattern:
+         v = rearrange(v, f"{v_pattern} -> {required_in_pattern}", **dims)
+     return q, k, v
+
+
+ def rearrange_out(out: torch.Tensor, out_pattern="b n s d", required_out_pattern="b n s d", dims=None):
+     dims = {} if dims is None else dims
+     if out_pattern != required_out_pattern:
+         out = rearrange(out, f"{required_out_pattern} -> {out_pattern}", **dims)
+     return out
+
+
+ def torch_sdpa(q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, q_pattern="b n s d", k_pattern="b n s d", v_pattern="b n s d", out_pattern="b n s d", dims=None, attn_mask=None, scale=None):
+     required_in_pattern, required_out_pattern = "b n s d", "b n s d"
+     q, k, v = rearrange_qkv(q, k, v, q_pattern, k_pattern, v_pattern, required_in_pattern, dims)
+     out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask, scale=scale)
+     out = rearrange_out(out, out_pattern, required_out_pattern, dims)
+     return out
+
+
+ def flash_attention_3(q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, q_pattern="b n s d", k_pattern="b n s d", v_pattern="b n s d", out_pattern="b n s d", dims=None, scale=None):
+     required_in_pattern, required_out_pattern = "b s n d", "b s n d"
+     q, k, v = rearrange_qkv(q, k, v, q_pattern, k_pattern, v_pattern, required_in_pattern, dims)
+     out = flash_attn_interface.flash_attn_func(q, k, v, softmax_scale=scale)
+     if isinstance(out, tuple):
+         out = out[0]
+     out = rearrange_out(out, out_pattern, required_out_pattern, dims)
+     return out
+
+
+ def flash_attention_2(q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, q_pattern="b n s d", k_pattern="b n s d", v_pattern="b n s d", out_pattern="b n s d", dims=None, scale=None):
+     required_in_pattern, required_out_pattern = "b s n d", "b s n d"
+     q, k, v = rearrange_qkv(q, k, v, q_pattern, k_pattern, v_pattern, required_in_pattern, dims)
+     out = flash_attn.flash_attn_func(q, k, v, softmax_scale=scale)
+     out = rearrange_out(out, out_pattern, required_out_pattern, dims)
+     return out
+
+
+ def sage_attention(q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, q_pattern="b n s d", k_pattern="b n s d", v_pattern="b n s d", out_pattern="b n s d", dims=None, scale=None):
+     required_in_pattern, required_out_pattern = "b n s d", "b n s d"
+     q, k, v = rearrange_qkv(q, k, v, q_pattern, k_pattern, v_pattern, required_in_pattern, dims)
+     out = sageattn(q, k, v, sm_scale=scale)
+     out = rearrange_out(out, out_pattern, required_out_pattern, dims)
+     return out
+
+
+ def xformers_attention(q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, q_pattern="b n s d", k_pattern="b n s d", v_pattern="b n s d", out_pattern="b n s d", dims=None, scale=None):
+     required_in_pattern, required_out_pattern = "b s n d", "b s n d"
+     q, k, v = rearrange_qkv(q, k, v, q_pattern, k_pattern, v_pattern, required_in_pattern, dims)
+     out = xops.memory_efficient_attention(q, k, v, scale=scale)
+     out = rearrange_out(out, out_pattern, required_out_pattern, dims)
+     return out
+
+
+ def attention_forward(q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, q_pattern="b n s d", k_pattern="b n s d", v_pattern="b n s d", out_pattern="b n s d", dims=None, attn_mask=None, scale=None, compatibility_mode=False):
+     if compatibility_mode or (attn_mask is not None):
+         return torch_sdpa(q, k, v, q_pattern, k_pattern, v_pattern, out_pattern, dims, attn_mask=attn_mask, scale=scale)
+     else:
+         if ATTENTION_IMPLEMENTATION == "flash_attention_3":
+             return flash_attention_3(q, k, v, q_pattern, k_pattern, v_pattern, out_pattern, dims, scale=scale)
+         elif ATTENTION_IMPLEMENTATION == "flash_attention_2":
+             return flash_attention_2(q, k, v, q_pattern, k_pattern, v_pattern, out_pattern, dims, scale=scale)
+         elif ATTENTION_IMPLEMENTATION == "sage_attention":
+             return sage_attention(q, k, v, q_pattern, k_pattern, v_pattern, out_pattern, dims, scale=scale)
+         elif ATTENTION_IMPLEMENTATION == "xformers":
+             return xformers_attention(q, k, v, q_pattern, k_pattern, v_pattern, out_pattern, dims, scale=scale)
+         else:
+             return torch_sdpa(q, k, v, q_pattern, k_pattern, v_pattern, out_pattern, dims, scale=scale)
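
A minimal usage sketch of `attention_forward` with the default "b n s d" layout; the tensor shapes are illustrative. The backend can also be pinned through the `DIFFSYNTH_ATTENTION_IMPLEMENTATION` environment variable read above (e.g. `torch` to force the SDPA fallback):

    import torch
    from diffsynth.core.attention import attention_forward

    # (batch, num_heads, sequence, head_dim), i.e. the default "b n s d" pattern.
    q = torch.randn(1, 8, 256, 64)
    k = torch.randn(1, 8, 256, 64)
    v = torch.randn(1, 8, 256, 64)

    out = attention_forward(q, k, v)  # dispatches to the highest-priority available backend
    assert out.shape == q.shape
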
DiffSynth-Studio/diffsynth/core/data/__init__.py ADDED
@@ -0,0 +1 @@
+ from .unified_dataset import UnifiedDataset
DiffSynth-Studio/diffsynth/core/data/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (210 Bytes).

DiffSynth-Studio/diffsynth/core/data/__pycache__/operators.cpython-39.pyc ADDED
Binary file (10.2 kB).

DiffSynth-Studio/diffsynth/core/data/__pycache__/unified_dataset.cpython-39.pyc ADDED
Binary file (3.1 kB).
 
DiffSynth-Studio/diffsynth/core/data/operators.py ADDED
@@ -0,0 +1,238 @@
+ import torch, torchvision, imageio, os
+ import imageio.v3 as iio
+ from PIL import Image
+ import numpy as np
+
+
+ class DataProcessingPipeline:
+     def __init__(self, operators=None):
+         self.operators: list[DataProcessingOperator] = [] if operators is None else operators
+
+     def __call__(self, data):
+         for operator in self.operators:
+             data = operator(data)
+         return data
+
+     def __rshift__(self, pipe):
+         if isinstance(pipe, DataProcessingOperator):
+             pipe = DataProcessingPipeline([pipe])
+         return DataProcessingPipeline(self.operators + pipe.operators)
+
+
+ class DataProcessingOperator:
+     def __call__(self, data):
+         raise NotImplementedError("DataProcessingOperator cannot be called directly.")
+
+     def __rshift__(self, pipe):
+         if isinstance(pipe, DataProcessingOperator):
+             pipe = DataProcessingPipeline([pipe])
+         return DataProcessingPipeline([self]).__rshift__(pipe)
+
+
+ class DataProcessingOperatorRaw(DataProcessingOperator):
+     def __call__(self, data):
+         return data
+
+
+ class ToInt(DataProcessingOperator):
+     def __call__(self, data):
+         return int(data)
+
+
+ class ToFloat(DataProcessingOperator):
+     def __call__(self, data):
+         return float(data)
+
+
+ class ToStr(DataProcessingOperator):
+     def __init__(self, none_value=""):
+         self.none_value = none_value
+
+     def __call__(self, data):
+         if data is None:
+             data = self.none_value
+         return str(data)
+
+
+ class LoadImage(DataProcessingOperator):
+     def __init__(self, convert_RGB=True, convert_RGBA=False):
+         self.convert_RGB = convert_RGB
+         self.convert_RGBA = convert_RGBA
+
+     def __call__(self, data: str):
+         image = Image.open(data)
+         if self.convert_RGB:
+             image = image.convert("RGB")
+         if self.convert_RGBA:
+             image = image.convert("RGBA")
+         return image
+
+
+ class ImageCropAndResize(DataProcessingOperator):
+     def __init__(self, height=None, width=None, max_pixels=None, height_division_factor=1, width_division_factor=1):
+         self.height = height
+         self.width = width
+         self.max_pixels = max_pixels
+         self.height_division_factor = height_division_factor
+         self.width_division_factor = width_division_factor
+
+     def crop_and_resize(self, image, target_height, target_width):
+         width, height = image.size
+         scale = max(target_width / width, target_height / height)
+         image = torchvision.transforms.functional.resize(
+             image,
+             (round(height*scale), round(width*scale)),
+             interpolation=torchvision.transforms.InterpolationMode.BILINEAR
+         )
+         image = torchvision.transforms.functional.center_crop(image, (target_height, target_width))
+         return image
+
+     def get_height_width(self, image):
+         if self.height is None or self.width is None:
+             width, height = image.size
+             if width * height > self.max_pixels:
+                 scale = (width * height / self.max_pixels) ** 0.5
+                 height, width = int(height / scale), int(width / scale)
+             height = height // self.height_division_factor * self.height_division_factor
+             width = width // self.width_division_factor * self.width_division_factor
+         else:
+             height, width = self.height, self.width
+         return height, width
+
+     def __call__(self, data: Image.Image):
+         image = self.crop_and_resize(data, *self.get_height_width(data))
+         return image
+
+
+ class ToList(DataProcessingOperator):
+     def __call__(self, data):
+         return [data]
+
+
+ class LoadVideo(DataProcessingOperator):
+     def __init__(self, num_frames=81, time_division_factor=4, time_division_remainder=1, frame_processor=lambda x: x):
+         self.num_frames = num_frames
+         self.time_division_factor = time_division_factor
+         self.time_division_remainder = time_division_remainder
+         # The frame_processor is built into the video loader for efficiency.
+         self.frame_processor = frame_processor
+
+     def get_num_frames(self, reader):
+         num_frames = self.num_frames
+         if int(reader.count_frames()) < num_frames:
+             num_frames = int(reader.count_frames())
+             while num_frames > 1 and num_frames % self.time_division_factor != self.time_division_remainder:
+                 num_frames -= 1
+         return num_frames
+
+     def __call__(self, data: str):
+         reader = imageio.get_reader(data)
+         # Sample `num_frames` evenly spaced indices from a fixed 81-frame window
+         # at the start of the video (`add_id` is a fixed start offset of 0).
+         add_id = 0
+         frame_indices = np.linspace(
+             add_id,
+             add_id + 80,
+             self.num_frames
+         ).round().astype(int)
+         frames = []
+         for idx in frame_indices:
+             frame = reader.get_data(idx)
+             frame = Image.fromarray(frame)
+             frame = self.frame_processor(frame)
+             frames.append(frame)
+         reader.close()
+         # Pad the clip with four copies of the last frame.
+         last = frames[-1]
+         for _ in range(4):
+             frames.append(last)
+         return frames
+
+
+ class SequencialProcess(DataProcessingOperator):
+     def __init__(self, operator=lambda x: x):
+         self.operator = operator
+
+     def __call__(self, data):
+         return [self.operator(i) for i in data]
+
+
+ class LoadGIF(DataProcessingOperator):
+     def __init__(self, num_frames=81, time_division_factor=4, time_division_remainder=1, frame_processor=lambda x: x):
+         self.num_frames = num_frames
+         self.time_division_factor = time_division_factor
+         self.time_division_remainder = time_division_remainder
+         # The frame_processor is built into the GIF loader for efficiency.
+         self.frame_processor = frame_processor
+
+     def get_num_frames(self, path):
+         num_frames = self.num_frames
+         images = iio.imread(path, mode="RGB")
+         if len(images) < num_frames:
+             num_frames = len(images)
+             while num_frames > 1 and num_frames % self.time_division_factor != self.time_division_remainder:
+                 num_frames -= 1
+         return num_frames
+
+     def __call__(self, data: str):
+         num_frames = self.get_num_frames(data)
+         frames = []
+         images = iio.imread(data, mode="RGB")
+         for img in images:
+             frame = Image.fromarray(img)
+             frame = self.frame_processor(frame)
+             frames.append(frame)
+             if len(frames) >= num_frames:
+                 break
+         return frames
+
+
+ class RouteByExtensionName(DataProcessingOperator):
+     def __init__(self, operator_map):
+         self.operator_map = operator_map
+
+     def __call__(self, data: str):
+         file_ext_name = data.split(".")[-1].lower()
+         for ext_names, operator in self.operator_map:
+             if ext_names is None or file_ext_name in ext_names:
+                 return operator(data)
+         raise ValueError(f"Unsupported file: {data}")
+
+
+ class RouteByType(DataProcessingOperator):
+     def __init__(self, operator_map):
+         self.operator_map = operator_map
+
+     def __call__(self, data):
+         for dtype, operator in self.operator_map:
+             if dtype is None or isinstance(data, dtype):
+                 return operator(data)
+         raise ValueError(f"Unsupported data: {data}")
+
+
+ class LoadTorchPickle(DataProcessingOperator):
+     def __init__(self, map_location="cpu"):
+         self.map_location = map_location
+
+     def __call__(self, data):
+         return torch.load(data, map_location=self.map_location, weights_only=False)
+
+
+ class ToAbsolutePath(DataProcessingOperator):
+     def __init__(self, base_path=""):
+         self.base_path = base_path
+
+     def __call__(self, data):
+         return os.path.join(self.base_path, data)
+
+
+ class LoadAudio(DataProcessingOperator):
+     def __init__(self, sr=16000):
+         self.sr = sr
+
+     def __call__(self, data: str):
+         import librosa
+         input_audio, sample_rate = librosa.load(data, sr=self.sr)
+         return input_audio
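
The `__rshift__` overloads above make operators composable with `>>`. A small sketch, with a hypothetical base path and file name:

    from diffsynth.core.data.operators import ToAbsolutePath, LoadImage, ImageCropAndResize

    # Resolve the relative path, load the image, then resize and center-crop to 512x512.
    pipeline = ToAbsolutePath("/data/images") >> LoadImage() >> ImageCropAndResize(height=512, width=512)
    image = pipeline("cat.jpg")
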
DiffSynth-Studio/diffsynth/core/data/unified_dataset.py ADDED
@@ -0,0 +1,105 @@
+ from .operators import *
+ import torch, json
+
+
+ def save_video_tensor_as_mp4(video_frames, out_path, fps=8):
+     # Stack a list of PIL frames into a (T, H, W, C) array and encode it as H.264.
+     video_np = []
+     for frame in video_frames:
+         frame_np = np.array(frame)
+         video_np.append(frame_np)
+     video = np.stack(video_np, axis=0)
+     imageio.mimwrite(
+         out_path,
+         video,
+         fps=fps,
+         codec="libx264",
+         quality=8,
+     )
+
+
+ class UnifiedDataset(torch.utils.data.Dataset):
+     def __init__(
+         self,
+         base_path=None,
+         repeat=1,
+         data_file_keys=tuple(),
+         main_data_operator=lambda x: x,
+     ):
+         self.base_path = base_path
+         self.repeat = repeat
+         self.data_file_keys = data_file_keys
+         self.main_data_operator = main_data_operator
+         self.data = []
+         self.load_metadata()
+
+     @staticmethod
+     def default_video_operator(
+         base_path="",
+         max_pixels=1920*1080, height=None, width=None,
+         height_division_factor=16, width_division_factor=16,
+         num_frames=81, time_division_factor=4, time_division_remainder=1,
+     ):
+         return RouteByType(operator_map=[
+             (str, ToAbsolutePath(base_path) >> RouteByExtensionName(operator_map=[
+                 (("jpg", "jpeg", "png", "webp"), LoadImage() >> ImageCropAndResize(height, width, max_pixels, height_division_factor, width_division_factor) >> ToList()),
+                 (("gif",), LoadGIF(
+                     num_frames, time_division_factor, time_division_remainder,
+                     frame_processor=ImageCropAndResize(height, width, max_pixels, height_division_factor, width_division_factor),
+                 )),
+                 (("mp4", "avi", "mov", "wmv", "mkv", "flv", "webm"), LoadVideo(
+                     num_frames, time_division_factor, time_division_remainder,
+                     frame_processor=ImageCropAndResize(height, width, max_pixels, height_division_factor, width_division_factor),
+                 )),
+             ])),
+         ])
+
+     def load_metadata(self):
+         src_dir = os.path.join(self.base_path, "point_video")
+         tgt_dir = os.path.join(self.base_path, "videos/train")
+         video_exts = (".mp4", ".avi", ".mov", ".mkv", ".webm")
+         for fname in os.listdir(src_dir):
+             if not fname.lower().endswith(video_exts):
+                 continue
+             src_path = os.path.join(src_dir, fname)
+             tgt_path = os.path.join(tgt_dir, fname)
+             if not os.path.exists(tgt_path) or os.path.getsize(tgt_path) == 0:
+                 print(f"Skipping invalid file: {tgt_path}")
+                 continue
+             if not os.path.exists(src_path) or os.path.getsize(src_path) == 0:
+                 print(f"Skipping invalid file: {src_path}")
+                 continue
+             self.data.append({
+                 "src_video": src_path,
+                 "tgt_video": tgt_path,
+                 "prompt": "Ensure the consistency of the video"
+             })
+         print(f"Found {len(self.data)} video pairs")
+
+     def __getitem__(self, data_id):
+         # If a sample fails to load, fall back to the next one.
+         try:
+             data = self.data[data_id % len(self.data)].copy()
+             for key in self.data_file_keys:
+                 if key in data:
+                     data[key] = self.main_data_operator(data[key])
+             return data
+         except Exception:
+             return self.__getitem__(data_id + 1)
+
+     def __len__(self):
+         return len(self.data) * self.repeat
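
A usage sketch for this dataset. The base path is hypothetical; per `load_metadata`, it must contain `point_video/` and `videos/train/` subfolders holding identically named clips:

    from diffsynth.core.data.unified_dataset import UnifiedDataset

    dataset = UnifiedDataset(
        base_path="/data/my_dataset",  # hypothetical
        data_file_keys=("src_video", "tgt_video"),
        main_data_operator=UnifiedDataset.default_video_operator(num_frames=81, height=480, width=832),
    )
    sample = dataset[0]  # dict with decoded "src_video"/"tgt_video" frame lists and a "prompt"
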
DiffSynth-Studio/diffsynth/core/data/unified_dataset_old.py ADDED
@@ -0,0 +1,139 @@
+ from .operators import *
+ import torch, json, pandas
+
+
+ class UnifiedDataset(torch.utils.data.Dataset):
+     def __init__(
+         self,
+         base_path=None, metadata_path=None,
+         repeat=1,
+         data_file_keys=tuple(),
+         main_data_operator=lambda x: x,
+         max_data_items=None,
+     ):
+         self.base_path = base_path
+         self.metadata_path = metadata_path
+         self.repeat = repeat
+         self.data_file_keys = data_file_keys
+         self.main_data_operator = main_data_operator
+         self.cached_data_operator = LoadTorchPickle()
+         self.max_data_items = max_data_items
+         self.data = []
+         self.cached_data = []
+         self.load_from_cache = metadata_path is None
+         self.load_metadata(metadata_path)
+
+     @staticmethod
+     def default_image_operator(
+         base_path="",
+         max_pixels=1920*1080, height=None, width=None,
+         height_division_factor=16, width_division_factor=16,
+     ):
+         return RouteByType(operator_map=[
+             (str, ToAbsolutePath(base_path) >> LoadImage() >> ImageCropAndResize(height, width, max_pixels, height_division_factor, width_division_factor)),
+             (list, SequencialProcess(ToAbsolutePath(base_path) >> LoadImage() >> ImageCropAndResize(height, width, max_pixels, height_division_factor, width_division_factor))),
+         ])
+
+     @staticmethod
+     def default_video_operator(
+         base_path="",
+         max_pixels=1920*1080, height=None, width=None,
+         height_division_factor=16, width_division_factor=16,
+         num_frames=81, time_division_factor=4, time_division_remainder=1,
+     ):
+         return RouteByType(operator_map=[
+             (str, ToAbsolutePath(base_path) >> RouteByExtensionName(operator_map=[
+                 (("jpg", "jpeg", "png", "webp"), LoadImage() >> ImageCropAndResize(height, width, max_pixels, height_division_factor, width_division_factor) >> ToList()),
+                 (("gif",), LoadGIF(
+                     num_frames, time_division_factor, time_division_remainder,
+                     frame_processor=ImageCropAndResize(height, width, max_pixels, height_division_factor, width_division_factor),
+                 )),
+                 (("mp4", "avi", "mov", "wmv", "mkv", "flv", "webm"), LoadVideo(
+                     num_frames, time_division_factor, time_division_remainder,
+                     frame_processor=ImageCropAndResize(height, width, max_pixels, height_division_factor, width_division_factor),
+                 )),
+             ])),
+         ])
+
+     def search_for_cached_data_files(self, path):
+         for file_name in os.listdir(path):
+             subpath = os.path.join(path, file_name)
+             if os.path.isdir(subpath):
+                 self.search_for_cached_data_files(subpath)
+             elif subpath.endswith(".pth"):
+                 self.cached_data.append(subpath)
+
+     def load_metadata(self, metadata_path):
+         if metadata_path == "folder_pair":
+             print("Loading paired videos directly from folders")
+             src_dir = os.path.join(self.base_path, "src")
+             tgt_dir = os.path.join(self.base_path, "tgt")
+             video_exts = (".mp4", ".avi", ".mov", ".mkv", ".webm")
+             self.data = []
+             for fname in os.listdir(src_dir):
+                 if not fname.lower().endswith(video_exts):
+                     continue
+                 src_path = os.path.join(src_dir, fname)
+                 tgt_path = os.path.join(tgt_dir, fname)
+                 if not os.path.exists(tgt_path):
+                     continue
+                 self.data.append({
+                     "src_video": src_path,
+                     "tgt_video": tgt_path,
+                 })
+             print(f"Found {len(self.data)} video pairs")
+         elif metadata_path is None:
+             print("No metadata_path. Searching for cached data files.")
+             self.search_for_cached_data_files(self.base_path)
+             print(f"{len(self.cached_data)} cached data files found.")
+         elif metadata_path.endswith(".json"):
+             with open(metadata_path, "r") as f:
+                 metadata = json.load(f)
+             self.data = metadata
+         elif metadata_path.endswith(".jsonl"):
+             metadata = []
+             with open(metadata_path, 'r') as f:
+                 for line in f:
+                     metadata.append(json.loads(line.strip()))
+             self.data = metadata
+         else:
+             metadata = pandas.read_csv(metadata_path)
+             self.data = [metadata.iloc[i].to_dict() for i in range(len(metadata))]
+
+     def __getitem__(self, data_id):
+         if self.load_from_cache:
+             data = self.cached_data[data_id % len(self.cached_data)]
+             data = self.cached_data_operator(data)
+         else:
+             data = self.data[data_id % len(self.data)].copy()
+             for key in self.data_file_keys:
+                 if key in data:
+                     data[key] = self.main_data_operator(data[key])
+         return data
+
+     def __len__(self):
+         if self.max_data_items is not None:
+             return self.max_data_items
+         elif self.load_from_cache:
+             return len(self.cached_data) * self.repeat
+         else:
+             return len(self.data) * self.repeat
+
+     def check_data_equal(self, data1, data2):
+         # Debug only
+         if len(data1) != len(data2):
+             return False
+         for k in data1:
+             if data1[k] != data2[k]:
+                 return False
+         return True
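
For reference, this older loader accepts a `.json` list, a `.jsonl` file, a CSV, the literal string `"folder_pair"`, or `None` (cached `.pth` files). A hypothetical `.jsonl` record compatible with `load_metadata` might look like:

    {"src_video": "src/clip_0001.mp4", "tgt_video": "tgt/clip_0001.mp4", "prompt": "Ensure the consistency of the video"}
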
DiffSynth-Studio/diffsynth/core/device/__init__.py ADDED
@@ -0,0 +1,2 @@
+ from .npu_compatible_device import parse_device_type, parse_nccl_backend, get_available_device_type, get_device_name
+ from .npu_compatible_device import IS_NPU_AVAILABLE, IS_CUDA_AVAILABLE
DiffSynth-Studio/diffsynth/core/device/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (379 Bytes).

DiffSynth-Studio/diffsynth/core/device/__pycache__/npu_compatible_device.cpython-39.pyc ADDED
Binary file (3.18 kB).
 
DiffSynth-Studio/diffsynth/core/device/npu_compatible_device.py ADDED
@@ -0,0 +1,107 @@
+ import importlib
+ import torch
+ from typing import Any
+
+
+ def is_torch_npu_available():
+     return importlib.util.find_spec("torch_npu") is not None
+
+
+ IS_CUDA_AVAILABLE = torch.cuda.is_available()
+ IS_NPU_AVAILABLE = is_torch_npu_available() and torch.npu.is_available()
+
+ if IS_NPU_AVAILABLE:
+     import torch_npu
+     torch.npu.config.allow_internal_format = False
+
+
+ def get_device_type() -> str:
+     """Get the device type of the current machine; currently only CPU, CUDA, and NPU are supported."""
+     if IS_CUDA_AVAILABLE:
+         device = "cuda"
+     elif IS_NPU_AVAILABLE:
+         device = "npu"
+     else:
+         device = "cpu"
+     return device
+
+
+ def get_torch_device() -> Any:
+     """Get the torch namespace for the current device type, e.g. torch.cuda or torch.npu."""
+     device_name = get_device_type()
+     try:
+         return getattr(torch, device_name)
+     except AttributeError:
+         print(f"Device namespace '{device_name}' not found in torch, falling back to 'torch.cuda'.")
+         return torch.cuda
+
+
+ def get_device_id() -> int:
+     """Get the current device id for the current device type."""
+     return get_torch_device().current_device()
+
+
+ def get_device_name() -> str:
+     """Get the current device name, e.g. "cuda:0"."""
+     return f"{get_device_type()}:{get_device_id()}"
+
+
+ def synchronize() -> None:
+     """Execute a torch synchronize operation."""
+     get_torch_device().synchronize()
+
+
+ def empty_cache() -> None:
+     """Execute a torch empty-cache operation."""
+     get_torch_device().empty_cache()
+
+
+ def get_nccl_backend() -> str:
+     """Return the distributed communication backend for the current device type."""
+     if IS_CUDA_AVAILABLE:
+         return "nccl"
+     elif IS_NPU_AVAILABLE:
+         return "hccl"
+     else:
+         raise RuntimeError(f"No available distributed communication backend found on device type {get_device_type()}.")
+
+
+ def enable_high_precision_for_bf16():
+     """Set high-precision accumulation dtypes for matmul and reduction."""
+     if IS_CUDA_AVAILABLE:
+         torch.backends.cuda.matmul.allow_tf32 = False
+         torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
+     if IS_NPU_AVAILABLE:
+         torch.npu.matmul.allow_tf32 = False
+         torch.npu.matmul.allow_bf16_reduced_precision_reduction = False
+
+
+ def parse_device_type(device):
+     if isinstance(device, str):
+         if device.startswith("cuda"):
+             return "cuda"
+         elif device.startswith("npu"):
+             return "npu"
+         else:
+             return "cpu"
+     elif isinstance(device, torch.device):
+         return device.type
+
+
+ def parse_nccl_backend(device_type):
+     if device_type == "cuda":
+         return "nccl"
+     elif device_type == "npu":
+         return "hccl"
+     else:
+         raise RuntimeError(f"No available distributed communication backend found on device type {device_type}.")
+
+
+ def get_available_device_type():
+     return get_device_type()
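
A short sketch of the exported helpers (runnable on any machine; the example device string is illustrative):

    import torch
    from diffsynth.core.device import parse_device_type, parse_nccl_backend, get_available_device_type

    print(get_available_device_type())                       # "cuda", "npu", or "cpu" on this machine
    device_type = parse_device_type(torch.device("cuda:0"))  # -> "cuda"
    print(parse_nccl_backend(device_type))                   # "nccl" for CUDA, "hccl" for NPU
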
DiffSynth-Studio/diffsynth/core/gradient/__init__.py ADDED
@@ -0,0 +1 @@
+ from .gradient_checkpoint import gradient_checkpoint_forward
DiffSynth-Studio/diffsynth/core/gradient/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (231 Bytes).

DiffSynth-Studio/diffsynth/core/gradient/__pycache__/gradient_checkpoint.cpython-39.pyc ADDED
Binary file (970 Bytes).
 
DiffSynth-Studio/diffsynth/core/gradient/gradient_checkpoint.py ADDED
@@ -0,0 +1,34 @@
+ import torch
+
+
+ def create_custom_forward(module):
+     def custom_forward(*inputs, **kwargs):
+         return module(*inputs, **kwargs)
+     return custom_forward
+
+
+ def gradient_checkpoint_forward(
+     model,
+     use_gradient_checkpointing,
+     use_gradient_checkpointing_offload,
+     *args,
+     **kwargs,
+ ):
+     if use_gradient_checkpointing_offload:
+         with torch.autograd.graph.save_on_cpu():
+             model_output = torch.utils.checkpoint.checkpoint(
+                 create_custom_forward(model),
+                 *args,
+                 **kwargs,
+                 use_reentrant=False,
+             )
+     elif use_gradient_checkpointing:
+         model_output = torch.utils.checkpoint.checkpoint(
+             create_custom_forward(model),
+             *args,
+             **kwargs,
+             use_reentrant=False,
+         )
+     else:
+         model_output = model(*args, **kwargs)
+     return model_output
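
A minimal sketch of the wrapper; the tiny module here is a stand-in for a transformer block:

    import torch
    from diffsynth.core.gradient import gradient_checkpoint_forward

    block = torch.nn.Sequential(torch.nn.Linear(64, 64), torch.nn.GELU())
    x = torch.randn(2, 64, requires_grad=True)

    # Activations are recomputed during backward instead of being stored;
    # passing use_gradient_checkpointing_offload=True would additionally save them on CPU.
    y = gradient_checkpoint_forward(block, True, False, x)
    y.sum().backward()
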
DiffSynth-Studio/diffsynth/core/loader/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from .file import load_state_dict, hash_state_dict_keys, hash_model_file
+ from .model import load_model, load_model_with_disk_offload
+ from .config import ModelConfig
DiffSynth-Studio/diffsynth/core/loader/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (380 Bytes).

DiffSynth-Studio/diffsynth/core/loader/__pycache__/config.cpython-39.pyc ADDED
Binary file (3.96 kB).

DiffSynth-Studio/diffsynth/core/loader/__pycache__/file.cpython-39.pyc ADDED
Binary file (3.49 kB).

DiffSynth-Studio/diffsynth/core/loader/__pycache__/model.cpython-39.pyc ADDED
Binary file (2.66 kB).
 
DiffSynth-Studio/diffsynth/core/loader/config.py ADDED
@@ -0,0 +1,122 @@
+ import torch, glob, os
+ from typing import Optional, Union
+ from dataclasses import dataclass
+ # from modelscope import snapshot_download
+ from huggingface_hub import snapshot_download as hf_snapshot_download
+
+
+ @dataclass
+ class ModelConfig:
+     path: Union[str, list[str]] = None
+     model_id: str = None
+     origin_file_pattern: Union[str, list[str]] = None
+     download_source: str = None
+     local_model_path: str = None
+     skip_download: bool = None
+     offload_device: Optional[Union[str, torch.device]] = None
+     offload_dtype: Optional[torch.dtype] = None
+     onload_device: Optional[Union[str, torch.device]] = None
+     onload_dtype: Optional[torch.dtype] = None
+     preparing_device: Optional[Union[str, torch.device]] = None
+     preparing_dtype: Optional[torch.dtype] = None
+     computation_device: Optional[Union[str, torch.device]] = None
+     computation_dtype: Optional[torch.dtype] = None
+     clear_parameters: bool = False
+
+     def check_input(self):
+         if self.path is None and self.model_id is None:
+             raise ValueError("""No valid model files. Please use `ModelConfig(path="xxx")` or `ModelConfig(model_id="xxx/yyy", origin_file_pattern="zzz")`. `skip_download=True` only supports the first one.""")
+
+     def parse_original_file_pattern(self):
+         if self.origin_file_pattern is None or self.origin_file_pattern == "":
+             return "*"
+         elif self.origin_file_pattern.endswith("/"):
+             return self.origin_file_pattern + "*"
+         else:
+             return self.origin_file_pattern
+
+     def parse_download_source(self):
+         if self.download_source is None:
+             if os.environ.get('DIFFSYNTH_DOWNLOAD_SOURCE') is not None:
+                 return os.environ.get('DIFFSYNTH_DOWNLOAD_SOURCE')
+             else:
+                 return "modelscope"
+         else:
+             return self.download_source
+
+     def parse_skip_download(self):
+         if self.skip_download is None:
+             if os.environ.get('DIFFSYNTH_SKIP_DOWNLOAD') is not None:
+                 if os.environ.get('DIFFSYNTH_SKIP_DOWNLOAD').lower() == "true":
+                     return True
+                 elif os.environ.get('DIFFSYNTH_SKIP_DOWNLOAD').lower() == "false":
+                     return False
+             else:
+                 return False
+         else:
+             return self.skip_download
+
+     def download(self):
+         origin_file_pattern = self.parse_original_file_pattern()
+         downloaded_files = glob.glob(origin_file_pattern, root_dir=os.path.join(self.local_model_path, self.model_id))
+         download_source = self.parse_download_source()
+         # if download_source.lower() == "modelscope":
+         #     snapshot_download(
+         #         self.model_id,
+         #         local_dir=os.path.join(self.local_model_path, self.model_id),
+         #         allow_file_pattern=origin_file_pattern,
+         #         ignore_file_pattern=downloaded_files,
+         #         local_files_only=False
+         #     )
+         # elif
+         if download_source.lower() == "huggingface":
+             hf_snapshot_download(
+                 self.model_id,
+                 local_dir=os.path.join(self.local_model_path, self.model_id),
+                 allow_patterns=origin_file_pattern,
+                 ignore_patterns=downloaded_files,
+                 local_files_only=False
+             )
+         else:
+             raise ValueError("`download_source` should be `modelscope` or `huggingface`.")
+
+     def require_downloading(self):
+         if self.path is not None:
+             return False
+         skip_download = self.parse_skip_download()
+         return not skip_download
+
+     def reset_local_model_path(self):
+         if os.environ.get('DIFFSYNTH_MODEL_BASE_PATH') is not None:
+             self.local_model_path = os.environ.get('DIFFSYNTH_MODEL_BASE_PATH')
+         elif self.local_model_path is None:
+             self.local_model_path = "./models"
+
+     def download_if_necessary(self):
+         self.check_input()
+         self.reset_local_model_path()
+         if self.require_downloading():
+             self.download()
+         if self.path is None:
+             if self.origin_file_pattern is None or self.origin_file_pattern == "":
+                 self.path = os.path.join(self.local_model_path, self.model_id)
+             else:
+                 self.path = glob.glob(os.path.join(self.local_model_path, self.model_id, self.origin_file_pattern))
+             if isinstance(self.path, list) and len(self.path) == 1:
+                 self.path = self.path[0]
+
+     def vram_config(self):
+         return {
+             "offload_device": self.offload_device,
+             "offload_dtype": self.offload_dtype,
+             "onload_device": self.onload_device,
+             "onload_dtype": self.onload_dtype,
+             "preparing_device": self.preparing_device,
+             "preparing_dtype": self.preparing_dtype,
+             "computation_device": self.computation_device,
+             "computation_dtype": self.computation_dtype,
+         }
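
A usage sketch with a hypothetical model repo. Note that `DIFFSYNTH_MODEL_BASE_PATH` and `DIFFSYNTH_SKIP_DOWNLOAD` (read above) can override the local directory and the download behavior, and that only the Hugging Face code path is active in this demo, so `download_source` must resolve to `"huggingface"`:

    from diffsynth.core.loader.config import ModelConfig

    config = ModelConfig(
        model_id="some-org/some-model",          # hypothetical repo id
        origin_file_pattern="*.safetensors",
        download_source="huggingface",
    )
    config.download_if_necessary()
    print(config.path)  # resolved local file path(s), under ./models by default
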
DiffSynth-Studio/diffsynth/core/loader/file.py ADDED
@@ -0,0 +1,121 @@
+ from safetensors import safe_open
+ import torch, hashlib
+
+
+ def load_state_dict(file_path, torch_dtype=None, device="cpu"):
+     if isinstance(file_path, list):
+         state_dict = {}
+         for file_path_ in file_path:
+             state_dict.update(load_state_dict(file_path_, torch_dtype, device))
+         return state_dict
+     if file_path.endswith(".safetensors"):
+         return load_state_dict_from_safetensors(file_path, torch_dtype=torch_dtype, device=device)
+     else:
+         return load_state_dict_from_bin(file_path, torch_dtype=torch_dtype, device=device)
+
+
+ def load_state_dict_from_safetensors(file_path, torch_dtype=None, device="cpu"):
+     state_dict = {}
+     with safe_open(file_path, framework="pt", device=str(device)) as f:
+         for k in f.keys():
+             state_dict[k] = f.get_tensor(k)
+             if torch_dtype is not None:
+                 state_dict[k] = state_dict[k].to(torch_dtype)
+     return state_dict
+
+
+ def load_state_dict_from_bin(file_path, torch_dtype=None, device="cpu"):
+     state_dict = torch.load(file_path, map_location=device, weights_only=True)
+     if len(state_dict) == 1:
+         if "state_dict" in state_dict:
+             state_dict = state_dict["state_dict"]
+         elif "module" in state_dict:
+             state_dict = state_dict["module"]
+         elif "model_state" in state_dict:
+             state_dict = state_dict["model_state"]
+     if torch_dtype is not None:
+         for i in state_dict:
+             if isinstance(state_dict[i], torch.Tensor):
+                 state_dict[i] = state_dict[i].to(torch_dtype)
+     return state_dict
+
+
+ def convert_state_dict_keys_to_single_str(state_dict, with_shape=True):
+     keys = []
+     for key, value in state_dict.items():
+         if isinstance(key, str):
+             if isinstance(value, torch.Tensor):
+                 if with_shape:
+                     shape = "_".join(map(str, list(value.shape)))
+                     keys.append(key + ":" + shape)
+                 keys.append(key)
+             elif isinstance(value, dict):
+                 keys.append(key + "|" + convert_state_dict_keys_to_single_str(value, with_shape=with_shape))
+     keys.sort()
+     keys_str = ",".join(keys)
+     return keys_str
+
+
+ def hash_state_dict_keys(state_dict, with_shape=True):
+     keys_str = convert_state_dict_keys_to_single_str(state_dict, with_shape=with_shape)
+     keys_str = keys_str.encode(encoding="UTF-8")
+     return hashlib.md5(keys_str).hexdigest()
+
+
+ def load_keys_dict(file_path):
+     if isinstance(file_path, list):
+         state_dict = {}
+         for file_path_ in file_path:
+             state_dict.update(load_keys_dict(file_path_))
+         return state_dict
+     if file_path.endswith(".safetensors"):
+         return load_keys_dict_from_safetensors(file_path)
+     else:
+         return load_keys_dict_from_bin(file_path)
+
+
+ def load_keys_dict_from_safetensors(file_path):
+     keys_dict = {}
+     with safe_open(file_path, framework="pt", device="cpu") as f:
+         for k in f.keys():
+             keys_dict[k] = f.get_slice(k).get_shape()
+     return keys_dict
+
+
+ def convert_state_dict_to_keys_dict(state_dict):
+     keys_dict = {}
+     for k, v in state_dict.items():
+         if isinstance(v, torch.Tensor):
+             keys_dict[k] = list(v.shape)
+         else:
+             keys_dict[k] = convert_state_dict_to_keys_dict(v)
+     return keys_dict
+
+
+ def load_keys_dict_from_bin(file_path):
+     state_dict = load_state_dict_from_bin(file_path)
+     keys_dict = convert_state_dict_to_keys_dict(state_dict)
+     return keys_dict
+
+
+ def convert_keys_dict_to_single_str(state_dict, with_shape=True):
+     keys = []
+     for key, value in state_dict.items():
+         if isinstance(key, str):
+             if isinstance(value, dict):
+                 keys.append(key + "|" + convert_keys_dict_to_single_str(value, with_shape=with_shape))
+             else:
+                 if with_shape:
+                     shape = "_".join(map(str, list(value)))
+                     keys.append(key + ":" + shape)
+                 keys.append(key)
+     keys.sort()
+     keys_str = ",".join(keys)
+     return keys_str
+
+
+ def hash_model_file(path, with_shape=True):
+     keys_dict = load_keys_dict(path)
+     keys_str = convert_keys_dict_to_single_str(keys_dict, with_shape=with_shape)
+     keys_str = keys_str.encode(encoding="UTF-8")
+     return hashlib.md5(keys_str).hexdigest()
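
A short sketch of the fingerprinting helpers; the checkpoint path is hypothetical. For a safetensors file, `hash_model_file` yields the same digest as hashing a loaded state dict, but reads only the header instead of the tensor data:

    from diffsynth.core.loader.file import load_state_dict, hash_state_dict_keys, hash_model_file

    state_dict = load_state_dict("model.safetensors")   # hypothetical file
    print(hash_state_dict_keys(state_dict))             # MD5 over the sorted "name:shape" key list
    print(hash_model_file("model.safetensors"))         # same digest, without loading tensors
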
DiffSynth-Studio/diffsynth/core/loader/model.py ADDED
@@ -0,0 +1,83 @@
+ from ..vram.initialization import skip_model_initialization
+ from ..vram.disk_map import DiskMap
+ from ..vram.layers import enable_vram_management
+ from .file import load_state_dict
+ import torch
+
+
+ def load_model(model_class, path, config=None, torch_dtype=torch.bfloat16, device="cpu", state_dict_converter=None, use_disk_map=False, module_map=None, vram_config=None, vram_limit=None):
+     config = {} if config is None else config
+     # Why do we use `skip_model_initialization`?
+     # It skips the random initialization of model parameters,
+     # thereby speeding up model loading and avoiding excessive memory usage.
+     with skip_model_initialization():
+         model = model_class(**config)
+     # What is `module_map`?
+     # This is a module mapping table for VRAM management.
+     if module_map is not None:
+         devices = [vram_config["offload_device"], vram_config["onload_device"], vram_config["preparing_device"], vram_config["computation_device"]]
+         device = [d for d in devices if d != "disk"][0]
+         dtypes = [vram_config["offload_dtype"], vram_config["onload_dtype"], vram_config["preparing_dtype"], vram_config["computation_dtype"]]
+         dtype = [d for d in dtypes if d != "disk"][0]
+         if vram_config["offload_device"] != "disk":
+             state_dict = DiskMap(path, device, torch_dtype=dtype)
+             if state_dict_converter is not None:
+                 state_dict = state_dict_converter(state_dict)
+             else:
+                 state_dict = {i: state_dict[i] for i in state_dict}
+             model.load_state_dict(state_dict, assign=True)
+             model = enable_vram_management(model, module_map, vram_config=vram_config, disk_map=None, vram_limit=vram_limit)
+         else:
+             disk_map = DiskMap(path, device, state_dict_converter=state_dict_converter)
+             model = enable_vram_management(model, module_map, vram_config=vram_config, disk_map=disk_map, vram_limit=vram_limit)
+     else:
+         # Why do we use `DiskMap`?
+         # Sometimes a model file contains multiple models,
+         # and DiskMap can load only the parameters of a single model,
+         # avoiding the need to load all parameters in the file.
+         if use_disk_map:
+             state_dict = DiskMap(path, device, torch_dtype=torch_dtype)
+         else:
+             state_dict = load_state_dict(path, torch_dtype, device)
+         # Why do we use `state_dict_converter`?
+         # Some models are saved in complex formats,
+         # and we need to convert the state dict into the appropriate format.
+         if state_dict_converter is not None:
+             state_dict = state_dict_converter(state_dict)
+         else:
+             state_dict = {i: state_dict[i] for i in state_dict}
+         model.load_state_dict(state_dict, assign=True, strict=False)
+         # Why do we call `to()`?
+         # Because some models override the behavior of `to()`,
+         # especially those from libraries like Transformers.
+         if any(p.is_meta for p in model.parameters()):
+             model = model.to_empty(device=device)
+             model = model.to(dtype=torch_dtype)
+         else:
+             model = model.to(dtype=torch_dtype, device=device)
+     if hasattr(model, "eval"):
+         model = model.eval()
+     return model
+
+
+ def load_model_with_disk_offload(model_class, path, config=None, torch_dtype=torch.bfloat16, device="cpu", state_dict_converter=None, module_map=None):
+     if isinstance(path, str):
+         path = [path]
+     config = {} if config is None else config
+     with skip_model_initialization():
+         model = model_class(**config)
+     if hasattr(model, "eval"):
+         model = model.eval()
+     disk_map = DiskMap(path, device, state_dict_converter=state_dict_converter)
+     vram_config = {
+         "offload_dtype": "disk",
+         "offload_device": "disk",
+         "onload_dtype": "disk",
+         "onload_device": "disk",
+         "preparing_dtype": torch.float8_e4m3fn,
+         "preparing_device": device,
+         "computation_dtype": torch_dtype,
+         "computation_device": device,
+     }
+     enable_vram_management(model, module_map, vram_config=vram_config, disk_map=disk_map, vram_limit=80)
+     return model
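
A minimal sketch of the plain (non-VRAM-managed) code path of `load_model`; the model class and checkpoint are stand-ins and assume the file holds a matching state dict:

    import torch
    from diffsynth.core.loader.model import load_model

    class TinyModel(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.proj = torch.nn.Linear(8, 8)

    # Hypothetical checkpoint containing "proj.weight" and "proj.bias".
    model = load_model(TinyModel, "tiny.safetensors", torch_dtype=torch.float32, device="cpu")
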
DiffSynth-Studio/diffsynth/core/vram/__init__.py ADDED
@@ -0,0 +1,2 @@
+ from .initialization import skip_model_initialization
+ from .layers import *
DiffSynth-Studio/diffsynth/core/vram/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (243 Bytes).

DiffSynth-Studio/diffsynth/core/vram/__pycache__/disk_map.cpython-39.pyc ADDED
Binary file (3.77 kB).

DiffSynth-Studio/diffsynth/core/vram/__pycache__/initialization.cpython-39.pyc ADDED
Binary file (870 Bytes).

DiffSynth-Studio/diffsynth/core/vram/__pycache__/layers.cpython-39.pyc ADDED
Binary file (13.2 kB).
 
DiffSynth-Studio/diffsynth/core/vram/disk_map.py ADDED
@@ -0,0 +1,93 @@
+ from safetensors import safe_open
+ import torch, os
+
+
+ class SafetensorsCompatibleTensor:
+     def __init__(self, tensor):
+         self.tensor = tensor
+
+     def get_shape(self):
+         return list(self.tensor.shape)
+
+
+ class SafetensorsCompatibleBinaryLoader:
+     def __init__(self, path, device):
+         print("Detected a non-safetensors file, which may load more slowly. Converting it to a safetensors file is recommended.")
+         self.state_dict = torch.load(path, weights_only=True, map_location=device)
+
+     def keys(self):
+         return self.state_dict.keys()
+
+     def get_tensor(self, name):
+         return self.state_dict[name]
+
+     def get_slice(self, name):
+         return SafetensorsCompatibleTensor(self.state_dict[name])
+
+
+ class DiskMap:
+
+     def __init__(self, path, device, torch_dtype=None, state_dict_converter=None, buffer_size=10**9):
+         self.path = path if isinstance(path, list) else [path]
+         self.device = device
+         self.torch_dtype = torch_dtype
+         if os.environ.get('DIFFSYNTH_DISK_MAP_BUFFER_SIZE') is not None:
+             self.buffer_size = int(os.environ.get('DIFFSYNTH_DISK_MAP_BUFFER_SIZE'))
+         else:
+             self.buffer_size = buffer_size
+         self.files = []
+         self.flush_files()
+         self.name_map = {}
+         for file_id, file in enumerate(self.files):
+             for name in file.keys():
+                 self.name_map[name] = file_id
+         self.rename_dict = self.fetch_rename_dict(state_dict_converter)
+
+     def flush_files(self):
+         if len(self.files) == 0:
+             for path in self.path:
+                 if path.endswith(".safetensors"):
+                     self.files.append(safe_open(path, framework="pt", device=str(self.device)))
+                 else:
+                     self.files.append(SafetensorsCompatibleBinaryLoader(path, device=self.device))
+         else:
+             # Reopen the safetensors handles once `buffer_size` parameters have been
+             # read, releasing memory held by the previous handles.
+             for i, path in enumerate(self.path):
+                 if path.endswith(".safetensors"):
+                     self.files[i] = safe_open(path, framework="pt", device=str(self.device))
+         self.num_params = 0
+
+     def __getitem__(self, name):
+         if self.rename_dict is not None:
+             name = self.rename_dict[name]
+         file_id = self.name_map[name]
+         param = self.files[file_id].get_tensor(name)
+         if self.torch_dtype is not None and isinstance(param, torch.Tensor):
+             param = param.to(self.torch_dtype)
+         if isinstance(param, torch.Tensor) and param.device.type == "cpu":
+             param = param.clone()
+         if isinstance(param, torch.Tensor):
+             self.num_params += param.numel()
+             if self.num_params > self.buffer_size:
+                 self.flush_files()
+         return param
+
+     def fetch_rename_dict(self, state_dict_converter):
+         if state_dict_converter is None:
+             return None
+         state_dict = {}
+         for file in self.files:
+             for name in file.keys():
+                 state_dict[name] = name
+         state_dict = state_dict_converter(state_dict)
+         return state_dict
+
+     def __iter__(self):
+         if self.rename_dict is not None:
+             return self.rename_dict.__iter__()
+         else:
+             return self.name_map.__iter__()
+
+     def __contains__(self, x):
+         if self.rename_dict is not None:
+             return x in self.rename_dict
+         else:
+             return x in self.name_map
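
A usage sketch of `DiskMap` as a lazy, dict-like view over a checkpoint (the path and key are hypothetical):

    from diffsynth.core.vram.disk_map import DiskMap

    weights = DiskMap("model.safetensors", device="cpu")
    names = list(weights)            # parameter names, read from the file header
    tensor = weights["proj.weight"]  # the tensor itself is only read on access
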
DiffSynth-Studio/diffsynth/core/vram/initialization.py ADDED
@@ -0,0 +1,21 @@
+ import torch
+ from contextlib import contextmanager
+
+
+ @contextmanager
+ def skip_model_initialization(device=torch.device("meta")):
+
+     def register_empty_parameter(module, name, param):
+         old_register_parameter(module, name, param)
+         if param is not None:
+             param_cls = type(module._parameters[name])
+             kwargs = module._parameters[name].__dict__
+             kwargs["requires_grad"] = param.requires_grad
+             module._parameters[name] = param_cls(module._parameters[name].to(device), **kwargs)
+
+     old_register_parameter = torch.nn.Module.register_parameter
+     torch.nn.Module.register_parameter = register_empty_parameter
+     try:
+         yield
+     finally:
+         torch.nn.Module.register_parameter = old_register_parameter
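
A minimal sketch of the context manager; parameters are created on the meta device, so construction allocates no memory and skips random initialization:

    import torch
    from diffsynth.core.vram.initialization import skip_model_initialization

    with skip_model_initialization():
        layer = torch.nn.Linear(4096, 4096)
    print(layer.weight.device)  # meta
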
DiffSynth-Studio/diffsynth/core/vram/layers.py ADDED
@@ -0,0 +1,479 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch, copy
2
+ from typing import Union
3
+ from .initialization import skip_model_initialization
4
+ from .disk_map import DiskMap
5
+ from ..device import parse_device_type, get_device_name, IS_NPU_AVAILABLE
6
+
7
+
8
+ class AutoTorchModule(torch.nn.Module):
9
+
10
+ def __init__(
11
+ self,
12
+ offload_dtype: torch.dtype = None,
13
+ offload_device: Union[str, torch.device] = None,
14
+ onload_dtype: torch.dtype = None,
15
+ onload_device: Union[str, torch.device] = None,
16
+ preparing_dtype: torch.dtype = None,
17
+ preparing_device: Union[str, torch.device] = None,
18
+ computation_dtype: torch.dtype = None,
19
+ computation_device: Union[str, torch.device] = None,
20
+ vram_limit: float = None,
21
+ ):
22
+ super().__init__()
23
+ self.set_dtype_and_device(
24
+ offload_dtype,
25
+ offload_device,
26
+ onload_dtype,
27
+ onload_device,
28
+ preparing_dtype,
29
+ preparing_device,
30
+ computation_dtype,
31
+ computation_device,
32
+ vram_limit,
33
+ )
34
+ self.state = 0
35
+ self.name = ""
36
+ self.computation_device_type = parse_device_type(self.computation_device)
37
+
38
+ def set_dtype_and_device(
39
+ self,
40
+ offload_dtype: torch.dtype = None,
41
+ offload_device: Union[str, torch.device] = None,
42
+ onload_dtype: torch.dtype = None,
43
+ onload_device: Union[str, torch.device] = None,
44
+ preparing_dtype: torch.dtype = None,
45
+ preparing_device: Union[str, torch.device] = None,
46
+ computation_dtype: torch.dtype = None,
47
+ computation_device: Union[str, torch.device] = None,
48
+ vram_limit: float = None,
49
+ ):
50
+ self.offload_dtype = offload_dtype or computation_dtype
51
+ self.offload_device = offload_device or computation_dtype
52
+ self.onload_dtype = onload_dtype or computation_dtype
53
+ self.onload_device = onload_device or computation_dtype
54
+ self.preparing_dtype = preparing_dtype or computation_dtype
55
+ self.preparing_device = preparing_device or computation_dtype
56
+ self.computation_dtype = computation_dtype
57
+ self.computation_device = computation_device
58
+ self.vram_limit = vram_limit
59
+
60
+ def cast_to(self, weight, dtype, device):
61
+ r = torch.empty_like(weight, dtype=dtype, device=device)
62
+ r.copy_(weight)
63
+ return r
64
+
65
+ def check_free_vram(self):
66
+ device = self.computation_device if not IS_NPU_AVAILABLE else get_device_name()
67
+ gpu_mem_state = getattr(torch, self.computation_device_type).mem_get_info(device)
68
+ used_memory = (gpu_mem_state[1] - gpu_mem_state[0]) / (1024**3)
69
+ return used_memory < self.vram_limit
70
+
71
+ def offload(self):
72
+ if self.state != 0:
73
+ self.to(dtype=self.offload_dtype, device=self.offload_device)
74
+ self.state = 0
75
+
76
+ def onload(self):
77
+ if self.state != 1:
78
+ self.to(dtype=self.onload_dtype, device=self.onload_device)
79
+ self.state = 1
80
+
81
+ def param_name(self, name):
82
+ if self.name == "":
83
+ return name
84
+ else:
85
+ return self.name + "." + name
86
+
87
+
88
+ class AutoWrappedModule(AutoTorchModule):
89
+
90
+ def __init__(
91
+ self,
92
+ module: torch.nn.Module,
93
+ offload_dtype: torch.dtype = None,
94
+ offload_device: Union[str, torch.device] = None,
95
+ onload_dtype: torch.dtype = None,
96
+ onload_device: Union[str, torch.device] = None,
97
+ preparing_dtype: torch.dtype = None,
98
+ preparing_device: Union[str, torch.device] = None,
99
+ computation_dtype: torch.dtype = None,
100
+ computation_device: Union[str, torch.device] = None,
101
+ vram_limit: float = None,
102
+ name: str = "",
103
+ disk_map: DiskMap = None,
104
+ **kwargs
105
+ ):
106
+ super().__init__(
107
+ offload_dtype,
108
+ offload_device,
109
+ onload_dtype,
110
+ onload_device,
111
+ preparing_dtype,
112
+ preparing_device,
113
+ computation_dtype,
114
+ computation_device,
115
+ vram_limit,
116
+ )
117
+ self.module = module
118
+ if offload_dtype == "disk":
119
+ self.name = name
120
+ self.disk_map = disk_map
121
+ self.required_params = [name for name, _ in self.module.named_parameters()]
122
+ self.disk_offload = True
123
+ else:
124
+ self.disk_offload = False
125
+
126
+ def load_from_disk(self, torch_dtype, device, copy_module=False):
127
+ if copy_module:
128
+ module = copy.deepcopy(self.module)
129
+ else:
130
+ module = self.module
131
+ state_dict = {}
132
+ for name in self.required_params:
133
+ param = self.disk_map[self.param_name(name)]
134
+ param = param.to(dtype=torch_dtype, device=device)
135
+ state_dict[name] = param
136
+ module.load_state_dict(state_dict, assign=True)
137
+ module.to(dtype=torch_dtype, device=device)
138
+ return module
139
+
140
+ def offload_to_disk(self, model: torch.nn.Module):
141
+ for buf in model.buffers():
142
+ # If there are some parameters are registed in buffers (not in state dict),
143
+ # We cannot offload the model.
144
+ for children in model.children():
145
+ self.offload_to_disk(children)
146
+ break
147
+ else:
148
+ model.to("meta")
149
+
150
+     def offload(self):
+         # offload / onload / preparing -> offload
+         if self.state != 0:
+             if self.disk_offload:
+                 self.offload_to_disk(self.module)
+             else:
+                 self.to(dtype=self.offload_dtype, device=self.offload_device)
+             self.state = 0
+
+     def onload(self):
+         # offload / onload / preparing -> onload
+         if self.state < 1:
+             if self.disk_offload and self.onload_device != "disk" and self.offload_device == "disk":
+                 self.load_from_disk(self.onload_dtype, self.onload_device)
+             elif self.onload_device != "disk":
+                 self.to(dtype=self.onload_dtype, device=self.onload_device)
+             self.state = 1
+
+     def preparing(self):
+         # onload / preparing -> preparing
+         if self.state != 2:
+             if self.disk_offload and self.preparing_device != "disk" and self.onload_device == "disk":
+                 self.load_from_disk(self.preparing_dtype, self.preparing_device)
+             elif self.preparing_device != "disk":
+                 self.to(dtype=self.preparing_dtype, device=self.preparing_device)
+             self.state = 2
+
+     def cast_to(self, module, dtype, device):
+         return copy.deepcopy(module).to(dtype=dtype, device=device)
+
+     def computation(self):
+         # onload / preparing -> computation (temporary)
+         if self.state == 2:
+             torch_dtype, device = self.preparing_dtype, self.preparing_device
+         else:
+             torch_dtype, device = self.onload_dtype, self.onload_device
+         if torch_dtype == self.computation_dtype and device == self.computation_device:
+             module = self.module
+         elif self.disk_offload and device == "disk":
+             module = self.load_from_disk(self.computation_dtype, self.computation_device, copy_module=True)
+         else:
+             module = self.cast_to(self.module, dtype=self.computation_dtype, device=self.computation_device)
+         return module
+
+     def forward(self, *args, **kwargs):
+         if self.state == 1 and (self.vram_limit is None or self.check_free_vram()):
+             self.preparing()
+         module = self.computation()
+         return module(*args, **kwargs)
+
+     def __getattr__(self, name):
+         if name in self.__dict__ or name == "module":
+             return super().__getattr__(name)
+         else:
+             return getattr(self.module, name)
+
+
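
Taken together, `offload`, `onload`, `preparing`, and `computation` implement a small state machine over `self.state` (0 = offloaded, 1 = onloaded, 2 = prepared), with `forward` promoting the module just in time. A minimal sketch of driving a wrapped module through it, assuming a CUDA device (the layer, dtypes, and devices are illustrative):

import torch

layer = torch.nn.LayerNorm(64)
wrapped = AutoWrappedModule(
    layer,
    offload_dtype=torch.float32, offload_device="cpu",
    onload_dtype=torch.bfloat16, onload_device="cuda",
    computation_dtype=torch.bfloat16, computation_device="cuda",
)
wrapped.onload()   # state -> 1: weights move to cuda / bfloat16
y = wrapped(torch.randn(2, 64, device="cuda", dtype=torch.bfloat16))
wrapped.offload()  # state -> 0: weights return to cpu / float32
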
+ class AutoWrappedNonRecurseModule(AutoWrappedModule):
+
+     def __init__(
+         self,
+         module: torch.nn.Module,
+         offload_dtype: torch.dtype = None,
+         offload_device: Union[str, torch.device] = None,
+         onload_dtype: torch.dtype = None,
+         onload_device: Union[str, torch.device] = None,
+         preparing_dtype: torch.dtype = None,
+         preparing_device: Union[str, torch.device] = None,
+         computation_dtype: torch.dtype = None,
+         computation_device: Union[str, torch.device] = None,
+         vram_limit: float = None,
+         name: str = "",
+         disk_map: DiskMap = None,
+         **kwargs
+     ):
+         super().__init__(
+             module,
+             offload_dtype,
+             offload_device,
+             onload_dtype,
+             onload_device,
+             preparing_dtype,
+             preparing_device,
+             computation_dtype,
+             computation_device,
+             vram_limit,
+             name,
+             disk_map,
+             **kwargs
+         )
+         if self.disk_offload:
+             # Only this module's own parameters are managed; children are wrapped separately.
+             self.required_params = [name for name, _ in self.module.named_parameters(recurse=False)]
+
+     def load_from_disk(self, torch_dtype, device, copy_module=False):
+         if copy_module:
+             module = copy.deepcopy(self.module)
+         else:
+             module = self.module
+         state_dict = {}
+         for name in self.required_params:
+             param = self.disk_map[self.param_name(name)]
+             param = param.to(dtype=torch_dtype, device=device)
+             state_dict[name] = param
+         module.load_state_dict(state_dict, assign=True, strict=False)
+         return module
+
+     def offload_to_disk(self, model: torch.nn.Module):
+         for name in self.required_params:
+             getattr(self, name).to("meta")
+
+     def cast_to(self, module, dtype, device):
+         # Parameter casting is implemented in the model architecture.
+         return module
+
+     def __getattr__(self, name):
+         if name in self.__dict__ or name == "module":
+             return super().__getattr__(name)
+         else:
+             return getattr(self.module, name)
+
+
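
The non-recursive variant only manages parameters registered directly on the wrapped module; its children are expected to be wrapped on their own (see `enable_vram_management_recursively` below). The `recurse=False` distinction in a nutshell (the `Block` class is illustrative):

import torch

class Block(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.scale = torch.nn.Parameter(torch.ones(8))  # direct parameter
        self.proj = torch.nn.Linear(8, 8)               # child module

block = Block()
print([n for n, _ in block.named_parameters()])               # ['scale', 'proj.weight', 'proj.bias']
print([n for n, _ in block.named_parameters(recurse=False)])  # ['scale']
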
+ class AutoWrappedLinear(torch.nn.Linear, AutoTorchModule):
+     def __init__(
+         self,
+         module: torch.nn.Linear,
+         offload_dtype: torch.dtype = None,
+         offload_device: Union[str, torch.device] = None,
+         onload_dtype: torch.dtype = None,
+         onload_device: Union[str, torch.device] = None,
+         preparing_dtype: torch.dtype = None,
+         preparing_device: Union[str, torch.device] = None,
+         computation_dtype: torch.dtype = None,
+         computation_device: Union[str, torch.device] = None,
+         vram_limit: float = None,
+         name: str = "",
+         disk_map: DiskMap = None,
+         **kwargs
+     ):
+         with skip_model_initialization():
+             super().__init__(
+                 in_features=module.in_features,
+                 out_features=module.out_features,
+                 bias=module.bias is not None,
+             )
+         self.set_dtype_and_device(
+             offload_dtype,
+             offload_device,
+             onload_dtype,
+             onload_device,
+             preparing_dtype,
+             preparing_device,
+             computation_dtype,
+             computation_device,
+             vram_limit,
+         )
+         self.weight = module.weight
+         self.bias = module.bias
+         self.state = 0
+         self.name = name
+         self.lora_A_weights = []
+         self.lora_B_weights = []
+         self.lora_merger = None
+         self.enable_fp8 = computation_dtype in [torch.float8_e4m3fn, torch.float8_e4m3fnuz]
+         self.computation_device_type = parse_device_type(self.computation_device)
+
+         if offload_dtype == "disk":
+             self.disk_map = disk_map
+             self.disk_offload = True
+         else:
+             self.disk_offload = False
+
+     def fp8_linear(
+         self,
+         input: torch.Tensor,
+         weight: torch.Tensor,
+         bias: torch.Tensor = None,
+     ) -> torch.Tensor:
+         device = input.device
+         origin_dtype = input.dtype
+         origin_shape = input.shape
+         input = input.reshape(-1, origin_shape[-1])
+
+         x_max = torch.max(torch.abs(input), dim=-1, keepdim=True).values
+         fp8_max = 448.0
+         # For float8_e4m3fnuz, the maximum representable value is half that of e4m3fn.
+         # To avoid overflow and ensure numerical compatibility during FP8 computation,
+         # we scale down the input by 2.0 in advance.
+         # This scaling is compensated later, when the final result is scaled back.
+         if self.computation_dtype == torch.float8_e4m3fnuz:
+             fp8_max = fp8_max / 2.0
+         scale_a = torch.clamp(x_max / fp8_max, min=1.0).float().to(device=device)
+         scale_b = torch.ones((weight.shape[0], 1)).to(device=device)
+         input = input / (scale_a + 1e-8)
+         input = input.to(self.computation_dtype)
+         weight = weight.to(self.computation_dtype)
+         bias = None if bias is None else bias.to(torch.bfloat16)
+
+         result = torch._scaled_mm(
+             input,
+             weight.T,
+             scale_a=scale_a,
+             scale_b=scale_b.T,
+             bias=bias,
+             out_dtype=origin_dtype,
+         )
+         new_shape = origin_shape[:-1] + result.shape[-1:]
+         result = result.reshape(new_shape)
+         return result
+
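
The per-row dynamic scaling above can be sanity-checked in isolation. A small sketch of the same quantize/dequantize math in plain tensor ops (448.0 is the e4m3fn maximum used above; this reproduces the scaling only, not `torch._scaled_mm`):

import torch

x = torch.randn(4, 8) * 1000.0  # rows with large magnitudes
x_max = x.abs().max(dim=-1, keepdim=True).values
scale_a = torch.clamp(x_max / 448.0, min=1.0)  # per-row scale, never below 1
x_fp8 = (x / (scale_a + 1e-8)).to(torch.float8_e4m3fn)
x_back = x_fp8.to(torch.float32) * scale_a  # dequantize
print(((x - x_back).abs().max() / x.abs().max()).item())  # small relative error
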
+     def load_from_disk(self, torch_dtype, device, assign=True):
+         weight = self.disk_map[self.name + ".weight"].to(dtype=torch_dtype, device=device)
+         bias = None if self.bias is None else self.disk_map[self.name + ".bias"].to(dtype=torch_dtype, device=device)
+         if assign:
+             state_dict = {"weight": weight}
+             if bias is not None:
+                 state_dict["bias"] = bias
+             self.load_state_dict(state_dict, assign=True)
+         return weight, bias
+
+     def offload(self):
+         # offload / onload / preparing -> offload
+         if self.state != 0:
+             if self.disk_offload:
+                 self.to("meta")
+             else:
+                 self.to(dtype=self.offload_dtype, device=self.offload_device)
+             self.state = 0
+
+     def onload(self):
+         # offload / onload / preparing -> onload
+         if self.state < 1:
+             if self.disk_offload and self.onload_device != "disk" and self.offload_device == "disk":
+                 self.load_from_disk(self.onload_dtype, self.onload_device)
+             elif self.onload_device != "disk":
+                 self.to(dtype=self.onload_dtype, device=self.onload_device)
+             self.state = 1
+
+     def preparing(self):
+         # onload / preparing -> preparing
+         if self.state != 2:
+             if self.disk_offload and self.preparing_device != "disk" and self.onload_device == "disk":
+                 self.load_from_disk(self.preparing_dtype, self.preparing_device)
+             elif self.preparing_device != "disk":
+                 self.to(dtype=self.preparing_dtype, device=self.preparing_device)
+             self.state = 2
+
+     def computation(self):
+         # onload / preparing -> computation (temporary)
+         if self.state == 2:
+             torch_dtype, device = self.preparing_dtype, self.preparing_device
+         else:
+             torch_dtype, device = self.onload_dtype, self.onload_device
+         if torch_dtype == self.computation_dtype and device == self.computation_device:
+             weight, bias = self.weight, self.bias
+         elif self.disk_offload and device == "disk":
+             weight, bias = self.load_from_disk(self.computation_dtype, self.computation_device, assign=False)
+         else:
+             weight = self.cast_to(self.weight, self.computation_dtype, self.computation_device)
+             bias = None if self.bias is None else self.cast_to(self.bias, self.computation_dtype, self.computation_device)
+         return weight, bias
+
+     def linear_forward(self, x, weight, bias):
+         if self.enable_fp8:
+             out = self.fp8_linear(x, weight, bias)
+         else:
+             out = torch.nn.functional.linear(x, weight, bias)
+         return out
+
+     def lora_forward(self, x, out):
+         if self.lora_merger is None:
+             for lora_A, lora_B in zip(self.lora_A_weights, self.lora_B_weights):
+                 out = out + x @ lora_A.T @ lora_B.T
+         else:
+             lora_output = []
+             for lora_A, lora_B in zip(self.lora_A_weights, self.lora_B_weights):
+                 lora_output.append(x @ lora_A.T @ lora_B.T)
+             lora_output = torch.stack(lora_output)
+             out = self.lora_merger(out, lora_output)
+         return out
+
+     def forward(self, x, *args, **kwargs):
+         if self.state == 1 and (self.vram_limit is None or self.check_free_vram()):
+             self.preparing()
+         weight, bias = self.computation()
+         out = self.linear_forward(x, weight, bias)
+         if len(self.lora_A_weights) > 0:
+             out = self.lora_forward(x, out)
+         return out
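
`lora_forward` adds the standard low-rank update `x @ lora_A.T @ lora_B.T` per adapter, which is equivalent to folding `lora_B @ lora_A` into the weight. A quick self-contained check of that identity (rank and sizes are illustrative):

import torch

x = torch.randn(2, 16)
base = torch.nn.Linear(16, 32, bias=False)
rank = 4
lora_A = torch.randn(rank, 16) * 0.01   # (rank, in_features)
lora_B = torch.randn(32, rank) * 0.01   # (out_features, rank)

out = base(x) + x @ lora_A.T @ lora_B.T
# Equivalent to folding the update into the weight:
merged = torch.nn.functional.linear(x, base.weight + lora_B @ lora_A)
print(torch.allclose(out, merged, atol=1e-5))
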
+
+
+ def enable_vram_management_recursively(model: torch.nn.Module, module_map: dict, vram_config: dict, vram_limit=None, name_prefix="", disk_map=None, **kwargs):
+     if isinstance(model, AutoWrappedNonRecurseModule):
+         model = model.module
+     for name, module in model.named_children():
+         layer_name = name if name_prefix == "" else name_prefix + "." + name
+         for source_module, target_module in module_map.items():
+             if isinstance(module, source_module):
+                 module_ = target_module(module, **vram_config, vram_limit=vram_limit, name=layer_name, disk_map=disk_map, **kwargs)
+                 if isinstance(module_, AutoWrappedNonRecurseModule):
+                     enable_vram_management_recursively(module_, module_map, vram_config, vram_limit=vram_limit, name_prefix=layer_name, disk_map=disk_map, **kwargs)
+                 setattr(model, name, module_)
+                 break
+         else:
+             enable_vram_management_recursively(module, module_map, vram_config, vram_limit=vram_limit, name_prefix=layer_name, disk_map=disk_map, **kwargs)
+
+
+ def fill_vram_config(model, vram_config):
+     vram_config_ = vram_config.copy()
+     vram_config_["onload_dtype"] = vram_config["computation_dtype"]
+     vram_config_["onload_device"] = vram_config["computation_device"]
+     vram_config_["preparing_dtype"] = vram_config["computation_dtype"]
+     vram_config_["preparing_device"] = vram_config["computation_device"]
+     for k in vram_config:
+         if vram_config[k] != vram_config_[k]:
+             print(f"No fine-grained VRAM configuration is provided for {model.__class__.__name__}. The `onload`, `preparing`, and `computation` stages will share the same dtype and device. `vram_config` is set to {vram_config_}")
+             break
+     return vram_config_
+
+
+ def enable_vram_management(model: torch.nn.Module, module_map: dict, vram_config: dict, vram_limit=None, disk_map=None, **kwargs):
+     for source_module, target_module in module_map.items():
+         # If the whole model matches an entry in `module_map`, no fine-grained VRAM
+         # configuration applies: wrap it directly and manage the entire model uniformly.
+         if isinstance(model, source_module):
+             vram_config = fill_vram_config(model, vram_config)
+             model = target_module(model, **vram_config, vram_limit=vram_limit, disk_map=disk_map, **kwargs)
+             break
+     else:
+         enable_vram_management_recursively(model, module_map, vram_config, vram_limit=vram_limit, disk_map=disk_map, **kwargs)
+     # `vram_management_enabled` is a flag that lets the pipeline check whether VRAM management is enabled.
+     model.vram_management_enabled = True
+     return model
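
Putting it together: `enable_vram_management` takes a `module_map` from source layer types to the wrapper classes above and a shared `vram_config`. A hedged usage sketch, assuming a CUDA device (the toy model, dtypes, and devices are illustrative):

import torch

model = torch.nn.Sequential(
    torch.nn.Linear(64, 64),
    torch.nn.LayerNorm(64),
)
module_map = {
    torch.nn.Linear: AutoWrappedLinear,
    torch.nn.LayerNorm: AutoWrappedModule,
}
vram_config = dict(
    offload_dtype=torch.float32, offload_device="cpu",
    onload_dtype=torch.bfloat16, onload_device="cuda",
    preparing_dtype=torch.bfloat16, preparing_device="cuda",
    computation_dtype=torch.bfloat16, computation_device="cuda",
)
model = enable_vram_management(model, module_map, vram_config)
assert model.vram_management_enabled
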
DiffSynth-Studio/diffsynth/diffusion/__init__.py ADDED
@@ -0,0 +1,6 @@
+ from .flow_match import FlowMatchScheduler
+ from .training_module import DiffusionTrainingModule
+ from .logger import ModelLogger
+ from .runner import launch_training_task, launch_data_process_task
+ from .parsers import *
+ from .loss import *
DiffSynth-Studio/diffsynth/diffusion/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (440 Bytes)
DiffSynth-Studio/diffsynth/diffusion/__pycache__/base_pipeline.cpython-39.pyc ADDED
Binary file (16 kB)
DiffSynth-Studio/diffsynth/diffusion/__pycache__/flow_match.cpython-39.pyc ADDED
Binary file (5.86 kB)
DiffSynth-Studio/diffsynth/diffusion/__pycache__/logger.cpython-39.pyc ADDED
Binary file (2.1 kB)