from typing import Any, Dict, Optional, Tuple, Union

import torch

from diffusers.utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers
from diffusers.models.modeling_outputs import Transformer2DModelOutput
from diffusers.models.transformers.transformer_wan import WanTransformer3DModel
from diffusers.models.attention_processor import AttentionProcessor


logger = logging.get_logger(__name__)


class NagWanTransformer3DModel(WanTransformer3DModel):
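    r"""
    Wan 3D transformer variant used with NAG (Normalized Attention Guidance) sampling.

    It exposes the standard `attn_processors` / `set_attn_processor` API so custom guidance
    attention processors can be installed on every attention layer, and its `forward` accepts
    text-embedding batches that are a 1x, 2x, or 3x multiple of the image-embedding batch
    (the image embeddings are tiled to match before concatenation).
    """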

    @property
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        r"""
        Returns:
            `dict` of attention processors: A dictionary containing all attention processors used in the model,
            indexed by their weight names.
        """
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "get_processor"):
                processors[f"{name}.processor"] = module.get_processor()

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        r"""
        Sets the attention processor to use to compute attention.

        Parameters:
            processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
                The instantiated processor class or a dictionary of processor classes that will be set as the
                processor for **all** `Attention` layers.

                If `processor` is a dict, the key needs to define the path to the corresponding cross attention
                processor. This is strongly recommended when setting trainable attention processors.
        """
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def forward(
        self,
        hidden_states: torch.Tensor,
        timestep: torch.LongTensor,
        encoder_hidden_states: torch.Tensor,
        encoder_hidden_states_image: Optional[torch.Tensor] = None,
        return_dict: bool = True,
        attention_kwargs: Optional[Dict[str, Any]] = None,
    ) -> Union[Tuple[torch.Tensor], Transformer2DModelOutput]:
        if attention_kwargs is not None:
            attention_kwargs = attention_kwargs.copy()
            lora_scale = attention_kwargs.pop("scale", 1.0)
        else:
            lora_scale = 1.0

        if USE_PEFT_BACKEND:
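            # weight the lora layers by setting `lora_scale` for each PEFT layer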
            scale_lora_layers(self, lora_scale)
        else:
            if attention_kwargs is not None and lora_scale != 1.0:
                logger.warning(
                    "Passing `scale` via `attention_kwargs` when not using the PEFT backend is ineffective."
                )
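
        # The video latent is patchified below; keep the post-patch sizes so the token
        # sequence can be un-patchified back to (B, C, F, H, W) at the end of forward.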
        batch_size, num_channels, num_frames, height, width = hidden_states.shape
        p_t, p_h, p_w = self.config.patch_size
        post_patch_num_frames = num_frames // p_t
        post_patch_height = height // p_h
        post_patch_width = width // p_w
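
        # Rotary position embeddings are computed from the un-patchified latent;
        # `patch_embedding` then flattens it into a token sequence (batch, num_patches, inner_dim).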
        rotary_emb = self.rope(hidden_states)

        hidden_states = self.patch_embedding(hidden_states)
        hidden_states = hidden_states.flatten(2).transpose(1, 2)
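
        # `condition_embedder` returns the global timestep embedding (`temb`), a per-block
        # modulation projection (`timestep_proj`, split into 6 signals below), and the
        # projected text and optional image embeddings.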
        temb, timestep_proj, encoder_hidden_states, encoder_hidden_states_image = self.condition_embedder(
            timestep, encoder_hidden_states, encoder_hidden_states_image
        )
        timestep_proj = timestep_proj.unflatten(1, (6, -1))
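
        # NAG-specific deviation from the stock Wan transformer: the text-embedding batch may be
        # 2x or 3x the image-embedding batch (e.g. when extra negative-prompt batches are
        # concatenated for guidance), so the image embeddings are tiled to match before being
        # prepended to the text embeddings.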
        if encoder_hidden_states_image is not None:
            bs_encoder_hidden_states = len(encoder_hidden_states)
            bs_encoder_hidden_states_image = len(encoder_hidden_states_image)
            bs_scale = bs_encoder_hidden_states / bs_encoder_hidden_states_image
            assert bs_scale in [1, 2, 3]
            if bs_scale != 1:
                encoder_hidden_states_image = encoder_hidden_states_image.tile(int(bs_scale), 1, 1)
            encoder_hidden_states = torch.concat([encoder_hidden_states_image, encoder_hidden_states], dim=1)
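
        # Transformer blocks (with gradient checkpointing when enabled during training)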
        if torch.is_grad_enabled() and self.gradient_checkpointing:
            for block in self.blocks:
                hidden_states = self._gradient_checkpointing_func(
                    block, hidden_states, encoder_hidden_states, timestep_proj, rotary_emb
                )
        else:
            for block in self.blocks:
                hidden_states = block(hidden_states, encoder_hidden_states, timestep_proj, rotary_emb)
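
        # Output norm, projection & un-patchify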
        shift, scale = (self.scale_shift_table + temb.unsqueeze(1)).chunk(2, dim=1)
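
        # Move the shift and scale tensors to the same device as hidden_states. When using
        # multi-GPU inference via accelerate, these can end up on a different device than
        # hidden_states.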
        shift = shift.to(hidden_states.device)
        scale = scale.to(hidden_states.device)

        hidden_states = (self.norm_out(hidden_states.float()) * (1 + scale) + shift).type_as(hidden_states)
        hidden_states = self.proj_out(hidden_states)
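
        # Un-patchify: fold the token sequence back into (batch, channels, frames, height, width)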
        hidden_states = hidden_states.reshape(
            batch_size, post_patch_num_frames, post_patch_height, post_patch_width, p_t, p_h, p_w, -1
        )
        hidden_states = hidden_states.permute(0, 7, 1, 4, 2, 5, 3, 6)
        output = hidden_states.flatten(6, 7).flatten(4, 5).flatten(2, 3)

        if USE_PEFT_BACKEND:
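            # remove `lora_scale` from each PEFT layer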
            unscale_lora_layers(self, lora_scale)

        if not return_dict:
            return (output,)

        return Transformer2DModelOutput(sample=output)
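

# Minimal usage sketch (illustrative only): swap this wrapper in for the stock Wan transformer
# inside a diffusers `WanPipeline`. The checkpoint id below is an assumption; any Wan 2.1
# diffusers-format checkpoint with a `transformer` subfolder should work the same way.
#
#     import torch
#     from diffusers import WanPipeline
#
#     model_id = "Wan-AI/Wan2.1-T2V-1.3B-Diffusers"
#     transformer = NagWanTransformer3DModel.from_pretrained(
#         model_id, subfolder="transformer", torch_dtype=torch.bfloat16
#     )
#     pipe = WanPipeline.from_pretrained(model_id, transformer=transformer, torch_dtype=torch.bfloat16)
#     pipe.to("cuda")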