| from dataclasses import dataclass
|
| from typing import Any, Dict, List, Optional, Tuple, Union
|
|
|
| import torch
|
| import torch.nn as nn
|
| import torch.utils.checkpoint
|
|
|
| from diffusers.configuration_utils import ConfigMixin, register_to_config
|
| from diffusers.loaders import UNet2DConditionLoadersMixin
|
| from diffusers.utils import USE_PEFT_BACKEND, BaseOutput, deprecate, logging, scale_lora_layers, unscale_lora_layers
|
| from diffusers.models.activations import get_activation
|
| from diffusers.models.attention_processor import (
|
| ADDED_KV_ATTENTION_PROCESSORS,
|
| CROSS_ATTENTION_PROCESSORS,
|
| Attention,
|
| AttentionProcessor,
|
| AttnAddedKVProcessor,
|
| AttnProcessor,
|
| )
|
| from einops import rearrange
|
|
|
| from diffusers.models.embeddings import (
|
| GaussianFourierProjection,
|
| ImageHintTimeEmbedding,
|
| ImageProjection,
|
| ImageTimeEmbedding,
|
| PositionNet,
|
| TextImageProjection,
|
| TextImageTimeEmbedding,
|
| TextTimeEmbedding,
|
| TimestepEmbedding,
|
| Timesteps,
|
| )
|
|
|
|
|
| from diffusers.models.modeling_utils import ModelMixin
|
| from src.unet_block_hacked_tryon import (
|
| UNetMidBlock2D,
|
| UNetMidBlock2DCrossAttn,
|
| UNetMidBlock2DSimpleCrossAttn,
|
| get_down_block,
|
| get_up_block,
|
| )
|
from diffusers.models.resnet import (
    Downsample2D,
    FirDownsample2D,
    FirUpsample2D,
    KDownsample2D,
    KUpsample2D,
    ResnetBlock2D,
    Upsample2D,
)
|
| from diffusers.models.transformer_2d import Transformer2DModel
|
| import math
|
|
|
| from ip_adapter.ip_adapter import Resampler
|
|
|
|
|
| logger = logging.get_logger(__name__)
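

# Helper: zero-initializes every parameter of a module, so a freshly added layer starts as a no-op.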
| def zero_module(module):
|
| for p in module.parameters():
|
| nn.init.zeros_(p)
|
| return module
|
|
|
| @dataclass
|
| class UNet2DConditionOutput(BaseOutput):
|
| """
|
| The output of [`UNet2DConditionModel`].
|
|
|
| Args:
|
| sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
|
| The hidden states output conditioned on `encoder_hidden_states` input. Output of last layer of model.
|
| """
|
|
|
| sample: torch.FloatTensor = None
|
|
|
|
|
| class UNet2DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin):
|
| r"""
|
| A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample
|
| shaped output.
|
|
|
This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
|
| for all models (such as downloading or saving).
|
|
|
| Parameters:
|
| sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):
|
| Height and width of input/output sample.
|
| in_channels (`int`, *optional*, defaults to 4): Number of channels in the input sample.
|
| out_channels (`int`, *optional*, defaults to 4): Number of channels in the output.
|
| center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample.
|
flip_sin_to_cos (`bool`, *optional*, defaults to `True`):
|
| Whether to flip the sin to cos in the time embedding.
|
| freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding.
|
| down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
|
| The tuple of downsample blocks to use.
|
| mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock2DCrossAttn"`):
|
| Block type for middle of UNet, it can be one of `UNetMidBlock2DCrossAttn`, `UNetMidBlock2D`, or
|
| `UNetMidBlock2DSimpleCrossAttn`. If `None`, the mid block layer is skipped.
|
| up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")`):
|
| The tuple of upsample blocks to use.
|
only_cross_attention (`bool` or `Tuple[bool]`, *optional*, defaults to `False`):
|
| Whether to include self-attention in the basic transformer blocks, see
|
| [`~models.attention.BasicTransformerBlock`].
|
| block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
|
| The tuple of output channels for each block.
|
| layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.
|
| downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.
|
| mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.
|
| dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
|
| act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
|
| norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.
|
If `None`, normalization and activation layers are skipped in post-processing.
|
| norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.
|
| cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1280):
|
| The dimension of the cross attention features.
|
| transformer_layers_per_block (`int`, `Tuple[int]`, or `Tuple[Tuple]` , *optional*, defaults to 1):
|
| The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for
|
| [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`],
|
| [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
|
reverse_transformer_layers_per_block (`Tuple[Tuple]`, *optional*, defaults to `None`):
|
| The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`], in the upsampling
|
| blocks of the U-Net. Only relevant if `transformer_layers_per_block` is of type `Tuple[Tuple]` and for
|
| [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`],
|
| [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
|
| encoder_hid_dim (`int`, *optional*, defaults to None):
|
| If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim`
|
| dimension to `cross_attention_dim`.
|
| encoder_hid_dim_type (`str`, *optional*, defaults to `None`):
|
| If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text
|
embeddings of dimension `cross_attention_dim` according to `encoder_hid_dim_type`.
|
| attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.
|
| num_attention_heads (`int`, *optional*):
|
| The number of attention heads. If not defined, defaults to `attention_head_dim`
|
| resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config
|
| for ResNet blocks (see [`~models.resnet.ResnetBlock2D`]). Choose from `default` or `scale_shift`.
|
| class_embed_type (`str`, *optional*, defaults to `None`):
|
| The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`,
|
| `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`.
|
| addition_embed_type (`str`, *optional*, defaults to `None`):
|
| Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or
|
| "text". "text" will use the `TextTimeEmbedding` layer.
|
addition_time_embed_dim (`int`, *optional*, defaults to `None`):
|
| Dimension for the timestep embeddings.
|
| num_class_embeds (`int`, *optional*, defaults to `None`):
|
| Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing
|
| class conditioning with `class_embed_type` equal to `None`.
|
| time_embedding_type (`str`, *optional*, defaults to `positional`):
|
| The type of position embedding to use for timesteps. Choose from `positional` or `fourier`.
|
| time_embedding_dim (`int`, *optional*, defaults to `None`):
|
| An optional override for the dimension of the projected time embedding.
|
| time_embedding_act_fn (`str`, *optional*, defaults to `None`):
|
| Optional activation function to use only once on the time embeddings before they are passed to the rest of
|
| the UNet. Choose from `silu`, `mish`, `gelu`, and `swish`.
|
| timestep_post_act (`str`, *optional*, defaults to `None`):
|
| The second activation function to use in timestep embedding. Choose from `silu`, `mish` and `gelu`.
|
| time_cond_proj_dim (`int`, *optional*, defaults to `None`):
|
| The dimension of `cond_proj` layer in the timestep embedding.
|
conv_in_kernel (`int`, *optional*, defaults to `3`): The kernel size of the `conv_in` layer.

conv_out_kernel (`int`, *optional*, defaults to `3`): The kernel size of the `conv_out` layer.

projection_class_embeddings_input_dim (`int`, *optional*):

The dimension of the `class_labels` input when `class_embed_type="projection"`. Required when

`class_embed_type="projection"`.
|
| class_embeddings_concat (`bool`, *optional*, defaults to `False`): Whether to concatenate the time
|
| embeddings with the class embeddings.
|
| mid_block_only_cross_attention (`bool`, *optional*, defaults to `None`):
|
| Whether to use cross attention with the mid block when using the `UNetMidBlock2DSimpleCrossAttn`. If
|
| `only_cross_attention` is given as a single boolean and `mid_block_only_cross_attention` is `None`, the
|
`only_cross_attention` value is used as the value for `mid_block_only_cross_attention`. Defaults to `False`
|
| otherwise.
|
| """
|
|
|
| _supports_gradient_checkpointing = True
|
|
|
| @register_to_config
|
| def __init__(
|
| self,
|
| sample_size: Optional[int] = None,
|
| in_channels: int = 4,
|
| out_channels: int = 4,
|
| center_input_sample: bool = False,
|
| flip_sin_to_cos: bool = True,
|
| freq_shift: int = 0,
|
| down_block_types: Tuple[str] = (
|
| "CrossAttnDownBlock2D",
|
| "CrossAttnDownBlock2D",
|
| "CrossAttnDownBlock2D",
|
| "DownBlock2D",
|
| ),
|
| mid_block_type: Optional[str] = "UNetMidBlock2DCrossAttn",
|
| up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"),
|
| only_cross_attention: Union[bool, Tuple[bool]] = False,
|
| block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
|
| layers_per_block: Union[int, Tuple[int]] = 2,
|
| downsample_padding: int = 1,
|
| mid_block_scale_factor: float = 1,
|
| dropout: float = 0.0,
|
| act_fn: str = "silu",
|
| norm_num_groups: Optional[int] = 32,
|
| norm_eps: float = 1e-5,
|
| cross_attention_dim: Union[int, Tuple[int]] = 1280,
|
| transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]] = 1,
|
| reverse_transformer_layers_per_block: Optional[Tuple[Tuple[int]]] = None,
|
| encoder_hid_dim: Optional[int] = None,
|
| encoder_hid_dim_type: Optional[str] = None,
|
| attention_head_dim: Union[int, Tuple[int]] = 8,
|
| num_attention_heads: Optional[Union[int, Tuple[int]]] = None,
|
| dual_cross_attention: bool = False,
|
| use_linear_projection: bool = False,
|
| class_embed_type: Optional[str] = None,
|
| addition_embed_type: Optional[str] = None,
|
| addition_time_embed_dim: Optional[int] = None,
|
| num_class_embeds: Optional[int] = None,
|
| upcast_attention: bool = False,
|
| resnet_time_scale_shift: str = "default",
|
| resnet_skip_time_act: bool = False,
|
resnet_out_scale_factor: float = 1.0,
|
| time_embedding_type: str = "positional",
|
| time_embedding_dim: Optional[int] = None,
|
| time_embedding_act_fn: Optional[str] = None,
|
| timestep_post_act: Optional[str] = None,
|
| time_cond_proj_dim: Optional[int] = None,
|
| conv_in_kernel: int = 3,
|
| conv_out_kernel: int = 3,
|
| projection_class_embeddings_input_dim: Optional[int] = None,
|
| attention_type: str = "default",
|
| class_embeddings_concat: bool = False,
|
| mid_block_only_cross_attention: Optional[bool] = None,
|
| cross_attention_norm: Optional[str] = None,
|
addition_embed_type_num_heads: int = 64,
|
| ):
|
| super().__init__()
|
|
|
| self.sample_size = sample_size
|
|
|
| if num_attention_heads is not None:
|
| raise ValueError(
|
| "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
|
| )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
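# If `num_attention_heads` is not defined (the usual case), fall back to `attention_head_dim`, which
# historically carried the number of heads (see the naming issue referenced in the error above).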
| num_attention_heads = num_attention_heads or attention_head_dim
|
|
|
|
|
| if len(down_block_types) != len(up_block_types):
|
| raise ValueError(
|
| f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}."
|
| )
|
|
|
| if len(block_out_channels) != len(down_block_types):
|
| raise ValueError(
|
| f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
|
| )
|
|
|
| if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):
|
| raise ValueError(
|
| f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}."
|
| )
|
|
|
| if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types):
|
| raise ValueError(
|
| f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}."
|
| )
|
|
|
| if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):
|
| raise ValueError(
|
| f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}."
|
| )
|
|
|
| if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types):
|
| raise ValueError(
|
| f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}."
|
| )
|
|
|
| if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types):
|
| raise ValueError(
|
| f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}."
|
| )
|
| if isinstance(transformer_layers_per_block, list) and reverse_transformer_layers_per_block is None:
|
| for layer_number_per_block in transformer_layers_per_block:
|
| if isinstance(layer_number_per_block, list):
|
| raise ValueError("Must provide 'reverse_transformer_layers_per_block` if using asymmetrical UNet.")
|
|
|
|
|
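# input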
| conv_in_padding = (conv_in_kernel - 1) // 2
|
| self.conv_in = nn.Conv2d(
|
| in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
|
| )
|
|
|
|
|
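# time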
| if time_embedding_type == "fourier":
|
| time_embed_dim = time_embedding_dim or block_out_channels[0] * 2
|
| if time_embed_dim % 2 != 0:
|
| raise ValueError(f"`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.")
|
| self.time_proj = GaussianFourierProjection(
|
| time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
|
| )
|
| timestep_input_dim = time_embed_dim
|
| elif time_embedding_type == "positional":
|
| time_embed_dim = time_embedding_dim or block_out_channels[0] * 4
|
|
|
| self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
|
| timestep_input_dim = block_out_channels[0]
|
| else:
|
| raise ValueError(
|
| f"{time_embedding_type} does not exist. Please make sure to use one of `fourier` or `positional`."
|
| )
|
|
|
| self.time_embedding = TimestepEmbedding(
|
| timestep_input_dim,
|
| time_embed_dim,
|
| act_fn=act_fn,
|
| post_act_fn=timestep_post_act,
|
| cond_proj_dim=time_cond_proj_dim,
|
| )
|
|
|
| if encoder_hid_dim_type is None and encoder_hid_dim is not None:
|
| encoder_hid_dim_type = "text_proj"
|
| self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type)
|
| logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.")
|
|
|
| if encoder_hid_dim is None and encoder_hid_dim_type is not None:
|
| raise ValueError(
|
| f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}."
|
| )
|
|
|
| if encoder_hid_dim_type == "text_proj":
|
| self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim)
|
| elif encoder_hid_dim_type == "text_image_proj":
|
|
|
|
|
|
|
| self.encoder_hid_proj = TextImageProjection(
|
| text_embed_dim=encoder_hid_dim,
|
| image_embed_dim=cross_attention_dim,
|
| cross_attention_dim=cross_attention_dim,
|
| )
|
| elif encoder_hid_dim_type == "image_proj":
|
|
|
| self.encoder_hid_proj = ImageProjection(
|
| image_embed_dim=encoder_hid_dim,
|
| cross_attention_dim=cross_attention_dim,
|
| )
|
| elif encoder_hid_dim_type == "ip_image_proj":
|
|
|
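# Perceiver-style resampler from IP-Adapter(-Plus); its 16 learned queries match the `num_tokens=16`
# used by the IPAttnProcessor instances installed at the end of __init__.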
| self.encoder_hid_proj = Resampler(
|
| dim=1280,
|
| depth=4,
|
| dim_head=64,
|
| heads=20,
|
| num_queries=16,
|
| embedding_dim=encoder_hid_dim,
|
| output_dim=self.config.cross_attention_dim,
|
| ff_mult=4,
|
| )
|
|
|
|
|
| elif encoder_hid_dim_type is not None:
|
| raise ValueError(
|
| f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'."
|
| )
|
| else:
|
| self.encoder_hid_proj = None
|
|
|
|
|
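# class embedding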
| if class_embed_type is None and num_class_embeds is not None:
|
| self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
|
| elif class_embed_type == "timestep":
|
| self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn)
|
| elif class_embed_type == "identity":
|
| self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
|
| elif class_embed_type == "projection":
|
| if projection_class_embeddings_input_dim is None:
|
| raise ValueError(
|
| "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set"
|
| )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
|
| elif class_embed_type == "simple_projection":
|
| if projection_class_embeddings_input_dim is None:
|
| raise ValueError(
|
| "`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set"
|
| )
|
| self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim)
|
| else:
|
| self.class_embedding = None
|
|
|
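# additional (augmentation) embedding that is summed with the time embedding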
| if addition_embed_type == "text":
|
| if encoder_hid_dim is not None:
|
| text_time_embedding_from_dim = encoder_hid_dim
|
| else:
|
| text_time_embedding_from_dim = cross_attention_dim
|
|
|
| self.add_embedding = TextTimeEmbedding(
|
| text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads
|
| )
|
| elif addition_embed_type == "text_image":
|
|
|
|
|
|
|
| self.add_embedding = TextImageTimeEmbedding(
|
| text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim
|
| )
|
| elif addition_embed_type == "text_time":
|
| self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift)
|
| self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
|
| elif addition_embed_type == "image":
|
|
|
| self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim)
|
| elif addition_embed_type == "image_hint":
|
|
|
| self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim)
|
| elif addition_embed_type is not None:
|
| raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.")
|
|
|
| if time_embedding_act_fn is None:
|
| self.time_embed_act = None
|
| else:
|
| self.time_embed_act = get_activation(time_embedding_act_fn)
|
|
|
| self.down_blocks = nn.ModuleList([])
|
| self.up_blocks = nn.ModuleList([])
|
|
|
| if isinstance(only_cross_attention, bool):
|
| if mid_block_only_cross_attention is None:
|
| mid_block_only_cross_attention = only_cross_attention
|
|
|
| only_cross_attention = [only_cross_attention] * len(down_block_types)
|
|
|
| if mid_block_only_cross_attention is None:
|
| mid_block_only_cross_attention = False
|
|
|
| if isinstance(num_attention_heads, int):
|
| num_attention_heads = (num_attention_heads,) * len(down_block_types)
|
|
|
| if isinstance(attention_head_dim, int):
|
| attention_head_dim = (attention_head_dim,) * len(down_block_types)
|
|
|
| if isinstance(cross_attention_dim, int):
|
| cross_attention_dim = (cross_attention_dim,) * len(down_block_types)
|
|
|
| if isinstance(layers_per_block, int):
|
| layers_per_block = [layers_per_block] * len(down_block_types)
|
|
|
| if isinstance(transformer_layers_per_block, int):
|
| transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types)
|
| if class_embeddings_concat:
|
|
|
|
|
|
|
| blocks_time_embed_dim = time_embed_dim * 2
|
| else:
|
| blocks_time_embed_dim = time_embed_dim
|
|
|
|
|
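# down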
| output_channel = block_out_channels[0]
|
| for i, down_block_type in enumerate(down_block_types):
|
| input_channel = output_channel
|
| output_channel = block_out_channels[i]
|
| is_final_block = i == len(block_out_channels) - 1
|
|
|
| down_block = get_down_block(
|
| down_block_type,
|
| num_layers=layers_per_block[i],
|
| transformer_layers_per_block=transformer_layers_per_block[i],
|
| in_channels=input_channel,
|
| out_channels=output_channel,
|
| temb_channels=blocks_time_embed_dim,
|
| add_downsample=not is_final_block,
|
| resnet_eps=norm_eps,
|
| resnet_act_fn=act_fn,
|
| resnet_groups=norm_num_groups,
|
| cross_attention_dim=cross_attention_dim[i],
|
| num_attention_heads=num_attention_heads[i],
|
| downsample_padding=downsample_padding,
|
| dual_cross_attention=dual_cross_attention,
|
| use_linear_projection=use_linear_projection,
|
| only_cross_attention=only_cross_attention[i],
|
| upcast_attention=upcast_attention,
|
| resnet_time_scale_shift=resnet_time_scale_shift,
|
| attention_type=attention_type,
|
| resnet_skip_time_act=resnet_skip_time_act,
|
| resnet_out_scale_factor=resnet_out_scale_factor,
|
| cross_attention_norm=cross_attention_norm,
|
| attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
|
| dropout=dropout,
|
| )
|
| self.down_blocks.append(down_block)
|
|
|
|
|
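# mid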
| if mid_block_type == "UNetMidBlock2DCrossAttn":
|
| self.mid_block = UNetMidBlock2DCrossAttn(
|
| transformer_layers_per_block=transformer_layers_per_block[-1],
|
| in_channels=block_out_channels[-1],
|
| temb_channels=blocks_time_embed_dim,
|
| dropout=dropout,
|
| resnet_eps=norm_eps,
|
| resnet_act_fn=act_fn,
|
| output_scale_factor=mid_block_scale_factor,
|
| resnet_time_scale_shift=resnet_time_scale_shift,
|
| cross_attention_dim=cross_attention_dim[-1],
|
| num_attention_heads=num_attention_heads[-1],
|
| resnet_groups=norm_num_groups,
|
| dual_cross_attention=dual_cross_attention,
|
| use_linear_projection=use_linear_projection,
|
| upcast_attention=upcast_attention,
|
| attention_type=attention_type,
|
| )
|
| elif mid_block_type == "UNetMidBlock2DSimpleCrossAttn":
|
| self.mid_block = UNetMidBlock2DSimpleCrossAttn(
|
| in_channels=block_out_channels[-1],
|
| temb_channels=blocks_time_embed_dim,
|
| dropout=dropout,
|
| resnet_eps=norm_eps,
|
| resnet_act_fn=act_fn,
|
| output_scale_factor=mid_block_scale_factor,
|
| cross_attention_dim=cross_attention_dim[-1],
|
| attention_head_dim=attention_head_dim[-1],
|
| resnet_groups=norm_num_groups,
|
| resnet_time_scale_shift=resnet_time_scale_shift,
|
| skip_time_act=resnet_skip_time_act,
|
| only_cross_attention=mid_block_only_cross_attention,
|
| cross_attention_norm=cross_attention_norm,
|
| )
|
| elif mid_block_type == "UNetMidBlock2D":
|
| self.mid_block = UNetMidBlock2D(
|
| in_channels=block_out_channels[-1],
|
| temb_channels=blocks_time_embed_dim,
|
| dropout=dropout,
|
| num_layers=0,
|
| resnet_eps=norm_eps,
|
| resnet_act_fn=act_fn,
|
| output_scale_factor=mid_block_scale_factor,
|
| resnet_groups=norm_num_groups,
|
| resnet_time_scale_shift=resnet_time_scale_shift,
|
| add_attention=False,
|
| )
|
| elif mid_block_type is None:
|
| self.mid_block = None
|
| else:
|
| raise ValueError(f"unknown mid_block_type : {mid_block_type}")
|
|
|
|
|
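# up
# count how many layers upsample the images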
| self.num_upsamplers = 0
|
|
|
|
|
| reversed_block_out_channels = list(reversed(block_out_channels))
|
| reversed_num_attention_heads = list(reversed(num_attention_heads))
|
| reversed_layers_per_block = list(reversed(layers_per_block))
|
| reversed_cross_attention_dim = list(reversed(cross_attention_dim))
|
| reversed_transformer_layers_per_block = (
|
| list(reversed(transformer_layers_per_block))
|
| if reverse_transformer_layers_per_block is None
|
| else reverse_transformer_layers_per_block
|
| )
|
| only_cross_attention = list(reversed(only_cross_attention))
|
|
|
| output_channel = reversed_block_out_channels[0]
|
| for i, up_block_type in enumerate(up_block_types):
|
| is_final_block = i == len(block_out_channels) - 1
|
|
|
| prev_output_channel = output_channel
|
| output_channel = reversed_block_out_channels[i]
|
| input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
|
|
|
|
|
| if not is_final_block:
|
| add_upsample = True
|
| self.num_upsamplers += 1
|
| else:
|
| add_upsample = False
|
| up_block = get_up_block(
|
| up_block_type,
|
| num_layers=reversed_layers_per_block[i] + 1,
|
| transformer_layers_per_block=reversed_transformer_layers_per_block[i],
|
| in_channels=input_channel,
|
| out_channels=output_channel,
|
| prev_output_channel=prev_output_channel,
|
| temb_channels=blocks_time_embed_dim,
|
| add_upsample=add_upsample,
|
| resnet_eps=norm_eps,
|
| resnet_act_fn=act_fn,
|
| resolution_idx=i,
|
| resnet_groups=norm_num_groups,
|
| cross_attention_dim=reversed_cross_attention_dim[i],
|
| num_attention_heads=reversed_num_attention_heads[i],
|
| dual_cross_attention=dual_cross_attention,
|
| use_linear_projection=use_linear_projection,
|
| only_cross_attention=only_cross_attention[i],
|
| upcast_attention=upcast_attention,
|
| resnet_time_scale_shift=resnet_time_scale_shift,
|
| attention_type=attention_type,
|
| resnet_skip_time_act=resnet_skip_time_act,
|
| resnet_out_scale_factor=resnet_out_scale_factor,
|
| cross_attention_norm=cross_attention_norm,
|
| attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
|
| dropout=dropout,
|
| )
|
|
|
| self.up_blocks.append(up_block)
|
| prev_output_channel = output_channel
|
|
|
|
|
|
|
|
|
|
|
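# out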
| if norm_num_groups is not None:
|
| self.conv_norm_out = nn.GroupNorm(
|
| num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps
|
| )
|
|
|
| self.conv_act = get_activation(act_fn)
|
|
|
| else:
|
| self.conv_norm_out = None
|
| self.conv_act = None
|
|
|
| conv_out_padding = (conv_out_kernel - 1) // 2
|
| self.conv_out = nn.Conv2d(
|
| block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding
|
| )
|
|
|
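# position net for GLIGEN-style gated attention (consumes the grounding inputs passed via
# `cross_attention_kwargs["gligen"]` in `forward`)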
| if attention_type in ["gated", "gated-text-image"]:
|
| positive_len = 768
|
| if isinstance(cross_attention_dim, int):
|
| positive_len = cross_attention_dim
|
| elif isinstance(cross_attention_dim, tuple) or isinstance(cross_attention_dim, list):
|
| positive_len = cross_attention_dim[0]
|
|
|
| feature_type = "text-only" if attention_type == "gated" else "text-image"
|
| self.position_net = PositionNet(
|
| positive_len=positive_len, out_dim=cross_attention_dim, feature_type=feature_type
|
| )
|
|
|
|
|
|
|
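# Swap in IP-Adapter attention processors: self-attention layers (attn1) keep the default
# AttnProcessor2_0, while every cross-attention layer gets an IPAttnProcessor2_0 expecting 16
# image-prompt tokens.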
| from ip_adapter.attention_processor import IPAttnProcessor2_0 as IPAttnProcessor, AttnProcessor2_0 as AttnProcessor
|
|
|
| attn_procs = {}
|
| for name in self.attn_processors.keys():
|
| cross_attention_dim = None if name.endswith("attn1.processor") else self.config.cross_attention_dim
|
| if name.startswith("mid_block"):
|
| hidden_size = self.config.block_out_channels[-1]
|
| elif name.startswith("up_blocks"):
|
| block_id = int(name[len("up_blocks.")])
|
| hidden_size = list(reversed(self.config.block_out_channels))[block_id]
|
| elif name.startswith("down_blocks"):
|
| block_id = int(name[len("down_blocks.")])
|
| hidden_size = self.config.block_out_channels[block_id]
|
| if cross_attention_dim is None:
|
| attn_procs[name] = AttnProcessor()
|
| else:
|
| layer_name = name.split(".processor")[0]
|
| attn_procs[name] = IPAttnProcessor(hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, num_tokens=16)
|
| self.set_attn_processor(attn_procs)
|
|
|
|
|
| @property
|
| def attn_processors(self) -> Dict[str, AttentionProcessor]:
|
| r"""
|
| Returns:
|
`dict` of attention processors: A dictionary containing all attention processors used in the model,

indexed by their weight names.
|
| """
|
|
|
| processors = {}
|
|
|
| def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
|
| if hasattr(module, "get_processor"):
|
| processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True)
|
|
|
| for sub_name, child in module.named_children():
|
| fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
|
|
|
| return processors
|
|
|
| for name, module in self.named_children():
|
| fn_recursive_add_processors(name, module, processors)
|
|
|
| return processors
|
|
|
| def set_attn_processor(
|
| self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]], _remove_lora=False
|
| ):
|
| r"""
|
| Sets the attention processor to use to compute attention.
|
|
|
| Parameters:
|
| processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
|
| The instantiated processor class or a dictionary of processor classes that will be set as the processor
|
| for **all** `Attention` layers.
|
|
|
| If `processor` is a dict, the key needs to define the path to the corresponding cross attention
|
| processor. This is strongly recommended when setting trainable attention processors.
|
|
|
| """
|
| count = len(self.attn_processors.keys())
|
|
|
| if isinstance(processor, dict) and len(processor) != count:
|
| raise ValueError(
|
| f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
|
| f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
|
| )
|
|
|
| def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
|
| if hasattr(module, "set_processor"):
|
| if not isinstance(processor, dict):
|
| module.set_processor(processor, _remove_lora=_remove_lora)
|
| else:
|
| module.set_processor(processor.pop(f"{name}.processor"), _remove_lora=_remove_lora)
|
|
|
| for sub_name, child in module.named_children():
|
| fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
|
|
|
| for name, module in self.named_children():
|
| fn_recursive_attn_processor(name, module, processor)
|
|
|
| def set_default_attn_processor(self):
|
| """
|
| Disables custom attention processors and sets the default attention implementation.
|
| """
|
| if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
|
| processor = AttnAddedKVProcessor()
|
| elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
|
| processor = AttnProcessor()
|
| else:
|
| raise ValueError(
|
| f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
|
| )
|
|
|
| self.set_attn_processor(processor, _remove_lora=True)
|
|
|
| def set_attention_slice(self, slice_size):
|
| r"""
|
| Enable sliced attention computation.
|
|
|
| When this option is enabled, the attention module splits the input tensor in slices to compute attention in
|
| several steps. This is useful for saving some memory in exchange for a small decrease in speed.
|
|
|
| Args:
|
| slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
|
| When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If
|
| `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is
|
| provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
|
| must be a multiple of `slice_size`.
|
| """
|
| sliceable_head_dims = []
|
|
|
| def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):
|
| if hasattr(module, "set_attention_slice"):
|
| sliceable_head_dims.append(module.sliceable_head_dim)
|
|
|
| for child in module.children():
|
| fn_recursive_retrieve_sliceable_dims(child)
|
|
|
|
|
| for module in self.children():
|
| fn_recursive_retrieve_sliceable_dims(module)
|
|
|
| num_sliceable_layers = len(sliceable_head_dims)
|
|
|
| if slice_size == "auto":
|
|
|
|
|
| slice_size = [dim // 2 for dim in sliceable_head_dims]
|
| elif slice_size == "max":
|
|
|
| slice_size = num_sliceable_layers * [1]
|
|
|
| slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
|
|
|
| if len(slice_size) != len(sliceable_head_dims):
|
| raise ValueError(
|
| f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
|
| f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
|
| )
|
|
|
| for i in range(len(slice_size)):
|
| size = slice_size[i]
|
| dim = sliceable_head_dims[i]
|
| if size is not None and size > dim:
|
| raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
|
|
|
|
|
|
|
|
|
| def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
|
| if hasattr(module, "set_attention_slice"):
|
| module.set_attention_slice(slice_size.pop())
|
|
|
| for child in module.children():
|
| fn_recursive_set_attention_slice(child, slice_size)
|
|
|
| reversed_slice_size = list(reversed(slice_size))
|
| for module in self.children():
|
| fn_recursive_set_attention_slice(module, reversed_slice_size)
|
|
|
| def _set_gradient_checkpointing(self, module, value=False):
|
| if hasattr(module, "gradient_checkpointing"):
|
| module.gradient_checkpointing = value
|
|
|
| def enable_freeu(self, s1, s2, b1, b2):
|
| r"""Enables the FreeU mechanism from https://arxiv.org/abs/2309.11497.
|
|
|
| The suffixes after the scaling factors represent the stage blocks where they are being applied.
|
|
|
| Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of values that
|
| are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.
|
|
|
| Args:
|
| s1 (`float`):
|
| Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
|
| mitigate the "oversmoothing effect" in the enhanced denoising process.
|
| s2 (`float`):
|
| Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
|
| mitigate the "oversmoothing effect" in the enhanced denoising process.
|
| b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
|
| b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
|
| """
|
| for i, upsample_block in enumerate(self.up_blocks):
|
| setattr(upsample_block, "s1", s1)
|
| setattr(upsample_block, "s2", s2)
|
| setattr(upsample_block, "b1", b1)
|
| setattr(upsample_block, "b2", b2)
|
|
|
| def disable_freeu(self):
|
| """Disables the FreeU mechanism."""
|
| freeu_keys = {"s1", "s2", "b1", "b2"}
|
| for i, upsample_block in enumerate(self.up_blocks):
|
| for k in freeu_keys:
|
| if hasattr(upsample_block, k) or getattr(upsample_block, k, None) is not None:
|
| setattr(upsample_block, k, None)
|
|
|
| def fuse_qkv_projections(self):
|
| """
|
| Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query,
|
| key, value) are fused. For cross-attention modules, key and value projection matrices are fused.
|
|
|
| <Tip warning={true}>
|
|
|
| This API is 🧪 experimental.
|
|
|
| </Tip>
|
| """
|
| self.original_attn_processors = None
|
|
|
| for _, attn_processor in self.attn_processors.items():
|
| if "Added" in str(attn_processor.__class__.__name__):
|
| raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")
|
|
|
| self.original_attn_processors = self.attn_processors
|
|
|
| for module in self.modules():
|
| if isinstance(module, Attention):
|
| module.fuse_projections(fuse=True)
|
|
|
| def unfuse_qkv_projections(self):
|
| """Disables the fused QKV projection if enabled.
|
|
|
| <Tip warning={true}>
|
|
|
| This API is 🧪 experimental.
|
|
|
| </Tip>
|
|
|
| """
|
| if self.original_attn_processors is not None:
|
| self.set_attn_processor(self.original_attn_processors)
|
|
|
| def forward(
|
| self,
|
| sample: torch.FloatTensor,
|
| timestep: Union[torch.Tensor, float, int],
|
| encoder_hidden_states: torch.Tensor,
|
| class_labels: Optional[torch.Tensor] = None,
|
| timestep_cond: Optional[torch.Tensor] = None,
|
| attention_mask: Optional[torch.Tensor] = None,
|
| cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
|
| down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
|
| mid_block_additional_residual: Optional[torch.Tensor] = None,
|
| down_intrablock_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
|
| encoder_attention_mask: Optional[torch.Tensor] = None,
|
| return_dict: bool = True,
|
| garment_features: Optional[Tuple[torch.Tensor]] = None,
|
| ) -> Union[UNet2DConditionOutput, Tuple]:
|
| r"""
|
| The [`UNet2DConditionModel`] forward method.
|
|
|
| Args:
|
| sample (`torch.FloatTensor`):
|
| The noisy input tensor with the following shape `(batch, channel, height, width)`.
|
| timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input.
|
| encoder_hidden_states (`torch.FloatTensor`):
|
| The encoder hidden states with shape `(batch, sequence_length, feature_dim)`.
|
| class_labels (`torch.Tensor`, *optional*, defaults to `None`):
|
| Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
|
| timestep_cond: (`torch.Tensor`, *optional*, defaults to `None`):
|
| Conditional embeddings for timestep. If provided, the embeddings will be summed with the samples passed
|
| through the `self.time_embedding` layer to obtain the timestep embeddings.
|
| attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
|
| An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
|
| is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
|
| negative values to the attention scores corresponding to "discard" tokens.
|
| cross_attention_kwargs (`dict`, *optional*):
|
| A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
|
| `self.processor` in
|
| [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| added_cond_kwargs: (`dict`, *optional*):
|
| A kwargs dictionary containing additional embeddings that if specified are added to the embeddings that
|
| are passed along to the UNet blocks.
|
down_block_additional_residuals (`tuple` of `torch.Tensor`, *optional*):

A tuple of tensors that, if specified, are added to the UNet long skip connections from the down

blocks to the up blocks (for example, from ControlNet side model(s)).

mid_block_additional_residual (`torch.Tensor`, *optional*):

A tensor that, if specified, is added to the output of the middle UNet block (for example, from a

ControlNet side model).

down_intrablock_additional_residuals (`tuple` of `torch.Tensor`, *optional*):

Additional residuals to be added within the UNet down blocks (for example, from T2I-Adapter side

model(s)).
|
| encoder_attention_mask (`torch.Tensor`):
|
| A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If
|
| `True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias,
|
| which adds large negative values to the attention scores corresponding to "discard" tokens.
|
| return_dict (`bool`, *optional*, defaults to `True`):
|
| Whether or not to return a [`~models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
|
| tuple.
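
garment_features (`tuple` of `torch.Tensor`, *optional*):

Reference garment features (produced by the garment/reference UNet) that are consumed block by

block by the hacked attention layers of this try-on UNet.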
|
|
|
|
| Returns:
|
| [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:
|
| If `return_dict` is True, an [`~models.unet_2d_condition.UNet2DConditionOutput`] is returned, otherwise
|
| a `tuple` is returned where the first element is the sample tensor.
|
| """
|
|
|
|
|
|
|
|
|
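# By default the sample has to be at least a multiple of the overall upsampling factor
# (2**num_upsamplers); otherwise the upsample size is forwarded explicitly to the up blocks.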
| default_overall_up_factor = 2**self.num_upsamplers
|
|
|
|
|
| forward_upsample_size = False
|
| upsample_size = None
|
|
|
| for dim in sample.shape[-2:]:
|
| if dim % default_overall_up_factor != 0:
|
|
|
| forward_upsample_size = True
|
| break
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
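# expand attention_mask: convert (1 = keep, 0 = discard) into an additive bias and add a head dimension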
| if attention_mask is not None:
|
|
|
|
|
|
|
|
|
| attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
|
| attention_mask = attention_mask.unsqueeze(1)
|
|
|
|
|
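# convert encoder_attention_mask to an additive bias the same way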
| if encoder_attention_mask is not None:
|
| encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0
|
| encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
|
|
|
|
|
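# 0. center input if necessary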
| if self.config.center_input_sample:
|
| sample = 2 * sample - 1.0
|
|
|
|
|
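# 1. time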
| timesteps = timestep
|
| if not torch.is_tensor(timesteps):
|
|
|
|
|
| is_mps = sample.device.type == "mps"
|
| if isinstance(timestep, float):
|
| dtype = torch.float32 if is_mps else torch.float64
|
| else:
|
| dtype = torch.int32 if is_mps else torch.int64
|
| timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
|
| elif len(timesteps.shape) == 0:
|
| timesteps = timesteps[None].to(sample.device)
|
|
|
|
|
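# broadcast to the batch dimension in a way that is compatible with ONNX/Core ML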
| timesteps = timesteps.expand(sample.shape[0])
|
|
|
| t_emb = self.time_proj(timesteps)
|
|
|
|
|
|
|
|
|
| t_emb = t_emb.to(dtype=sample.dtype)
|
|
|
| emb = self.time_embedding(t_emb, timestep_cond)
|
| aug_emb = None
|
|
|
| if self.class_embedding is not None:
|
| if class_labels is None:
|
| raise ValueError("class_labels should be provided when num_class_embeds > 0")
|
|
|
| if self.config.class_embed_type == "timestep":
|
| class_labels = self.time_proj(class_labels)
|
|
|
|
|
|
|
| class_labels = class_labels.to(dtype=sample.dtype)
|
|
|
| class_emb = self.class_embedding(class_labels).to(dtype=sample.dtype)
|
|
|
| if self.config.class_embeddings_concat:
|
| emb = torch.cat([emb, class_emb], dim=-1)
|
| else:
|
| emb = emb + class_emb
|
|
|
| if self.config.addition_embed_type == "text":
|
| aug_emb = self.add_embedding(encoder_hidden_states)
|
| elif self.config.addition_embed_type == "text_image":
|
|
|
| if "image_embeds" not in added_cond_kwargs:
|
| raise ValueError(
|
| f"{self.__class__} has the config param `addition_embed_type` set to 'text_image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
|
| )
|
|
|
| image_embs = added_cond_kwargs.get("image_embeds")
|
| text_embs = added_cond_kwargs.get("text_embeds", encoder_hidden_states)
|
| aug_emb = self.add_embedding(text_embs, image_embs)
|
| elif self.config.addition_embed_type == "text_time":
|
|
|
| if "text_embeds" not in added_cond_kwargs:
|
| raise ValueError(
|
| f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`"
|
| )
|
| text_embeds = added_cond_kwargs.get("text_embeds")
|
| if "time_ids" not in added_cond_kwargs:
|
| raise ValueError(
|
| f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`"
|
| )
|
| time_ids = added_cond_kwargs.get("time_ids")
|
| time_embeds = self.add_time_proj(time_ids.flatten())
|
| time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))
|
| add_embeds = torch.concat([text_embeds, time_embeds], dim=-1)
|
| add_embeds = add_embeds.to(emb.dtype)
|
| aug_emb = self.add_embedding(add_embeds)
|
| elif self.config.addition_embed_type == "image":
|
|
|
| if "image_embeds" not in added_cond_kwargs:
|
| raise ValueError(
|
| f"{self.__class__} has the config param `addition_embed_type` set to 'image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
|
| )
|
| image_embs = added_cond_kwargs.get("image_embeds")
|
| aug_emb = self.add_embedding(image_embs)
|
| elif self.config.addition_embed_type == "image_hint":
|
|
|
| if "image_embeds" not in added_cond_kwargs or "hint" not in added_cond_kwargs:
|
| raise ValueError(
|
| f"{self.__class__} has the config param `addition_embed_type` set to 'image_hint' which requires the keyword arguments `image_embeds` and `hint` to be passed in `added_cond_kwargs`"
|
| )
|
| image_embs = added_cond_kwargs.get("image_embeds")
|
| hint = added_cond_kwargs.get("hint")
|
| aug_emb, hint = self.add_embedding(image_embs, hint)
|
| sample = torch.cat([sample, hint], dim=1)
|
|
|
| emb = emb + aug_emb if aug_emb is not None else emb
|
|
|
| if self.time_embed_act is not None:
|
| emb = self.time_embed_act(emb)
|
|
|
| if self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_proj":
|
| encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states)
|
| elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_image_proj":
|
|
|
| if "image_embeds" not in added_cond_kwargs:
|
| raise ValueError(
|
| f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'text_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`"
|
| )
|
|
|
| image_embeds = added_cond_kwargs.get("image_embeds")
|
| encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states, image_embeds)
|
| elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "image_proj":
|
|
|
| if "image_embeds" not in added_cond_kwargs:
|
| raise ValueError(
|
| f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`"
|
| )
|
| image_embeds = added_cond_kwargs.get("image_embeds")
|
| encoder_hidden_states = self.encoder_hid_proj(image_embeds)
|
| elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "ip_image_proj":
|
| if "image_embeds" not in added_cond_kwargs:
|
| raise ValueError(
|
| f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'ip_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`"
|
| )
|
| image_embeds = added_cond_kwargs.get("image_embeds")
|
|
|
|
|
| encoder_hidden_states = torch.cat([encoder_hidden_states, image_embeds], dim=1)
|
|
|
|
|
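# 2. pre-process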
| sample = self.conv_in(sample)
|
|
|
|
|
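# 2.5 GLIGEN position net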
| if cross_attention_kwargs is not None and cross_attention_kwargs.get("gligen", None) is not None:
|
| cross_attention_kwargs = cross_attention_kwargs.copy()
|
| gligen_args = cross_attention_kwargs.pop("gligen")
|
| cross_attention_kwargs["gligen"] = {"objs": self.position_net(**gligen_args)}
|
|
|
|
|
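# index into `garment_features`; the hacked attention blocks consume entries sequentially and return
# the advanced index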
| curr_garment_feat_idx = 0
|
|
|
|
|
|
|
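# 3. down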
| lora_scale = cross_attention_kwargs.get("scale", 1.0) if cross_attention_kwargs is not None else 1.0
|
| if USE_PEFT_BACKEND:
|
|
|
| scale_lora_layers(self, lora_scale)
|
|
|
| is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None
|
|
|
| is_adapter = down_intrablock_additional_residuals is not None
|
|
|
|
|
|
|
| if not is_adapter and mid_block_additional_residual is None and down_block_additional_residuals is not None:
|
| deprecate(
|
| "T2I should not use down_block_additional_residuals",
|
| "1.3.0",
|
| "Passing intrablock residual connections with `down_block_additional_residuals` is deprecated \
|
| and will be removed in diffusers 1.3.0. `down_block_additional_residuals` should only be used \
|
for ControlNet. Please make sure to use `down_intrablock_additional_residuals` instead. ",
|
| standard_warn=False,
|
| )
|
| down_intrablock_additional_residuals = down_block_additional_residuals
|
| is_adapter = True
|
|
|
| down_block_res_samples = (sample,)
|
| for downsample_block in self.down_blocks:
|
| if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
|
|
|
| additional_residuals = {}
|
| if is_adapter and len(down_intrablock_additional_residuals) > 0:
|
| additional_residuals["additional_residuals"] = down_intrablock_additional_residuals.pop(0)
|
|
|
sample, res_samples, curr_garment_feat_idx = downsample_block(
|
| hidden_states=sample,
|
| temb=emb,
|
| encoder_hidden_states=encoder_hidden_states,
|
| attention_mask=attention_mask,
|
| cross_attention_kwargs=cross_attention_kwargs,
|
| encoder_attention_mask=encoder_attention_mask,
|
| garment_features=garment_features,
|
| curr_garment_feat_idx=curr_garment_feat_idx,
|
| **additional_residuals,
|
| )
|
| else:
|
| sample, res_samples = downsample_block(hidden_states=sample, temb=emb, scale=lora_scale)
|
| if is_adapter and len(down_intrablock_additional_residuals) > 0:
|
| sample += down_intrablock_additional_residuals.pop(0)
|
|
|
| down_block_res_samples += res_samples
|
|
|
|
|
| if is_controlnet:
|
| new_down_block_res_samples = ()
|
|
|
| for down_block_res_sample, down_block_additional_residual in zip(
|
| down_block_res_samples, down_block_additional_residuals
|
| ):
|
| down_block_res_sample = down_block_res_sample + down_block_additional_residual
|
| new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,)
|
|
|
| down_block_res_samples = new_down_block_res_samples
|
|
|
|
|
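# 4. mid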
| if self.mid_block is not None:
|
| if hasattr(self.mid_block, "has_cross_attention") and self.mid_block.has_cross_attention:
|
sample, curr_garment_feat_idx = self.mid_block(
|
| sample,
|
| emb,
|
| encoder_hidden_states=encoder_hidden_states,
|
| attention_mask=attention_mask,
|
| cross_attention_kwargs=cross_attention_kwargs,
|
| encoder_attention_mask=encoder_attention_mask,
|
| garment_features=garment_features,
|
| curr_garment_feat_idx=curr_garment_feat_idx,
|
| )
|
| else:
|
| sample = self.mid_block(sample, emb)
|
|
|
|
|
| if (
|
| is_adapter
|
| and len(down_intrablock_additional_residuals) > 0
|
| and sample.shape == down_intrablock_additional_residuals[0].shape
|
| ):
|
| sample += down_intrablock_additional_residuals.pop(0)
|
|
|
| if is_controlnet:
|
| sample = sample + mid_block_additional_residual
|
|
|
|
|
|
|
|
|
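# 5. up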
| for i, upsample_block in enumerate(self.up_blocks):
|
| is_final_block = i == len(self.up_blocks) - 1
|
|
|
| res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
|
| down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]
|
|
|
|
|
|
|
| if not is_final_block and forward_upsample_size:
|
| upsample_size = down_block_res_samples[-1].shape[2:]
|
|
|
| if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention:
|
sample, curr_garment_feat_idx = upsample_block(
|
| hidden_states=sample,
|
| temb=emb,
|
| res_hidden_states_tuple=res_samples,
|
| encoder_hidden_states=encoder_hidden_states,
|
| cross_attention_kwargs=cross_attention_kwargs,
|
| upsample_size=upsample_size,
|
| attention_mask=attention_mask,
|
| encoder_attention_mask=encoder_attention_mask,
|
| garment_features=garment_features,
|
| curr_garment_feat_idx=curr_garment_feat_idx,
|
| )
|
|
|
| else:
|
| sample = upsample_block(
|
| hidden_states=sample,
|
| temb=emb,
|
| res_hidden_states_tuple=res_samples,
|
| upsample_size=upsample_size,
|
| scale=lora_scale,
|
| )
|
|
|
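# 6. post-process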
| if self.conv_norm_out:
|
| sample = self.conv_norm_out(sample)
|
| sample = self.conv_act(sample)
|
| sample = self.conv_out(sample)
|
|
|
| if USE_PEFT_BACKEND:
|
|
|
| unscale_lora_layers(self, lora_scale)
|
|
|
| if not return_dict:
|
| return (sample,)
|
|
|
| return UNet2DConditionOutput(sample=sample)
|
|
|