from typing import Any, Dict, Optional

import torch
import torch.nn.functional as F
from torch import nn

from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
from .lora import LoRACompatibleLinear


@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    r"""
    A basic Transformer block.

    Parameters:
        dim (`int`): The number of channels in the input and output.
        num_attention_heads (`int`): The number of heads to use for multi-head attention.
        attention_head_dim (`int`): The number of channels in each head.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
        only_cross_attention (`bool`, *optional*):
            Whether to use only cross-attention layers. In this case two cross attention layers are used.
        double_self_attention (`bool`, *optional*):
            Whether to use two self-attention layers. In this case no cross attention layers are used.
        activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
        num_embeds_ada_norm (`int`, *optional*):
            The number of diffusion steps used during training. See `Transformer2DModel`.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Configure if the attentions should contain a bias parameter.
        upcast_attention (`bool`, *optional*, defaults to `False`):
            Whether to upcast the attention computation to float32.
        norm_elementwise_affine (`bool`, *optional*, defaults to `True`):
            Whether the layer norms should have learnable affine parameters.
        norm_type (`str`, *optional*, defaults to `"layer_norm"`):
            The normalization layer to use. Can be `"layer_norm"`, `"ada_norm"` or `"ada_norm_zero"`.
        final_dropout (`bool`, *optional*, defaults to `False`):
            Whether to apply a final dropout after the feed-forward network.
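
    Example (a minimal usage sketch with the default `layer_norm` configuration; the sizes are illustrative):

        >>> import torch
        >>> block = BasicTransformerBlock(dim=320, num_attention_heads=8, attention_head_dim=40)
        >>> hidden_states = torch.randn(2, 64, 320)  # (batch, sequence_length, dim)
        >>> block(hidden_states).shape
        torch.Size([2, 64, 320])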
| """ |
|
|

    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        final_dropout: bool = False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attention (or a second self-attention block when `double_self_attention` is set)
        if cross_attention_dim is not None or double_self_attention:
            # AdaLayerNormZero is only used for the self-attention block, so the second norm is
            # either AdaLayerNorm or a plain LayerNorm.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # Let chunk size default to None (no feed-forward chunking)
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        # Sets chunked feed-forward: the feed-forward network is applied to `chunk_size`-sized slices
        # of dimension `dim` to reduce peak memory usage.
        self._chunk_size = chunk_size
        self._chunk_dim = dim
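
    # Forward-chunking example (a minimal sketch; the sizes below are illustrative):
    #
    #     block = BasicTransformerBlock(dim=320, num_attention_heads=8, attention_head_dim=40)
    #     block.set_chunk_feed_forward(chunk_size=32, dim=1)  # chunk along the sequence dimension
    #     out = block(torch.randn(2, 64, 320))                # 64 is divisible by chunk_size=32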

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        timestep: Optional[torch.LongTensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        class_labels: Optional[torch.LongTensor] = None,
    ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}

        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )

            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory at the cost of speed
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
                )

            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = ff_output + hidden_states

        return hidden_states


class FeedForward(nn.Module):
    r"""
    A feed-forward layer.

    Parameters:
        dim (`int`): The number of channels in the input.
        dim_out (`int`, *optional*): The number of channels in the output. If not given, defaults to `dim`.
        mult (`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
        final_dropout (`bool`, *optional*, defaults to `False`): Apply a final dropout.
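
    Example (a minimal usage sketch; the sizes are illustrative):

        >>> import torch
        >>> ff = FeedForward(dim=320)
        >>> ff(torch.randn(2, 64, 320)).shape
        torch.Size([2, 64, 320])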
| """ |
|
|

    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        elif activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(LoRACompatibleLinear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. has a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states


class GELU(nn.Module):
    r"""
    GELU activation function with tanh approximation support via `approximate="tanh"`.
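
    Example (a minimal usage sketch; the sizes are illustrative):

        >>> import torch
        >>> act = GELU(dim_in=320, dim_out=1280, approximate="tanh")
        >>> act(torch.randn(2, 64, 320)).shape
        torch.Size([2, 64, 1280])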
| """ |
|
|

    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16, so compute in float32 and cast back
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states


class GEGLU(nn.Module):
    r"""
    A variant of the gated linear unit activation function from https://arxiv.org/abs/2002.05202.

    Parameters:
        dim_in (`int`): The number of channels in the input.
        dim_out (`int`): The number of channels in the output.
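
    Example (a minimal usage sketch; the sizes are illustrative):

        >>> import torch
        >>> act = GEGLU(dim_in=320, dim_out=1280)
        >>> act(torch.randn(2, 64, 320)).shape
        torch.Size([2, 64, 1280])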
| """ |
|
|

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = LoRACompatibleLinear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16, so compute in float32 and cast back
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        # The projection produces both the hidden states and the gate, split along the last dimension.
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)


class ApproximateGELU(nn.Module):
    """
    The approximate form of the Gaussian Error Linear Unit (GELU).

    For more details, see section 2 of https://arxiv.org/abs/1606.08415.
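
    Example (a minimal usage sketch; the sizes are illustrative):

        >>> import torch
        >>> act = ApproximateGELU(dim_in=320, dim_out=1280)
        >>> y = act(torch.randn(2, 64, 320))  # linear projection followed by x * sigmoid(1.702 * x)
        >>> y.shape
        torch.Size([2, 64, 1280])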
| """ |
|
|

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        # sigmoid-based approximation of GELU: x * sigmoid(1.702 * x)
        return x * torch.sigmoid(1.702 * x)


class AdaLayerNorm(nn.Module):
    """
    Norm layer modified to incorporate timestep embeddings.
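
    Example (a minimal usage sketch; the sizes are illustrative and the timestep is a scalar tensor):

        >>> import torch
        >>> norm = AdaLayerNorm(embedding_dim=320, num_embeddings=1000)
        >>> x = torch.randn(2, 64, 320)
        >>> norm(x, torch.tensor(42)).shape  # a single diffusion timestep
        torch.Size([2, 64, 320])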
| """ |
|
|

    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        # Predict a timestep-dependent scale and shift and apply them after the affine-free layer norm.
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x


class AdaLayerNormZero(nn.Module):
    """
    Adaptive layer norm zero (adaLN-Zero) layer, conditioned on timestep and class embeddings.
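
    Example (a minimal usage sketch; the sizes are illustrative):

        >>> import torch
        >>> norm = AdaLayerNormZero(embedding_dim=320, num_embeddings=10)
        >>> x = torch.randn(2, 64, 320)
        >>> timestep = torch.tensor([3, 7])
        >>> class_labels = torch.tensor([1, 4])
        >>> out, gate_msa, shift_mlp, scale_mlp, gate_mlp = norm(x, timestep, class_labels, hidden_dtype=x.dtype)
        >>> out.shape
        torch.Size([2, 64, 320])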
| """ |
|
|

    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()

        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)

        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        # Predict shift/scale/gate parameters for both the attention (msa) and feed-forward (mlp) sub-blocks.
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp


class AdaGroupNorm(nn.Module):
    """
    GroupNorm layer modified to incorporate timestep embeddings.
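
    Example (a minimal usage sketch; the sizes are illustrative):

        >>> import torch
        >>> norm = AdaGroupNorm(embedding_dim=512, out_dim=256, num_groups=32, act_fn="silu")
        >>> x = torch.randn(2, 256, 8, 8)  # (batch, out_dim, height, width)
        >>> emb = torch.randn(2, 512)      # conditioning embedding
        >>> norm(x, emb).shape
        torch.Size([2, 256, 8, 8])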
| """ |
|
|

    def __init__(
        self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5
    ):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps

        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)

        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)

        # Group-normalize without affine parameters, then apply the embedding-conditioned scale and shift.
        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x