| import math |
| import warnings |
| from dataclasses import dataclass |
| from typing import Any, Callable, Optional, Tuple, Union |
|
|
| import numpy as np |
| import torch |
| import torch.nn as nn |
| import torch.nn.functional as F |
| from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss |
| from torch.nn.init import _calculate_fan_in_and_fan_out |
|
|
| from transformers.activations import ACT2FN |
| from transformers.modeling_attn_mask_utils import _prepare_4d_attention_mask |
| from transformers.modeling_layers import GradientCheckpointingLayer |
| from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling |
| from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel |
| from transformers.utils import ModelOutput, auto_docstring, can_return_tuple, logging |
| from configuration_hunyuan_vit import HunyuanViTConfig, HunyuanViTVisionConfig |
|
|
|
|
| logger = logging.get_logger(__name__) |
|
|
|
|
| @dataclass |
| class HunyuanViTVisionOutput(ModelOutput): |
| """ |
| Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states. |
| |
| Args: |
        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
| The image embeddings obtained by applying the projection layer to the pooler_output. |
| last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): |
| Sequence of hidden-states at the output of the last layer of the model. |
| hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): |
| Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + |
| one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. |
| |
| Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. |
| attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): |
| Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, |
| sequence_length)`. |
| |
| Attentions weights after the attention softmax, used to compute the weighted average in the self-attention |
| heads. |
| """ |
|
|
| image_embeds: Optional[torch.FloatTensor] = None |
| last_hidden_state: Optional[torch.FloatTensor] = None |
| hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None |
| attentions: Optional[Tuple[torch.FloatTensor, ...]] = None |
|
|
| @dataclass |
| class HunyuanViTOutput(ModelOutput): |
| """ |
| Args: |
| loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): |
| Contrastive loss for image-text similarity. |
| logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`): |
| The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text |
| similarity scores. |
| logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`): |
| The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image |
| similarity scores. |
        text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
            The text embeddings obtained by applying the projection layer to the pooled output of [`HunyuanViTTextModel`].
        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
            The image embeddings obtained by applying the projection layer to the pooled output of [`HunyuanViTVisionModel`].
| text_model_output (`BaseModelOutputWithPooling`): |
| The output of the [`HunyuanViTTextModel`]. |
| vision_model_output (`BaseModelOutputWithPooling`): |
| The output of the [`HunyuanViTVisionModel`]. |
| """ |
|
|
    loss: Optional[torch.FloatTensor] = None
    logits_per_image: Optional[torch.FloatTensor] = None
    logits_per_text: Optional[torch.FloatTensor] = None
    text_embeds: Optional[torch.FloatTensor] = None
    image_embeds: Optional[torch.FloatTensor] = None
    text_model_output: Optional[BaseModelOutputWithPooling] = None
    vision_model_output: Optional[BaseModelOutputWithPooling] = None

    def to_tuple(self) -> Tuple[Any]:
        return tuple(
            self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
            for k in self.keys()
        )
|
|
|
|
| class HunyuanViTVisionEmbeddings(nn.Module): |
| def __init__(self, config: HunyuanViTVisionConfig): |
| super().__init__() |
| self.config = config |
| self.embed_dim = config.hidden_size |
| self.patch_size = config.patch_size |
|
|
| self.patch_embedding = nn.Linear( |
| in_features=config.num_channels * self.patch_size * self.patch_size, |
| out_features=self.embed_dim, |
| ) |
|
|
| self.num_patches = config.num_patches |
| self.position_embedding_size = int(self.num_patches**0.5) |
| self.position_embedding = nn.Embedding(self.num_patches, self.embed_dim) |
|
|
| @staticmethod |
| def resize_positional_embeddings( |
| positional_embeddings: torch.Tensor, |
| spatial_shapes: torch.LongTensor, |
| max_length: int, |
| ) -> torch.Tensor: |
| """ |
| Resize positional embeddings to image-specific size and pad to a fixed size. |
| |
| Args: |
| positional_embeddings (`torch.Tensor`): |
| Position embeddings of shape (height, width, embed_dim) |
| spatial_shapes (`torch.LongTensor`): |
| Spatial shapes of shape (batch_size, 2) to resize the positional embeddings to |
| max_length (`int`): |
| Maximum length of the positional embeddings to pad resized positional embeddings to |
| |
| Returns: |
| `torch.Tensor`: Embeddings of shape (batch_size, max_length, embed_dim) |
| """ |
| batch_size = spatial_shapes.shape[0] |
| embed_dim = positional_embeddings.shape[-1] |
| source_dtype = positional_embeddings.dtype |
|
|
| resulted_positional_embeddings = torch.empty( |
| (batch_size, max_length, embed_dim), |
| device=positional_embeddings.device, |
| dtype=source_dtype, |
| ) |
|
|
| |
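        # (height, width, embed_dim) -> (1, embed_dim, height, width) so F.interpolate can resize the 2D grid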
| positional_embeddings = positional_embeddings.permute(2, 0, 1).unsqueeze(0) |
|
|
| |
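        # Upcast to float32 on CPU, where antialiased interpolation is not reliably supported in half precision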
| if positional_embeddings.device.type == "cpu": |
| positional_embeddings = positional_embeddings.to(torch.float32) |
|
|
| for i in range(batch_size): |
| |
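            # Resize the embedding grid to this image's (height, width) in patches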
| height, width = spatial_shapes[i] |
| resized_embeddings = F.interpolate( |
| positional_embeddings, |
| size=(height, width), |
| mode="bilinear", |
| align_corners=False, |
| antialias=True, |
| ) |
|
|
| |
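            # (1, embed_dim, height, width) -> (height * width, embed_dim)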
| resized_embeddings = resized_embeddings.reshape(embed_dim, height * width).transpose(0, 1) |
|
|
| |
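            # Cast back to the original dtype after interpolation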
| resized_embeddings = resized_embeddings.to(source_dtype) |
|
|
| resulted_positional_embeddings[i, : height * width] = resized_embeddings |
| resulted_positional_embeddings[i, height * width :] = resized_embeddings[0] |
|
|
| return resulted_positional_embeddings |
|
|
| def forward(self, pixel_values: torch.FloatTensor, spatial_shapes: torch.LongTensor) -> torch.Tensor: |
| """ |
| Args: |
| pixel_values (`torch.FloatTensor`): |
| Pixel values of shape (batch_size, max_num_patches, num_channels * patch_size * patch_size) |
            spatial_shapes (`torch.LongTensor` of shape `(batch_size, 2)`):
                Spatial shapes (height, width) of each image, in patches, used to resize the positional embeddings
| """ |
|
|
| |
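        # Apply the linear patch embedding to the already patchified pixel values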
| target_dtype = self.patch_embedding.weight.dtype |
| patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) |
|
|
| |
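        # Resize the positional embeddings per image and pad them to the sequence length (max_length)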
| positional_embeddings = self.position_embedding.weight.reshape( |
| self.position_embedding_size, self.position_embedding_size, -1 |
| ) |
| resized_positional_embeddings = self.resize_positional_embeddings( |
| positional_embeddings, spatial_shapes, max_length=pixel_values.shape[1] |
| ) |
|
|
| |
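        # Add positional embeddings to the patch embeddings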
| embeddings = patch_embeds + resized_positional_embeddings |
| return embeddings |
|
|
|
|
| def eager_attention_forward( |
| module: nn.Module, |
| query: torch.Tensor, |
| key: torch.Tensor, |
| value: torch.Tensor, |
| attention_mask: Optional[torch.Tensor], |
| scaling: float, |
| dropout: float = 0.0, |
| **kwargs, |
| ): |
| attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling |
| if attention_mask is not None: |
| attn_weights = attn_weights + attention_mask |
|
|
| attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) |
| attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) |
|
|
| attn_output = torch.matmul(attn_weights, value) |
| attn_output = attn_output.transpose(1, 2).contiguous() |
|
|
| return attn_output, attn_weights |
|
|
|
|
| class HunyuanViTAttention(nn.Module): |
| """Multi-headed attention from 'Attention Is All You Need' paper""" |
|
|
| def __init__(self, config): |
| super().__init__() |
| self.config = config |
| self.embed_dim = config.hidden_size |
| self.num_heads = config.num_attention_heads |
| self.head_dim = self.embed_dim // self.num_heads |
| if self.head_dim * self.num_heads != self.embed_dim: |
| raise ValueError( |
| f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" |
| f" {self.num_heads})." |
| ) |
| self.scale = self.head_dim**-0.5 |
| self.dropout = config.attention_dropout |
| self.is_causal = False |
|
|
| self.k_proj = nn.Linear(self.embed_dim, self.embed_dim) |
| self.v_proj = nn.Linear(self.embed_dim, self.embed_dim) |
| self.q_proj = nn.Linear(self.embed_dim, self.embed_dim) |
| self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) |
|
|
| def forward( |
| self, |
| hidden_states: torch.Tensor, |
| attention_mask: Optional[torch.Tensor] = None, |
| output_attentions: Optional[bool] = False, |
| ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: |
| """Input shape: Batch x Time x Channel""" |
|
|
| batch_size, seq_length, embed_dim = hidden_states.shape |
|
|
| queries = self.q_proj(hidden_states) |
| keys = self.k_proj(hidden_states) |
| values = self.v_proj(hidden_states) |
|
|
| queries = queries.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2) |
| keys = keys.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2) |
| values = values.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2) |
|
|
| attention_interface: Callable = eager_attention_forward |
| if self.config._attn_implementation != "eager": |
| if self.config._attn_implementation == "sdpa" and output_attentions: |
| logger.warning_once( |
| "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " |
| 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' |
| ) |
| else: |
| attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] |
|
|
| attn_output, attn_weights = attention_interface( |
| self, |
| queries, |
| keys, |
| values, |
| attention_mask, |
| is_causal=self.is_causal, |
| scaling=self.scale, |
| dropout=0.0 if not self.training else self.dropout, |
| ) |
|
|
| attn_output = attn_output.reshape(batch_size, seq_length, embed_dim).contiguous() |
| attn_output = self.out_proj(attn_output) |
|
|
| if not output_attentions: |
| attn_weights = None |
|
|
| return attn_output, attn_weights |
|
|
|
|
| class HunyuanViTMLP(nn.Module): |
| def __init__(self, config): |
| super().__init__() |
| self.config = config |
| self.activation_fn = ACT2FN[config.hidden_act] |
| self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) |
| self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) |
|
|
| def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: |
| hidden_states = self.fc1(hidden_states) |
| hidden_states = self.activation_fn(hidden_states) |
| hidden_states = self.fc2(hidden_states) |
| return hidden_states |
|
|
|
|
| class HunyuanViTEncoderLayer(GradientCheckpointingLayer): |
    def __init__(self, config: HunyuanViTVisionConfig):
| super().__init__() |
| self.embed_dim = config.hidden_size |
| self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) |
| self.self_attn = HunyuanViTAttention(config) |
| self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) |
| self.mlp = HunyuanViTMLP(config) |
|
|
| def forward( |
| self, |
| hidden_states: torch.Tensor, |
| attention_mask: torch.Tensor, |
| output_attentions: Optional[bool] = False, |
| ) -> Tuple[torch.FloatTensor]: |
| """ |
| Args: |
| hidden_states (`torch.FloatTensor`): |
| Input to the layer of shape `(batch, seq_len, embed_dim)`. |
| attention_mask (`torch.FloatTensor`): |
| Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values. |
| output_attentions (`bool`, *optional*, defaults to `False`): |
| Whether or not to return the attentions tensors of all attention layers. See `attentions` under |
| returned tensors for more detail. |
| """ |
| residual = hidden_states |
|
|
| hidden_states = self.layer_norm1(hidden_states) |
| hidden_states, attn_weights = self.self_attn( |
| hidden_states=hidden_states, |
| attention_mask=attention_mask, |
| output_attentions=output_attentions, |
| ) |
| hidden_states = residual + hidden_states |
|
|
| residual = hidden_states |
| hidden_states = self.layer_norm2(hidden_states) |
| hidden_states = self.mlp(hidden_states) |
| hidden_states = residual + hidden_states |
|
|
| outputs = (hidden_states,) |
|
|
| if output_attentions: |
| outputs += (attn_weights,) |
|
|
| return outputs |
|
|
|
|
| class HunyuanViTEncoder(nn.Module): |
| """ |
| Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a |
| [`HunyuanViTEncoderLayer`]. |
| |
| Args: |
| config: HunyuanViTConfig |
| """ |
|
|
| def __init__(self, config: HunyuanViTConfig): |
| super().__init__() |
| self.config = config |
| self.layers = nn.ModuleList([HunyuanViTEncoderLayer(config) for _ in range(config.num_hidden_layers)]) |
| self.gradient_checkpointing = False |
|
|
| |
| @can_return_tuple |
| def forward( |
| self, |
| inputs_embeds, |
| attention_mask: Optional[torch.Tensor] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| ) -> BaseModelOutput: |
| r""" |
| Args: |
| inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): |
| Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. |
| This is useful if you want more control over how to convert `input_ids` indices into associated vectors |
| than the model's internal embedding lookup matrix. |
| attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): |
| Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: |
| |
| - 1 for tokens that are **not masked**, |
| - 0 for tokens that are **masked**. |
| |
| [What are attention masks?](../glossary#attention-mask) |
| output_attentions (`bool`, *optional*): |
| Whether or not to return the attentions tensors of all attention layers. See `attentions` under |
| returned tensors for more detail. |
| output_hidden_states (`bool`, *optional*): |
| Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors |
| for more detail. |
| """ |
| output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions |
| output_hidden_states = ( |
| output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states |
| ) |
|
|
| encoder_states = () if output_hidden_states else None |
| all_attentions = () if output_attentions else None |
|
|
| hidden_states = inputs_embeds |
| for encoder_layer in self.layers: |
| if output_hidden_states: |
| encoder_states = encoder_states + (hidden_states,) |
|
|
| layer_outputs = encoder_layer( |
| hidden_states, |
| attention_mask, |
| output_attentions=output_attentions, |
| ) |
|
|
| hidden_states = layer_outputs[0] |
|
|
| if output_attentions: |
| all_attentions = all_attentions + (layer_outputs[1],) |
|
|
| if output_hidden_states: |
| encoder_states = encoder_states + (hidden_states,) |
|
|
| return BaseModelOutput( |
| last_hidden_state=hidden_states, |
| hidden_states=encoder_states, |
| attentions=all_attentions, |
| ) |
|
|
|
|
| class HunyuanViTVisionTransformer(nn.Module): |
| def __init__(self, config: HunyuanViTVisionConfig): |
| super().__init__() |
| self.config = config |
| embed_dim = config.hidden_size |
|
|
| self.embeddings = HunyuanViTVisionEmbeddings(config) |
| self.encoder = HunyuanViTEncoder(config) |
| self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) |
| |
| |
| |
| self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2" |
|
|
| @can_return_tuple |
| @auto_docstring |
| def forward( |
| self, |
| pixel_values: torch.FloatTensor, |
| attention_mask: torch.Tensor, |
| spatial_shapes: torch.LongTensor, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| ) -> BaseModelOutput: |
| r""" |
| spatial_shapes (`torch.LongTensor` of shape `(batch_size, 2)`): |
| Tensor containing the spatial dimensions (height, width) of the input images. |
| """ |
| output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions |
| output_hidden_states = ( |
| output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states |
| ) |
|
|
| hidden_states = self.embeddings(pixel_values, spatial_shapes) |
|
|
| if attention_mask is not None and not self._use_flash_attention_2: |
| |
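            # [batch_size, seq_len] -> [batch_size, 1, seq_len, seq_len] with large negative values at padded positions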
| encoder_attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype) |
| else: |
| encoder_attention_mask = attention_mask |
|
|
| encoder_outputs: BaseModelOutput = self.encoder( |
| inputs_embeds=hidden_states, |
| attention_mask=encoder_attention_mask, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| ) |
|
|
| last_hidden_state = encoder_outputs.last_hidden_state |
| last_hidden_state = self.post_layernorm(last_hidden_state) |
|
|
| |
|
|
| return BaseModelOutput( |
| last_hidden_state=last_hidden_state, |
| hidden_states=encoder_outputs.hidden_states, |
| attentions=encoder_outputs.attentions, |
| ) |
|
|
|
|
| def _trunc_normal_(tensor, mean, std, a, b): |
| |
| |
| def norm_cdf(x): |
| |
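        # Standard normal cumulative distribution function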
| return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0 |
|
|
| if (mean < a - 2 * std) or (mean > b + 2 * std): |
| warnings.warn( |
| "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. " |
| "The distribution of values may be incorrect.", |
| stacklevel=2, |
| ) |
|
|
| |
| |
| |
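    # Values are generated by sampling uniformly between the CDF values of the
    # truncation bounds and then mapping back through the inverse normal CDF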
| l = norm_cdf((a - mean) / std) |
| u = norm_cdf((b - mean) / std) |
|
|
| |
| |
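    # Uniformly fill the tensor with values in [2l - 1, 2u - 1]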
| tensor.uniform_(2 * l - 1, 2 * u - 1) |
|
|
| |
| |
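    # Use the inverse error function to map to a truncated standard normal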
| tensor.erfinv_() |
|
|
| |
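    # Scale and shift to the requested std and mean (the sqrt(2) factor completes the inverse CDF)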
| tensor.mul_(std * math.sqrt(2.0)) |
| tensor.add_(mean) |
|
|
| |
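    # Clamp to make sure the values lie within [a, b]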
| tensor.clamp_(min=a, max=b) |
|
|
|
|
| def trunc_normal_tf_( |
| tensor: torch.Tensor, mean: float = 0.0, std: float = 1.0, a: float = -2.0, b: float = 2.0 |
| ) -> torch.Tensor: |
| """Fills the input Tensor with values drawn from a truncated |
| normal distribution. The values are effectively drawn from the |
    normal distribution :math:`\\mathcal{N}(\\text{mean}, \\text{std}^2)`
| with values outside :math:`[a, b]` redrawn until they are within |
| the bounds. The method used for generating the random values works |
    best when :math:`a \\leq \\text{mean} \\leq b`.
| |
| NOTE: this 'tf' variant behaves closer to Tensorflow / JAX impl where the |
| bounds [a, b] are applied when sampling the normal distribution with mean=0, std=1.0 |
| and the result is subsequently scaled and shifted by the mean and std args. |
| |
| Args: |
| tensor: an n-dimensional `torch.Tensor` |
| mean: the mean of the normal distribution |
| std: the standard deviation of the normal distribution |
| a: the minimum cutoff value |
| b: the maximum cutoff value |
| """ |
| with torch.no_grad(): |
| _trunc_normal_(tensor, 0, 1.0, a, b) |
| tensor.mul_(std).add_(mean) |
|
|
|
|
| def variance_scaling_(tensor, scale=1.0, mode="fan_in", distribution="normal"): |
| fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor) |
| if mode == "fan_in": |
| denom = fan_in |
| elif mode == "fan_out": |
| denom = fan_out |
| elif mode == "fan_avg": |
| denom = (fan_in + fan_out) / 2 |
|
|
| variance = scale / denom |
|
|
| if distribution == "truncated_normal": |
| |
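        # The constant (~0.8796) is the std of a standard normal truncated to (-2, 2)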
| trunc_normal_tf_(tensor, std=math.sqrt(variance) / 0.87962566103423978) |
| elif distribution == "normal": |
| with torch.no_grad(): |
| tensor.normal_(std=math.sqrt(variance)) |
| elif distribution == "uniform": |
| bound = math.sqrt(3 * variance) |
| with torch.no_grad(): |
| tensor.uniform_(-bound, bound) |
| else: |
| raise ValueError(f"invalid distribution {distribution}") |
|
|
|
|
| def lecun_normal_(tensor): |
| variance_scaling_(tensor, mode="fan_in", distribution="truncated_normal") |
|
|
|
|
| def default_flax_embed_init(tensor): |
| variance_scaling_(tensor, mode="fan_in", distribution="normal") |
|
|
| @auto_docstring |
| class HunyuanViTPreTrainedModel(PreTrainedModel): |
| config_class = HunyuanViTConfig |
| base_model_prefix = "HunyuanViT" |
| supports_gradient_checkpointing = True |
|
|
    _no_split_modules = [
        "HunyuanViTTextEmbeddings",
        "HunyuanViTVisionEmbeddings",
        "HunyuanViTEncoderLayer",
    ]
| _supports_flash_attn_2 = True |
| _supports_sdpa = True |
| _supports_flex_attn = True |
| _supports_attention_backend = True |
|
|
| def _init_weights(self, module): |
| """Initialize the weights""" |
| if isinstance(module, HunyuanViTVisionEmbeddings): |
| width = ( |
| self.config.vision_config.hidden_size |
| if isinstance(self.config, HunyuanViTConfig) |
| else self.config.hidden_size |
| ) |
| nn.init.normal_(module.position_embedding.weight, std=1 / np.sqrt(width)) |
| elif isinstance(module, nn.Embedding): |
| default_flax_embed_init(module.weight) |
| elif isinstance(module, HunyuanViTAttention): |
| nn.init.xavier_uniform_(module.q_proj.weight) |
| nn.init.xavier_uniform_(module.k_proj.weight) |
| nn.init.xavier_uniform_(module.v_proj.weight) |
| nn.init.xavier_uniform_(module.out_proj.weight) |
| nn.init.zeros_(module.q_proj.bias) |
| nn.init.zeros_(module.k_proj.bias) |
| nn.init.zeros_(module.v_proj.bias) |
| nn.init.zeros_(module.out_proj.bias) |
| elif isinstance(module, HunyuanViTMLP): |
| nn.init.xavier_uniform_(module.fc1.weight) |
| nn.init.xavier_uniform_(module.fc2.weight) |
| nn.init.normal_(module.fc1.bias, std=1e-6) |
| nn.init.normal_(module.fc2.bias, std=1e-6) |
| |
| |
| |
| |
| elif isinstance(module, (nn.Linear, nn.Conv2d)): |
| lecun_normal_(module.weight) |
| if module.bias is not None: |
| nn.init.zeros_(module.bias) |
| elif isinstance(module, nn.LayerNorm): |
| module.bias.data.zero_() |
| module.weight.data.fill_(1.0) |
|
|
|
|
| @auto_docstring( |
| custom_intro=""" |
| The vision model from HunyuanViT without any head or projection on top. |
| """ |
| ) |
| class HunyuanViTVisionModel(HunyuanViTPreTrainedModel): |
| config_class = HunyuanViTVisionConfig |
| main_input_name = "pixel_values" |
|
|
| def __init__(self, config: HunyuanViTVisionConfig): |
| super().__init__(config) |
|
|
| self.vision_model = HunyuanViTVisionTransformer(config) |
|
|
| |
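        # Initialize weights and apply final processing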
| self.post_init() |
|
|
| def get_input_embeddings(self) -> nn.Module: |
| return self.vision_model.embeddings.patch_embedding |
|
|
| @can_return_tuple |
| @auto_docstring |
| def forward( |
| self, |
| pixel_values: torch.FloatTensor, |
| pixel_attention_mask: torch.Tensor, |
| spatial_shapes: torch.LongTensor, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
    ) -> BaseModelOutput:
| r""" |
        pixel_attention_mask (`torch.Tensor` of shape `(batch_size, max_num_patches)`, *optional*):
            Mask to avoid performing attention on padding patch indices.
| spatial_shapes (`torch.LongTensor` of shape `(batch_size, 2)`): |
| Tensor containing the spatial dimensions (height, width) of the input images. |
| |
| Examples: |
| |
| ```python |
| >>> from PIL import Image |
| >>> import requests |
| >>> from transformers import AutoProcessor, HunyuanViTVisionModel |
| |
| >>> model = HunyuanViTVisionModel.from_pretrained("google/HunyuanViT-base-patch16-224") |
| >>> processor = AutoProcessor.from_pretrained("google/HunyuanViT-base-patch16-224") |
| |
| >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" |
| >>> image = Image.open(requests.get(url, stream=True).raw) |
| |
| >>> inputs = processor(images=image, return_tensors="pt") |
| |
        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
| ```""" |
| return self.vision_model( |
| pixel_values=pixel_values, |
| attention_mask=pixel_attention_mask, |
| spatial_shapes=spatial_shapes, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| ) |
|
|
|
|
| @auto_docstring |
| class HunyuanViTModel(HunyuanViTPreTrainedModel): |
| config_class = HunyuanViTConfig |
|
|
| def __init__(self, config: HunyuanViTConfig): |
| super().__init__(config) |
|
|
| if not isinstance(config.vision_config, HunyuanViTVisionConfig): |
| raise TypeError( |
| "config.vision_config is expected to be of type HunyuanViTVisionConfig but is of type" |
| f" {type(config.vision_config)}." |
| ) |
|
|
| vision_config = config.vision_config |
|
|
| |
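        # Build the vision model from its sub-config so it picks up the requested attention implementation and dtype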
| vision_model = HunyuanViTVisionModel._from_config(vision_config) |
|
|
| |
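        # Keep a direct reference to the inner vision transformer (drops one wrapper level in the module tree)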
| self.vision_model = vision_model.vision_model |
|
|
| |
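        # Initialize weights and apply final processing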
| self.post_init() |
|
|
| @auto_docstring |
| def get_image_features( |
| self, |
| pixel_values: Optional[torch.FloatTensor] = None, |
| pixel_attention_mask: Optional[torch.Tensor] = None, |
| spatial_shapes: Optional[torch.LongTensor] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| ) -> torch.FloatTensor: |
| r""" |
        pixel_attention_mask (`torch.Tensor` of shape `(batch_size, max_num_patches)`, *optional*):
            Mask to avoid performing attention on padding patch indices.
| spatial_shapes (`torch.LongTensor` of shape `(batch_size, 2)`): |
| Tensor containing the spatial dimensions (height, width) of the input images. |
| |
| Returns: |
            image_features (`torch.FloatTensor` of shape `(batch_size, max_num_patches, hidden_size)`): The image features
            obtained from the last hidden state of [`HunyuanViTVisionModel`].
| |
| Examples: |
| |
| ```python |
| >>> from PIL import Image |
| >>> import requests |
| >>> from transformers import AutoProcessor, AutoModel |
| >>> import torch |
| |
| >>> model = AutoModel.from_pretrained("google/HunyuanViT-base-patch16-224") |
| >>> processor = AutoProcessor.from_pretrained("google/HunyuanViT-base-patch16-224") |
| |
| >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" |
| >>> image = Image.open(requests.get(url, stream=True).raw) |
| |
| >>> inputs = processor(images=image, return_tensors="pt") |
| |
| >>> with torch.no_grad(): |
| ... image_features = model.get_image_features(**inputs) |
| ``` |
| """ |
| |
| output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions |
| output_hidden_states = ( |
| output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states |
| ) |
|
|
        vision_outputs: BaseModelOutput = self.vision_model(
| pixel_values=pixel_values, |
| attention_mask=pixel_attention_mask, |
| spatial_shapes=spatial_shapes, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| ) |
|
|
        image_features = vision_outputs.last_hidden_state

        return image_features
|
|
|
|
__all__ = [
    "HunyuanViTModel",
    "HunyuanViTPreTrainedModel",
    "HunyuanViTVisionModel",
]