|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| """ PyTorch CLIP model."""
|
|
|
|
|
| from dataclasses import dataclass
|
| from typing import Any, Optional, Tuple, Union
|
|
|
| import torch
|
| import torch.utils.checkpoint
|
| from torch import nn
|
|
|
| from transformers.activations import ACT2FN
|
| from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
|
| from transformers.modeling_utils import PreTrainedModel
|
| from transformers.utils import (
|
| ModelOutput,
|
| add_start_docstrings,
|
| add_start_docstrings_to_model_forward,
|
| logging,
|
| replace_return_docstrings,
|
| )
|
| from transformers.models.clip.configuration_clip import CLIPConfig, CLIPTextConfig, CLIPVisionConfig
|
|
|
|
|
| logger = logging.get_logger(__name__)
|
|
|
| _CHECKPOINT_FOR_DOC = "openai/clip-vit-base-patch32"
|
|
|
| CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = [
|
| "openai/clip-vit-base-patch32",
|
|
|
| ]
|
|
|
|
|
|
|
| def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
|
| """
|
| Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
|
| """
|
| bsz, src_len = mask.size()
|
| tgt_len = tgt_len if tgt_len is not None else src_len
|
|
|
| expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
|
|
|
| inverted_mask = 1.0 - expanded_mask
|
|
|
| return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
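|
|
| # Illustrative sketch of `_expand_mask` (toy values are an assumption, not from the model):
| # a padding mask [[1, 1, 0]] of shape (1, 3) becomes an additive float mask of shape
| # (1, 1, 3, 3) whose last column is filled with torch.finfo(dtype).min, so the softmax
| # assigns (near-)zero attention weight to the padded position.
| #
| #     mask = torch.tensor([[1, 1, 0]])
| #     additive_mask = _expand_mask(mask, torch.float32)  # shape (1, 1, 3, 3)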
|
|
|
|
|
|
|
|
|
| def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
|
| return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))
|
|
|
|
|
| def clip_loss(similarity: torch.Tensor) -> torch.Tensor:
|
| caption_loss = contrastive_loss(similarity)
|
| image_loss = contrastive_loss(similarity.t())
|
| return (caption_loss + image_loss) / 2.0
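|
|
| # The CLIP objective is the mean of two cross-entropies over the same similarity matrix:
| # text->image over rows and image->text over columns, with the matching pairs on the
| # diagonal as targets. Minimal sketch (variable names are illustrative only):
| #
| #     sim = text_embeds @ image_embeds.t() * logit_scale  # (batch, batch)
| #     loss = clip_loss(sim)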
|
|
|
|
|
| @dataclass
|
| class CLIPVisionModelOutput(ModelOutput):
|
| """
|
| Base class for vision model outputs that also contain image embeddings obtained by pooling and projecting the last hidden states.
|
|
|
| Args:
|
| image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
|
| The image embeddings obtained by applying the projection layer to the pooler_output.
|
| last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
|
| Sequence of hidden-states at the output of the last layer of the model.
|
| hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
|
| Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
|
| one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
|
|
|
| Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
|
| attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
|
| Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
|
| sequence_length)`.
|
|
|
| Attention weights after the attention softmax, used to compute the weighted average in the self-attention
|
| heads.
|
| """
|
|
|
| image_embeds: Optional[torch.FloatTensor] = None
|
| last_hidden_state: torch.FloatTensor = None
|
| hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
| attentions: Optional[Tuple[torch.FloatTensor]] = None
|
|
|
|
|
| @dataclass
|
| class CLIPTextModelOutput(ModelOutput):
|
| """
|
| Base class for text model's outputs that also contains a pooling of the last hidden states.
|
|
|
| Args:
|
| text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
|
| The text embeddings obtained by applying the projection layer to the pooler_output.
|
| last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
|
| Sequence of hidden-states at the output of the last layer of the model.
|
| hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
|
| Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
|
| one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
|
|
|
| Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
|
| attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
|
| Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
|
| sequence_length)`.
|
|
|
| Attention weights after the attention softmax, used to compute the weighted average in the self-attention
|
| heads.
|
| """
|
|
|
| text_embeds: Optional[torch.FloatTensor] = None
|
| last_hidden_state: torch.FloatTensor = None
|
| hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
| attentions: Optional[Tuple[torch.FloatTensor]] = None
|
|
|
|
|
| @dataclass
|
| class CLIPOutput(ModelOutput):
|
| """
|
| Args:
|
| loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
|
| Contrastive loss for image-text similarity.
|
| logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
|
| The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
|
| similarity scores.
|
| logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
|
| The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
|
| similarity scores.
|
| text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
|
| The text embeddings obtained by applying the projection layer to the pooled output of [`CLIPTextModel`].
|
| image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
|
| The image embeddings obtained by applying the projection layer to the pooled output of [`CLIPVisionModel`].
|
| text_model_output (`BaseModelOutputWithPooling`):
|
| The output of the [`CLIPTextModel`].
|
| vision_model_output (`BaseModelOutputWithPooling`):
|
| The output of the [`CLIPVisionModel`].
|
| """
|
|
|
| loss: Optional[torch.FloatTensor] = None
|
| logits_per_image: torch.FloatTensor = None
|
| logits_per_text: torch.FloatTensor = None
|
| text_embeds: torch.FloatTensor = None
|
| image_embeds: torch.FloatTensor = None
|
| text_model_output: BaseModelOutputWithPooling = None
|
| vision_model_output: BaseModelOutputWithPooling = None
|
|
|
| def to_tuple(self) -> Tuple[Any]:
|
| return tuple(
|
| self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
|
| for k in self.keys()
|
| )
|
|
|
|
|
| class CLIPVisionEmbeddings(nn.Module):
|
| def __init__(self, config: CLIPVisionConfig):
|
| super().__init__()
|
| self.config = config
|
| self.embed_dim = config.hidden_size
|
| self.image_size = config.image_size
|
| self.patch_size = config.patch_size
|
|
|
| self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))
|
|
|
| self.patch_embedding = nn.Conv2d(
|
| in_channels=config.num_channels,
|
| out_channels=self.embed_dim,
|
| kernel_size=self.patch_size,
|
| stride=self.patch_size,
|
| bias=False,
|
| )
|
|
|
| self.num_patches = (self.image_size // self.patch_size) ** 2
|
| self.num_positions = self.num_patches + 1
|
| self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
|
| self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)
|
|
|
| def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
|
| batch_size = pixel_values.shape[0]
|
| patch_embeds = self.patch_embedding(pixel_values)
|
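| # shape: (batch_size, embed_dim, grid, grid) -> (batch_size, grid*grid, embed_dim)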
| patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
|
|
|
| class_embeds = self.class_embedding.expand(batch_size, 1, -1)
|
| embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
|
| embeddings = embeddings + self.position_embedding(self.position_ids)
|
| return embeddings
|
|
|
|
|
| class CLIPTextEmbeddings(nn.Module):
|
| def __init__(self, config: CLIPTextConfig):
|
| super().__init__()
|
| embed_dim = config.hidden_size
|
|
|
| self.token_embedding = nn.Embedding(config.vocab_size, embed_dim)
|
| self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim)
|
|
|
|
|
| self.register_buffer(
|
| "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
|
| )
|
|
|
| def forward(
|
| self,
|
| input_ids: Optional[torch.LongTensor] = None,
|
| position_ids: Optional[torch.LongTensor] = None,
|
| inputs_embeds: Optional[torch.FloatTensor] = None,
|
| ) -> torch.Tensor:
|
| seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]
|
|
|
| if position_ids is None:
|
| position_ids = self.position_ids[:, :seq_length]
|
|
|
| if inputs_embeds is None:
|
| inputs_embeds = self.token_embedding(input_ids)
|
|
|
| position_embeddings = self.position_embedding(position_ids)
|
| embeddings = inputs_embeds + position_embeddings
|
|
|
| return embeddings
|
|
|
|
|
| class CLIPAttention(nn.Module):
|
| """Multi-headed attention from 'Attention Is All You Need' paper"""
|
|
|
| def __init__(self, config):
|
| super().__init__()
|
| self.config = config
|
| self.embed_dim = config.hidden_size
|
| self.num_heads = config.num_attention_heads
|
| self.head_dim = self.embed_dim // self.num_heads
|
| if self.head_dim * self.num_heads != self.embed_dim:
|
| raise ValueError(
|
| f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
|
| f" {self.num_heads})."
|
| )
|
| self.scale = self.head_dim**-0.5
|
| self.dropout = config.attention_dropout
|
|
|
| self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
|
| self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
|
| self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
|
| self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
|
|
|
| def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
|
| return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
|
|
|
| def forward(
|
| self,
|
| hidden_states: torch.Tensor,
|
| attention_mask: Optional[torch.Tensor] = None,
|
| causal_attention_mask: Optional[torch.Tensor] = None,
|
| output_attentions: Optional[bool] = False,
|
| ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
|
| """Input shape: Batch x Time x Channel"""
|
|
|
| bsz, tgt_len, embed_dim = hidden_states.size()
|
|
|
|
|
| query_states = self.q_proj(hidden_states) * self.scale
|
| key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
|
| value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
|
|
|
| proj_shape = (bsz * self.num_heads, -1, self.head_dim)
|
| query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
|
| key_states = key_states.view(*proj_shape)
|
| value_states = value_states.view(*proj_shape)
|
|
|
| src_len = key_states.size(1)
|
| attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
|
|
|
| if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
|
| raise ValueError(
|
| f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
|
| f" {attn_weights.size()}"
|
| )
|
|
|
|
|
| if causal_attention_mask is not None:
|
| if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len):
|
| raise ValueError(
|
| f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
|
| f" {causal_attention_mask.size()}"
|
| )
|
| attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask
|
| attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
|
|
|
| if attention_mask is not None:
|
| if attention_mask.size() != (bsz, 1, tgt_len, src_len):
|
| raise ValueError(
|
| f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
|
| )
|
| attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
|
| attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
|
|
|
| attn_weights = nn.functional.softmax(attn_weights, dim=-1)
|
|
|
| if output_attentions:
|
|
|
|
|
|
|
|
|
| attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
|
| attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
|
| else:
|
| attn_weights_reshaped = None
|
|
|
| attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
|
|
|
| attn_output = torch.bmm(attn_probs, value_states)
|
|
|
| if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
|
| raise ValueError(
|
| f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
|
| f" {attn_output.size()}"
|
| )
|
|
|
| attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
|
| attn_output = attn_output.transpose(1, 2)
|
| attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
|
|
|
| attn_output = self.out_proj(attn_output)
|
|
|
| return attn_output, attn_weights_reshaped
|
|
|
|
|
| class CLIPMLP(nn.Module):
|
| def __init__(self, config):
|
| super().__init__()
|
| self.config = config
|
| self.activation_fn = ACT2FN[config.hidden_act]
|
| self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
|
| self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
|
|
|
| def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
|
| hidden_states = self.fc1(hidden_states)
|
| hidden_states = self.activation_fn(hidden_states)
|
| hidden_states = self.fc2(hidden_states)
|
| return hidden_states
|
|
|
|
|
| class CLIPEncoderLayer(nn.Module):
|
| def __init__(self, config: CLIPConfig):
|
| super().__init__()
|
| self.embed_dim = config.hidden_size
|
| self.self_attn = CLIPAttention(config)
|
| self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
|
| self.mlp = CLIPMLP(config)
|
| self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
|
|
|
| def forward(
|
| self,
|
| hidden_states: torch.Tensor,
|
| attention_mask: torch.Tensor,
|
| causal_attention_mask: torch.Tensor,
|
| output_attentions: Optional[bool] = False,
|
| ) -> Tuple[torch.FloatTensor]:
|
| """
|
| Args:
|
| hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
|
| attention_mask (`torch.FloatTensor`): attention mask of size
|
| `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
|
| causal_attention_mask (`torch.FloatTensor`): causal attention mask of size `(batch, 1, tgt_len, src_len)`.
|
| output_attentions (`bool`, *optional*):
|
| Whether or not to return the attentions tensors of all attention layers. See `attentions` under
|
| returned tensors for more detail.
|
| """
|
| residual = hidden_states
|
|
|
| hidden_states = self.layer_norm1(hidden_states)
|
| hidden_states, attn_weights = self.self_attn(
|
| hidden_states=hidden_states,
|
| attention_mask=attention_mask,
|
| causal_attention_mask=causal_attention_mask,
|
| output_attentions=output_attentions,
|
| )
|
| hidden_states = residual + hidden_states
|
|
|
| residual = hidden_states
|
| hidden_states = self.layer_norm2(hidden_states)
|
| hidden_states = self.mlp(hidden_states)
|
| hidden_states = residual + hidden_states
|
|
|
| outputs = (hidden_states,)
|
|
|
| if output_attentions:
|
| outputs += (attn_weights,)
|
|
|
| return outputs
|
|
|
|
|
| class CLIPPreTrainedModel(PreTrainedModel):
|
| """
|
| An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
|
| models.
|
| """
|
|
|
| config_class = CLIPConfig
|
| base_model_prefix = "clip"
|
| supports_gradient_checkpointing = True
|
|
|
| def _init_weights(self, module):
|
| """Initialize the weights"""
|
| factor = self.config.initializer_factor
|
| if isinstance(module, CLIPTextEmbeddings):
|
| module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
|
| module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
|
| elif isinstance(module, CLIPVisionEmbeddings):
|
| factor = self.config.initializer_factor
|
| nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor)
|
| nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor)
|
| nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor)
|
| elif isinstance(module, CLIPAttention):
|
| factor = self.config.initializer_factor
|
| in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
|
| out_proj_std = (module.embed_dim**-0.5) * factor
|
| nn.init.normal_(module.q_proj.weight, std=in_proj_std)
|
| nn.init.normal_(module.k_proj.weight, std=in_proj_std)
|
| nn.init.normal_(module.v_proj.weight, std=in_proj_std)
|
| nn.init.normal_(module.out_proj.weight, std=out_proj_std)
|
| elif isinstance(module, CLIPMLP):
|
| factor = self.config.initializer_factor
|
| in_proj_std = (
|
| (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
|
| )
|
| fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
|
| nn.init.normal_(module.fc1.weight, std=fc_std)
|
| nn.init.normal_(module.fc2.weight, std=in_proj_std)
|
| elif isinstance(module, CLIPModel):
|
| nn.init.normal_(
|
| module.text_projection.weight,
|
| std=module.text_embed_dim**-0.5 * self.config.initializer_factor,
|
| )
|
| nn.init.normal_(
|
| module.visual_projection.weight,
|
| std=module.vision_embed_dim**-0.5 * self.config.initializer_factor,
|
| )
|
| elif isinstance(module, CLIPVisionModelWithProjection):
|
| nn.init.normal_(
|
| module.visual_projection.weight,
|
| std=self.config.hidden_size**-0.5 * self.config.initializer_factor,
|
| )
|
| elif isinstance(module, CLIPTextModelWithProjection):
|
| nn.init.normal_(
|
| module.text_projection.weight,
|
| std=self.config.hidden_size**-0.5 * self.config.initializer_factor,
|
| )
|
|
|
| if isinstance(module, nn.LayerNorm):
|
| module.bias.data.zero_()
|
| module.weight.data.fill_(1.0)
|
| if isinstance(module, nn.Linear) and module.bias is not None:
|
| module.bias.data.zero_()
|
|
|
| def _set_gradient_checkpointing(self, module, value=False):
|
| if isinstance(module, CLIPEncoder):
|
| module.gradient_checkpointing = value
|
|
|
|
|
| CLIP_START_DOCSTRING = r"""
|
| This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
|
| library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
|
| etc.)
|
|
|
| This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
|
| Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
|
| and behavior.
|
|
|
| Parameters:
|
| config ([`CLIPConfig`]): Model configuration class with all the parameters of the model.
|
| Initializing with a config file does not load the weights associated with the model, only the
|
| configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
|
| """
|
|
|
| CLIP_TEXT_INPUTS_DOCSTRING = r"""
|
| Args:
|
| input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
|
| Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
|
| it.
|
|
|
| Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
| [`PreTrainedTokenizer.__call__`] for details.
|
|
|
| [What are input IDs?](../glossary#input-ids)
|
| attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
|
|
|
| - 1 for tokens that are **not masked**,
|
| - 0 for tokens that are **masked**.
|
|
|
| [What are attention masks?](../glossary#attention-mask)
|
| position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
|
| config.max_position_embeddings - 1]`.
|
|
|
| [What are position IDs?](../glossary#position-ids)
|
| output_attentions (`bool`, *optional*):
|
| Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
| tensors for more detail.
|
| output_hidden_states (`bool`, *optional*):
|
| Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
| more detail.
|
| return_dict (`bool`, *optional*):
|
| Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
| """
|
|
|
| CLIP_VISION_INPUTS_DOCSTRING = r"""
|
| Args:
|
| pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
|
| Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
|
| [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
|
| output_attentions (`bool`, *optional*):
|
| Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
| tensors for more detail.
|
| output_hidden_states (`bool`, *optional*):
|
| Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
| more detail.
|
| return_dict (`bool`, *optional*):
|
| Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
| """
|
|
|
| CLIP_INPUTS_DOCSTRING = r"""
|
| Args:
|
| input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
|
| Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
|
| it.
|
|
|
| Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
| [`PreTrainedTokenizer.__call__`] for details.
|
|
|
| [What are input IDs?](../glossary#input-ids)
|
| attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
|
|
|
| - 1 for tokens that are **not masked**,
|
| - 0 for tokens that are **masked**.
|
|
|
| [What are attention masks?](../glossary#attention-mask)
|
| position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
|
| config.max_position_embeddings - 1]`.
|
|
|
| [What are position IDs?](../glossary#position-ids)
|
| pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
|
| Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
|
| [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
|
| return_loss (`bool`, *optional*):
|
| Whether or not to return the contrastive loss.
|
| output_attentions (`bool`, *optional*):
|
| Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
| tensors for more detail.
|
| output_hidden_states (`bool`, *optional*):
|
| Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
| more detail.
|
| return_dict (`bool`, *optional*):
|
| Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
| """
|
|
|
|
|
| class CLIPEncoder(nn.Module):
|
| """
|
| Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
|
| [`CLIPEncoderLayer`].
|
|
|
| Args:
|
| config: CLIPConfig
|
| """
|
|
|
| def __init__(self, config: CLIPConfig, PT_len: int = 0):
|
| super().__init__()
|
| self.config = config
|
| # Learnable "deep prompt" tokens: when PT_len > 0, one (1, PT_len, hidden_size) prompt
| # tensor is created per encoder layer and spliced into the sequence in the forward pass.
| self.prompts = []
|
| self.prompts_token_len = PT_len
|
| if self.prompts_token_len > 0:
|
| for _ in range(config.num_hidden_layers):
|
| self.prompts.append(nn.Parameter(nn.init.xavier_uniform_(torch.randn(1, PT_len, config.hidden_size))))
|
| self.prompts = nn.ParameterList(self.prompts)
|
| self.layers = nn.ModuleList([CLIPEncoderLayer(config) for _ in range(config.num_hidden_layers)])
|
|
|
| # Debugging aids (not used in the computation): `debug_weights` snapshots one prompt
| # tensor per forward call and `index` counts forward calls.
| self.debug_weights = 0
|
| self.index = 0
|
| self.gradient_checkpointing = False
|
|
|
| def forward(
|
| self,
|
| inputs_embeds,
|
| attention_mask: Optional[torch.Tensor] = None,
|
| causal_attention_mask: Optional[torch.Tensor] = None,
|
| output_attentions: Optional[bool] = None,
|
| output_hidden_states: Optional[bool] = None,
|
| return_dict: Optional[bool] = None,
|
| ) -> Union[Tuple, BaseModelOutput]:
|
| r"""
|
| Args:
|
| inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
|
| Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
|
| This is useful if you want more control over how to convert `input_ids` indices into associated vectors
|
| than the model's internal embedding lookup matrix.
|
| attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
|
|
|
| - 1 for tokens that are **not masked**,
|
| - 0 for tokens that are **masked**.
|
|
|
| [What are attention masks?](../glossary#attention-mask)
|
| causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| Causal mask for the text model. Mask values selected in `[0, 1]`:
|
|
|
| - 1 for tokens that are **not masked**,
|
| - 0 for tokens that are **masked**.
|
|
|
| [What are attention masks?](../glossary#attention-mask)
|
| output_attentions (`bool`, *optional*):
|
| Whether or not to return the attentions tensors of all attention layers. See `attentions` under
|
| returned tensors for more detail.
|
| output_hidden_states (`bool`, *optional*):
|
| Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
|
| for more detail.
|
| return_dict (`bool`, *optional*):
|
| Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
| """
|
| output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| output_hidden_states = (
|
| output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| )
|
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
|
|
| encoder_states = () if output_hidden_states else None
|
| all_attentions = () if output_attentions else None
|
|
|
|
|
| if self.prompts_token_len > 0:
|
| # Prepend the layer-0 prompt tokens (broadcast over the batch) to the input embeddings.
| inputs_PT = self.prompts[0].repeat(inputs_embeds.size(0), 1, 1).to(inputs_embeds.device).to(inputs_embeds.dtype)
|
| hidden_states = torch.cat((inputs_PT, inputs_embeds), dim=1)
|
|
|
|
|
| # Debug snapshot of one prompt tensor (indexing -5 assumes at least five layers); it has no effect on the output.
| self.debug_weights = self.prompts[-5].data.clone().detach()
|
| self.index += 1
|
|
|
| else:
|
| hidden_states = inputs_embeds
|
|
|
|
|
| for idx, encoder_layer in enumerate(self.layers):
|
| if self.prompts_token_len > 0:
|
|
|
| # Deep prompting: overwrite the prompt positions with this layer's own prompt tokens.
| hidden_states[:, :self.prompts_token_len, :] = self.prompts[idx].repeat(inputs_embeds.size(0), 1, 1).to(hidden_states.device).to(hidden_states.dtype)
|
| if output_hidden_states:
|
| encoder_states = encoder_states + (hidden_states,)
|
| if self.gradient_checkpointing and self.training:
|
|
|
| def create_custom_forward(module):
|
| def custom_forward(*inputs):
|
| return module(*inputs, output_attentions)
|
|
|
| return custom_forward
|
|
|
| layer_outputs = torch.utils.checkpoint.checkpoint(
|
| create_custom_forward(encoder_layer),
|
| hidden_states,
|
| attention_mask,
|
| causal_attention_mask,
|
| )
|
| else:
|
| layer_outputs = encoder_layer(
|
| hidden_states,
|
| attention_mask,
|
| causal_attention_mask,
|
| output_attentions=output_attentions,
|
| )
|
|
|
| hidden_states = layer_outputs[0]
|
|
|
| if output_attentions:
|
| all_attentions = all_attentions + (layer_outputs[1],)
|
|
|
| if output_hidden_states:
|
| encoder_states = encoder_states + (hidden_states,)
|
|
|
| if not return_dict:
|
| return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
|
| return BaseModelOutput(
|
| last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
|
| )
|
|
|
|
|
|
|
| def _make_causal_mask(
|
| input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
|
| ):
|
| """
|
| Make a causal (lower-triangular) additive mask for self-attention.
|
| """
|
| bsz, tgt_len = input_ids_shape
|
| mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
|
| mask_cond = torch.arange(mask.size(-1), device=device)
|
| mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
|
| mask = mask.to(dtype)
|
|
|
| if past_key_values_length > 0:
|
| mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
|
| return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
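|
|
| # Illustrative sketch (toy size is an assumption): for a sequence of length 3 with no past
| # key/values, the returned mask has shape (bsz, 1, 3, 3) with 0 on and below the diagonal
| # and torch.finfo(dtype).min above it, so each position attends only to itself and earlier positions.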
|
|
|
|
|
|
|
|
|
| class CLIPTextTransformer(nn.Module):
|
| def __init__(self, config: CLIPTextConfig):
|
| super().__init__()
|
| self.config = config
|
| embed_dim = config.hidden_size
|
| self.embeddings = CLIPTextEmbeddings(config)
|
| self.encoder = CLIPEncoder(config)
|
| self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
|
|
|
| @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING)
|
| @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPTextConfig)
|
| def forward(
|
| self,
|
| input_ids: Optional[torch.Tensor] = None,
|
| attention_mask: Optional[torch.Tensor] = None,
|
| position_ids: Optional[torch.Tensor] = None,
|
| output_attentions: Optional[bool] = None,
|
| output_hidden_states: Optional[bool] = None,
|
| return_dict: Optional[bool] = None,
|
| ) -> Union[Tuple, BaseModelOutputWithPooling]:
|
| r"""
|
| Returns:
|
|
|
| """
|
| output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| output_hidden_states = (
|
| output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| )
|
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
|
|
| if input_ids is None:
|
| raise ValueError("You have to specify input_ids")
|
|
|
| input_shape = input_ids.size()
|
| input_ids = input_ids.view(-1, input_shape[-1])
|
|
|
| hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)
|
|
|
|
|
|
|
| causal_attention_mask = _make_causal_mask(input_shape, hidden_states.dtype, device=hidden_states.device)
|
|
|
| if attention_mask is not None:
|
|
|
| attention_mask = _expand_mask(attention_mask, hidden_states.dtype)
|
|
|
| encoder_outputs = self.encoder(
|
| inputs_embeds=hidden_states,
|
| attention_mask=attention_mask,
|
| causal_attention_mask=causal_attention_mask,
|
| output_attentions=output_attentions,
|
| output_hidden_states=output_hidden_states,
|
| return_dict=return_dict,
|
| )
|
|
|
| last_hidden_state = encoder_outputs[0]
|
| last_hidden_state = self.final_layer_norm(last_hidden_state)
|
|
|
|
|
|
|
|
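| # Pool by taking the hidden state at the position of the highest token id in each sequence
| # (the end-of-sequence token has the largest id in the CLIP vocabulary).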
|
| pooled_output = last_hidden_state[
|
| torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
|
| input_ids.to(dtype=torch.int, device=last_hidden_state.device).argmax(dim=-1),
|
| ]
|
|
|
| if not return_dict:
|
| return (last_hidden_state, pooled_output) + encoder_outputs[1:]
|
|
|
| return BaseModelOutputWithPooling(
|
| last_hidden_state=last_hidden_state,
|
| pooler_output=pooled_output,
|
| hidden_states=encoder_outputs.hidden_states,
|
| attentions=encoder_outputs.attentions,
|
| )
|
|
|
|
|
| @add_start_docstrings(
|
| """The text model from CLIP without any head or projection on top.""",
|
| CLIP_START_DOCSTRING,
|
| )
|
| class CLIPTextModel(CLIPPreTrainedModel):
|
| config_class = CLIPTextConfig
|
|
|
| _no_split_modules = ["CLIPEncoderLayer"]
|
|
|
| def __init__(self, config: CLIPTextConfig):
|
| super().__init__(config)
|
| self.text_model = CLIPTextTransformer(config)
|
|
|
| self.post_init()
|
|
|
| def get_input_embeddings(self) -> nn.Module:
|
| return self.text_model.embeddings.token_embedding
|
|
|
| def set_input_embeddings(self, value):
|
| self.text_model.embeddings.token_embedding = value
|
|
|
| @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING)
|
| @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPTextConfig)
|
| def forward(
|
| self,
|
| input_ids: Optional[torch.Tensor] = None,
|
| attention_mask: Optional[torch.Tensor] = None,
|
| position_ids: Optional[torch.Tensor] = None,
|
| output_attentions: Optional[bool] = None,
|
| output_hidden_states: Optional[bool] = None,
|
| return_dict: Optional[bool] = None,
|
| ) -> Union[Tuple, BaseModelOutputWithPooling]:
|
| r"""
|
| Returns:
|
|
|
| Examples:
|
|
|
| ```python
|
| >>> from transformers import AutoTokenizer, CLIPTextModel
|
|
|
| >>> model = CLIPTextModel.from_pretrained("openai/clip-vit-base-patch32")
|
| >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")
|
|
|
| >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
|
|
|
| >>> outputs = model(**inputs)
|
| >>> last_hidden_state = outputs.last_hidden_state
|
| >>> pooled_output = outputs.pooler_output # pooled (EOS token) states
|
| ```"""
|
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
|
|
| return self.text_model(
|
| input_ids=input_ids,
|
| attention_mask=attention_mask,
|
| position_ids=position_ids,
|
| output_attentions=output_attentions,
|
| output_hidden_states=output_hidden_states,
|
| return_dict=return_dict,
|
| )
|
|
|
|
|
| class CLIPVisionTransformer(nn.Module):
|
| def __init__(self, config: CLIPVisionConfig, PT_len: int = 0):
|
| super().__init__()
|
| self.config = config
|
| embed_dim = config.hidden_size
|
|
|
| self.embeddings = CLIPVisionEmbeddings(config)
|
| # NOTE: the misspelled attribute name `pre_layrnorm` is kept so pretrained checkpoint weights load correctly.
| self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
|
| self.encoder = CLIPEncoder(config, PT_len)
|
| self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
|
| def make_prompt_learnable(self):
|
|
|
| # Each entry of `prompts` is an nn.Parameter, so enable gradients on it directly.
| for p in self.encoder.prompts:
|
| p.requires_grad = True
|
| @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING)
|
| @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPVisionConfig)
|
| def forward(
|
| self,
|
| pixel_values: Optional[torch.FloatTensor] = None,
|
| output_attentions: Optional[bool] = None,
|
| output_hidden_states: Optional[bool] = None,
|
| return_dict: Optional[bool] = None,
|
| ) -> Union[Tuple, BaseModelOutputWithPooling]:
|
| r"""
|
| Returns:
|
|
|
| """
|
| output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| output_hidden_states = (
|
| output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| )
|
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
|
|
| if pixel_values is None:
|
| raise ValueError("You have to specify pixel_values")
|
|
|
| hidden_states = self.embeddings(pixel_values)
|
| hidden_states = self.pre_layrnorm(hidden_states)
|
|
|
|
|
| encoder_outputs = self.encoder(
|
| inputs_embeds=hidden_states,
|
| output_attentions=output_attentions,
|
| output_hidden_states=output_hidden_states,
|
| return_dict=return_dict,
|
| )
|
|
|
| last_hidden_state = encoder_outputs[0]
|
| pooled_output = last_hidden_state[:, 0, :]
|
| pooled_output = self.post_layernorm(pooled_output)
|
|
|
| if not return_dict:
|
| return (last_hidden_state, pooled_output) + encoder_outputs[1:]
|
|
|
| return BaseModelOutputWithPooling(
|
| last_hidden_state=last_hidden_state,
|
| pooler_output=pooled_output,
|
| hidden_states=encoder_outputs.hidden_states,
|
| attentions=encoder_outputs.attentions,
|
| )
|
|
|
|
|
| @add_start_docstrings(
|
| """The vision model from CLIP without any head or projection on top.""",
|
| CLIP_START_DOCSTRING,
|
| )
|
| class CLIPVisionModel(CLIPPreTrainedModel):
|
| config_class = CLIPVisionConfig
|
| main_input_name = "pixel_values"
|
|
|
| def __init__(self, config: CLIPVisionConfig, PT_len: int = 0):
|
| super().__init__(config)
|
| self.vision_model = CLIPVisionTransformer(config, PT_len)
|
| self.vision_model.eval()
|
|
|
| self.post_init()
|
| def get_prompt_embeddings(self):
|
| return self.vision_model.encoder.prompts
|
| def make_prompt_learnable(self):
|
| # Freeze the encoder, then re-enable gradients for the prompt tokens only
| # (assumes the model was built with PT_len > 0, so `prompts` is an nn.ParameterList).
|
| for p in self.vision_model.encoder.parameters():
|
| p.requires_grad = False
|
| self.vision_model.encoder.prompts.requires_grad_(True)
|
| def make_prompt_unlearnable(self):
|
| for p in self.vision_model.encoder.parameters():
|
| p.requires_grad = False
|
| self.vision_model.encoder.prompts.requires_grad_(False)
|
|
|
| def get_input_embeddings(self) -> nn.Module:
|
| return self.vision_model.embeddings.patch_embedding
|
|
|
| @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING)
|
| @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPVisionConfig)
|
| def forward(
|
| self,
|
| pixel_values: Optional[torch.FloatTensor] = None,
|
| output_attentions: Optional[bool] = None,
|
| output_hidden_states: Optional[bool] = None,
|
| return_dict: Optional[bool] = None,
|
| ) -> Union[Tuple, BaseModelOutputWithPooling]:
|
| r"""
|
| Returns:
|
|
|
| Examples:
|
|
|
| ```python
|
| >>> from PIL import Image
|
| >>> import requests
|
| >>> from transformers import AutoProcessor, CLIPVisionModel
|
|
|
| >>> model = CLIPVisionModel.from_pretrained("openai/clip-vit-base-patch32")
|
| >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")
|
|
|
| >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
| >>> image = Image.open(requests.get(url, stream=True).raw)
|
|
|
| >>> inputs = processor(images=image, return_tensors="pt")
|
|
|
| >>> outputs = model(**inputs)
|
| >>> last_hidden_state = outputs.last_hidden_state
|
| >>> pooled_output = outputs.pooler_output # pooled CLS states
|
| ```"""
|
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
|
|
| return self.vision_model(
|
| pixel_values=pixel_values,
|
| output_attentions=output_attentions,
|
| output_hidden_states=output_hidden_states,
|
| return_dict=return_dict,
|
| )
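|
|
| # Usage sketch for the prompt-augmented vision tower (illustrative only; the checkpoint
| # name and the PT_len value below are assumptions, not requirements of this module):
| #
| #     config = CLIPVisionConfig.from_pretrained("openai/clip-vit-base-patch32")
| #     model = CLIPVisionModel(config, PT_len=8)  # 8 learnable prompt tokens per layer
| #     model.make_prompt_learnable()              # freezes encoder weights, unfreezes the prompts
| #     trainable = [p for p in model.parameters() if p.requires_grad]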
|
|
|
|
|
| @add_start_docstrings(CLIP_START_DOCSTRING)
|
| class CLIPModel(CLIPPreTrainedModel):
|
| config_class = CLIPConfig
|
|
|
| def __init__(self, config: CLIPConfig):
|
| super().__init__(config)
|
|
|
| if not isinstance(config.text_config, CLIPTextConfig):
|
| raise ValueError(
|
| "config.text_config is expected to be of type CLIPTextConfig but is of type"
|
| f" {type(config.text_config)}."
|
| )
|
|
|
| if not isinstance(config.vision_config, CLIPVisionConfig):
|
| raise ValueError(
|
| "config.vision_config is expected to be of type CLIPVisionConfig but is of type"
|
| f" {type(config.vision_config)}."
|
| )
|
|
|
| text_config = config.text_config
|
| vision_config = config.vision_config
|
|
|
| self.projection_dim = config.projection_dim
|
| self.text_embed_dim = text_config.hidden_size
|
| self.vision_embed_dim = vision_config.hidden_size
|
|
|
| self.text_model = CLIPTextTransformer(text_config)
|
| self.vision_model = CLIPVisionTransformer(vision_config)
|
|
|
| self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
|
| self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
|
| self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))
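| # logit_scale is the learnable log of the inverse temperature (ln(1/0.07) in the default
| # configuration); it is exponentiated in `forward` before scaling the similarity logits.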
|
|
|
|
|
| self.post_init()
|
|
|
| @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING)
|
| def get_text_features(
|
| self,
|
| input_ids: Optional[torch.Tensor] = None,
|
| attention_mask: Optional[torch.Tensor] = None,
|
| position_ids: Optional[torch.Tensor] = None,
|
| output_attentions: Optional[bool] = None,
|
| output_hidden_states: Optional[bool] = None,
|
| return_dict: Optional[bool] = None,
|
| ) -> torch.FloatTensor:
|
| r"""
|
| Returns:
|
| text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
|
| applying the projection layer to the pooled output of [`CLIPTextModel`].
|
|
|
| Examples:
|
|
|
| ```python
|
| >>> from transformers import AutoTokenizer, CLIPModel
|
|
|
| >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
|
| >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")
|
|
|
| >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
|
| >>> text_features = model.get_text_features(**inputs)
|
| ```"""
|
|
|
| output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| output_hidden_states = (
|
| output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| )
|
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
|
|
| text_outputs = self.text_model(
|
| input_ids=input_ids,
|
| attention_mask=attention_mask,
|
| position_ids=position_ids,
|
| output_attentions=output_attentions,
|
| output_hidden_states=output_hidden_states,
|
| return_dict=return_dict,
|
| )
|
|
|
| pooled_output = text_outputs[1]
|
| text_features = self.text_projection(pooled_output)
|
|
|
| return text_features
|
|
|
| @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING)
|
| def get_image_features(
|
| self,
|
| pixel_values: Optional[torch.FloatTensor] = None,
|
| output_attentions: Optional[bool] = None,
|
| output_hidden_states: Optional[bool] = None,
|
| return_dict: Optional[bool] = None,
|
| ) -> torch.FloatTensor:
|
| r"""
|
| Returns:
|
| image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
|
| applying the projection layer to the pooled output of [`CLIPVisionModel`].
|
|
|
| Examples:
|
|
|
| ```python
|
| >>> from PIL import Image
|
| >>> import requests
|
| >>> from transformers import AutoProcessor, CLIPModel
|
|
|
| >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
|
| >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")
|
|
|
| >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
| >>> image = Image.open(requests.get(url, stream=True).raw)
|
|
|
| >>> inputs = processor(images=image, return_tensors="pt")
|
|
|
| >>> image_features = model.get_image_features(**inputs)
|
| ```"""
|
|
|
| output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| output_hidden_states = (
|
| output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| )
|
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
|
|
| vision_outputs = self.vision_model(
|
| pixel_values=pixel_values,
|
| output_attentions=output_attentions,
|
| output_hidden_states=output_hidden_states,
|
| return_dict=return_dict,
|
| )
|
|
|
| pooled_output = vision_outputs[1]
|
| image_features = self.visual_projection(pooled_output)
|
|
|
| return image_features
|
|
|
| @add_start_docstrings_to_model_forward(CLIP_INPUTS_DOCSTRING)
|
| @replace_return_docstrings(output_type=CLIPOutput, config_class=CLIPConfig)
|
| def forward(
|
| self,
|
| input_ids: Optional[torch.LongTensor] = None,
|
| pixel_values: Optional[torch.FloatTensor] = None,
|
| attention_mask: Optional[torch.Tensor] = None,
|
| position_ids: Optional[torch.LongTensor] = None,
|
| return_loss: Optional[bool] = None,
|
| output_attentions: Optional[bool] = None,
|
| output_hidden_states: Optional[bool] = None,
|
| return_dict: Optional[bool] = None,
|
| ) -> Union[Tuple, CLIPOutput]:
|
| r"""
|
| Returns:
|
|
|
| Examples:
|
|
|
| ```python
|
| >>> from PIL import Image
|
| >>> import requests
|
| >>> from transformers import AutoProcessor, CLIPModel
|
|
|
| >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
|
| >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")
|
|
|
| >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
| >>> image = Image.open(requests.get(url, stream=True).raw)
|
|
|
| >>> inputs = processor(
|
| ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
|
| ... )
|
|
|
| >>> outputs = model(**inputs)
|
| >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
|
| >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
|
| ```"""
|
|
|
| output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| output_hidden_states = (
|
| output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| )
|
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
|
|
| vision_outputs = self.vision_model(
|
| pixel_values=pixel_values,
|
| output_attentions=output_attentions,
|
| output_hidden_states=output_hidden_states,
|
| return_dict=return_dict,
|
| )
|
|
|
| text_outputs = self.text_model(
|
| input_ids=input_ids,
|
| attention_mask=attention_mask,
|
| position_ids=position_ids,
|
| output_attentions=output_attentions,
|
| output_hidden_states=output_hidden_states,
|
| return_dict=return_dict,
|
| )
|
|
|
| image_embeds = vision_outputs[1]
|
| image_embeds = self.visual_projection(image_embeds)
|
|
|
| text_embeds = text_outputs[1]
|
| text_embeds = self.text_projection(text_embeds)
|
|
|
|
|
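| # L2-normalize both embedding sets so the matmul below yields cosine similarities,
| # scaled by the learned (exponentiated) temperature.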
| image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
|
| text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
|
|
|
|
|
| logit_scale = self.logit_scale.exp()
|
| logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale
|
| logits_per_image = logits_per_text.t()
|
|
|
| loss = None
|
| if return_loss:
|
| loss = clip_loss(logits_per_text)
|
|
|
| if not return_dict:
|
| output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
|
| return ((loss,) + output) if loss is not None else output
|
|
|
| return CLIPOutput(
|
| loss=loss,
|
| logits_per_image=logits_per_image,
|
| logits_per_text=logits_per_text,
|
| text_embeds=text_embeds,
|
| image_embeds=image_embeds,
|
| text_model_output=text_outputs,
|
| vision_model_output=vision_outputs,
|
| )
|
|
|
|
|
| @add_start_docstrings(
|
| """
|
| CLIP Text Model with a projection layer on top (a linear layer on top of the pooled output).
|
| """,
|
| CLIP_START_DOCSTRING,
|
| )
|
| class CLIPTextModelWithProjection(CLIPPreTrainedModel):
|
| config_class = CLIPTextConfig
|
|
|
| _no_split_modules = ["CLIPEncoderLayer"]
|
|
|
| def __init__(self, config: CLIPTextConfig):
|
| super().__init__(config)
|
|
|
| self.text_model = CLIPTextTransformer(config)
|
|
|
| self.text_projection = nn.Linear(config.hidden_size, config.projection_dim, bias=False)
|
|
|
|
|
| self.post_init()
|
|
|
| def get_input_embeddings(self) -> nn.Module:
|
| return self.text_model.embeddings.token_embedding
|
|
|
| def set_input_embeddings(self, value):
|
| self.text_model.embeddings.token_embedding = value
|
|
|
| @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING)
|
| @replace_return_docstrings(output_type=CLIPTextModelOutput, config_class=CLIPTextConfig)
|
| def forward(
|
| self,
|
| input_ids: Optional[torch.Tensor] = None,
|
| attention_mask: Optional[torch.Tensor] = None,
|
| position_ids: Optional[torch.Tensor] = None,
|
| output_attentions: Optional[bool] = None,
|
| output_hidden_states: Optional[bool] = None,
|
| return_dict: Optional[bool] = None,
|
| ) -> Union[Tuple, CLIPTextModelOutput]:
|
| r"""
|
| Returns:
|
|
|
| Examples:
|
|
|
| ```python
|
| >>> from transformers import AutoTokenizer, CLIPTextModelWithProjection
|
|
|
| >>> model = CLIPTextModelWithProjection.from_pretrained("openai/clip-vit-base-patch32")
|
| >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")
|
|
|
| >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
|
|
|
| >>> outputs = model(**inputs)
|
| >>> text_embeds = outputs.text_embeds
|
| ```"""
|
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
|
|
| text_outputs = self.text_model(
|
| input_ids=input_ids,
|
| attention_mask=attention_mask,
|
| position_ids=position_ids,
|
| output_attentions=output_attentions,
|
| output_hidden_states=output_hidden_states,
|
| return_dict=return_dict,
|
| )
|
|
|
| pooled_output = text_outputs[1]
|
|
|
| text_embeds = self.text_projection(pooled_output)
|
|
|
| if not return_dict:
|
| outputs = (text_embeds, text_outputs[0]) + text_outputs[2:]
|
| return tuple(output for output in outputs if output is not None)
|
|
|
| return CLIPTextModelOutput(
|
| text_embeds=text_embeds,
|
| last_hidden_state=text_outputs.last_hidden_state,
|
| hidden_states=text_outputs.hidden_states,
|
| attentions=text_outputs.attentions,
|
| )
|
|
|
|
|
| @add_start_docstrings(
|
| """
|
| CLIP Vision Model with a projection layer on top (a linear layer on top of the pooled output).
|
| """,
|
| CLIP_START_DOCSTRING,
|
| )
|
| class CLIPVisionModelWithProjection(CLIPPreTrainedModel):
|
| config_class = CLIPVisionConfig
|
| main_input_name = "pixel_values"
|
|
|
| def __init__(self, config: CLIPVisionConfig):
|
| super().__init__(config)
|
|
|
| self.vision_model = CLIPVisionTransformer(config)
|
|
|
| self.visual_projection = nn.Linear(config.hidden_size, config.projection_dim, bias=False)
|
|
|
|
|
| self.post_init()
|
|
|
| def get_input_embeddings(self) -> nn.Module:
|
| return self.vision_model.embeddings.patch_embedding
|
|
|
| @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING)
|
| @replace_return_docstrings(output_type=CLIPVisionModelOutput, config_class=CLIPVisionConfig)
|
| def forward(
|
| self,
|
| pixel_values: Optional[torch.FloatTensor] = None,
|
| output_attentions: Optional[bool] = None,
|
| output_hidden_states: Optional[bool] = None,
|
| return_dict: Optional[bool] = None,
|
| ) -> Union[Tuple, CLIPVisionModelOutput]:
|
| r"""
|
| Returns:
|
|
|
| Examples:
|
|
|
| ```python
|
| >>> from PIL import Image
|
| >>> import requests
|
| >>> from transformers import AutoProcessor, CLIPVisionModelWithProjection
|
|
|
| >>> model = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-base-patch32")
|
| >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")
|
|
|
| >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
| >>> image = Image.open(requests.get(url, stream=True).raw)
|
|
|
| >>> inputs = processor(images=image, return_tensors="pt")
|
|
|
| >>> outputs = model(**inputs)
|
| >>> image_embeds = outputs.image_embeds
|
| ```"""
|
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
|
|
| vision_outputs = self.vision_model(
|
| pixel_values=pixel_values,
|
| output_attentions=output_attentions,
|
| output_hidden_states=output_hidden_states,
|
| return_dict=return_dict,
|
| )
|
|
|
| pooled_output = vision_outputs[1]
|
|
|
| image_embeds = self.visual_projection(pooled_output)
|
|
|
| if not return_dict:
|
| outputs = (image_embeds, vision_outputs[0]) + vision_outputs[2:]
|
| return tuple(output for output in outputs if output is not None)
|
|
|
| return CLIPVisionModelOutput(
|
| image_embeds=image_embeds,
|
| last_hidden_state=vision_outputs.last_hidden_state,
|
| hidden_states=vision_outputs.hidden_states,
|
| attentions=vision_outputs.attentions,
|
| )
|
|
|