__all__ = ['VTDEConfig', 'VTDEModel', 'SamVisionPreTrainedModel', 'SamVisionModel']

from typing import Optional, Tuple, Union

import torch

from transformers import PreTrainedModel, VisionTextDualEncoderConfig, VisionTextDualEncoderModel
from transformers.models.clip.modeling_clip import CLIPOutput, clip_loss

class VTDEConfig(VisionTextDualEncoderConfig):
    model_type = "vtde"

    def __init__(self, projection_dim=512, logit_scale_init_value=2.6592,
                 text_pooling_mode='mean',
                 vision_pooling_mode='max',
                 **kwargs):
        """
        pooling_mode is one of ['mean', 'max', 'cls', 'norm'].

        See:
        https://arxiv.org/pdf/2210.09996.pdf
        https://github.com/kahnchana/clippy/blob/3c102c29c32f7c66c6e52e09b795fe9c061bbb03/src/open_clip/hf_model.py#L56
        and also
        https://arxiv.org/pdf/2301.07836.pdf
        """
        self.text_pooling_mode = text_pooling_mode
        self.vision_pooling_mode = vision_pooling_mode
        super().__init__(projection_dim=projection_dim, logit_scale_init_value=logit_scale_init_value, **kwargs)


VTDEConfig.register_for_auto_class()
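
# Illustrative sketch (not part of the module): one way to build a VTDEConfig from
# existing tower configs. CLIPVisionConfig()/BertConfig() defaults are used here purely
# as placeholders for the example.
#
#     from transformers import BertConfig, CLIPVisionConfig
#
#     config = VTDEConfig.from_vision_text_configs(
#         vision_config=CLIPVisionConfig(),
#         text_config=BertConfig(),
#         projection_dim=512,
#         text_pooling_mode='mean',
#         vision_pooling_mode='max',
#     )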


class VTDEModel(VisionTextDualEncoderModel):
    config_class = VTDEConfig
    base_model_prefix = "vtde"

    def __init__(
        self,
        config: Optional[VTDEConfig] = None,
        vision_model: Optional[PreTrainedModel] = None,
        text_model: Optional[PreTrainedModel] = None,
    ):
        super().__init__(config, vision_model, text_model)
        self.text_pooling_mode = config.text_pooling_mode
        self.vision_pooling_mode = config.vision_pooling_mode

    def get_text_features(
        self,
        input_ids=None,
        attention_mask=None,
        position_ids=None,
        token_type_ids=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        text_outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        if self.text_pooling_mode == 'cls':
            pooled_output = text_outputs[1]  # pooler output of the text tower
        elif self.text_pooling_mode == 'mean':
            pooled_output = torch.mean(text_outputs[0], dim=1)
        elif self.text_pooling_mode == 'max':
            pooled_output = torch.max(text_outputs[0], dim=1)[0]
        elif self.text_pooling_mode == 'norm':
            # select, per sample, the token with the largest L2 norm
            # (the first token is excluded from the search, hence the +1 offset)
            last_hidden_states = text_outputs[0]
            token_norms = torch.norm(last_hidden_states[:, 1:, :], dim=-1)
            max_norm_idx = torch.argmax(token_norms, dim=1) + 1
            batch_idx = torch.arange(last_hidden_states.shape[0], device=last_hidden_states.device)
            pooled_output = last_hidden_states[batch_idx, max_norm_idx]
        else:
            raise NotImplementedError(f"Unknown text_pooling_mode: {self.text_pooling_mode}")

        text_features = self.text_projection(pooled_output)

        return text_features

    def get_image_features(
        self,
        pixel_values=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        if self.vision_pooling_mode == 'cls':
            pooled_output = vision_outputs[1]  # pooler output of the vision tower
        elif self.vision_pooling_mode == 'mean':
            pooled_output = torch.mean(vision_outputs[0], dim=1)
        elif self.vision_pooling_mode == 'max':
            pooled_output = torch.max(vision_outputs[0], dim=1)[0]
        elif self.vision_pooling_mode == 'norm':
            # select, per sample, the patch with the largest L2 norm
            # (the CLS token at position 0 is excluded, hence the +1 offset)
            last_hidden_states = vision_outputs[0]
            patch_norms = torch.norm(last_hidden_states[:, 1:, :], dim=-1)
            max_norm_idx = torch.argmax(patch_norms, dim=1) + 1
            batch_idx = torch.arange(last_hidden_states.shape[0], device=last_hidden_states.device)
            pooled_output = last_hidden_states[batch_idx, max_norm_idx]
        else:
            raise NotImplementedError(f"Unknown vision_pooling_mode: {self.vision_pooling_mode}")

        image_features = self.visual_projection(pooled_output)

        return image_features
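
    # Shape note for the pooling modes above (illustrative sizes): given a
    # last_hidden_state of shape (batch, seq, hidden) = (2, 50, 768),
    #   'mean' / 'max' reduce over dim=1                                       -> (2, 768)
    #   'cls'  takes the tower's pooler output                                 -> (2, 768)
    #   'norm' keeps, per sample, the non-first token with the largest L2 norm -> (2, 768)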
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        return_loss: Optional[bool] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], CLIPOutput]:
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        image_embeds = self.get_image_features(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        text_embeds = self.get_text_features(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # normalize the features
        image_embeds = image_embeds / image_embeds.norm(dim=-1, keepdim=True)
        text_embeds = text_embeds / text_embeds.norm(dim=-1, keepdim=True)

        # cosine similarity as logits
        logit_scale = self.logit_scale.exp()
        logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale
        logits_per_image = logits_per_text.T

        loss = None
        if return_loss:
            loss = clip_loss(logits_per_text)

        # The per-tower outputs are not returned by get_*_features, so the projected
        # embeddings are reused in the slots where the parent class places them.
        if not return_dict:
            output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_embeds, image_embeds)
            return ((loss,) + output) if loss is not None else output

        return CLIPOutput(
            loss=loss,
            logits_per_image=logits_per_image,
            logits_per_text=logits_per_text,
            text_embeds=text_embeds,
            image_embeds=image_embeds,
            text_model_output=text_embeds,
            vision_model_output=image_embeds,
        )


VTDEModel.register_for_auto_class("AutoModel")
VTDEModel.register_for_auto_class("AutoModelForZeroShotImageClassification")
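
# Illustrative sketch (not part of the module): pairing a pretrained CLIP vision tower
# with a pretrained text encoder. The checkpoint names below are examples only.
#
#     from transformers import AutoModel, CLIPVisionModel
#
#     vision_model = CLIPVisionModel.from_pretrained("openai/clip-vit-base-patch32")
#     text_model = AutoModel.from_pretrained("roberta-base")
#     config = VTDEConfig.from_vision_text_configs(
#         vision_model.config, text_model.config,
#         text_pooling_mode='mean', vision_pooling_mode='max',
#     )
#     model = VTDEModel(config=config, vision_model=vision_model, text_model=text_model)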


from torch import nn

from transformers.models.sam.configuration_sam import SamVisionConfig
from transformers.models.sam.modeling_sam import SamPositionalEmbedding, SamVisionEncoder, SamVisionEncoderOutput

class SamVisionPreTrainedModel(PreTrainedModel):
    config_class = SamVisionConfig
    base_model_prefix = "sam_vision_encoder"
    main_input_name = "pixel_values"

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d)):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()

class SamVisionModel(SamVisionPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        # Positional embedding module as in SamModel; it is not applied to pixel values,
        # since SamVisionEncoder consumes raw pixel values directly.
        self.shared_image_embedding = SamPositionalEmbedding(config)
        self.vision_encoder = SamVisionEncoder(config)
        # initialize weights and apply final processing (standard PreTrainedModel idiom)
        self.post_init()

    def forward(
        self,
        pixel_values=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ) -> SamVisionEncoderOutput:
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        vision_encoder_outputs = self.vision_encoder(
            pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        return vision_encoder_outputs

SamVisionModel.register_for_auto_class("AutoModel")
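
# Illustrative sketch (not part of the module): running the standalone SAM vision tower.
# A default SamVisionConfig is used here purely for illustration.
#
#     import torch
#     from transformers import SamVisionConfig
#
#     model = SamVisionModel(SamVisionConfig())
#     pixel_values = torch.randn(1, 3, model.config.image_size, model.config.image_size)
#     outputs = model(pixel_values=pixel_values)
#     features = outputs.last_hidden_state  # (batch, output_channels, H', W') feature map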