| """ Siglip model configuration""" |
|
|
| import os |
| from typing import Union |
|
|
| from transformers.configuration_utils import PretrainedConfig |
| from transformers.utils import logging |
|
|
|
|
| logger = logging.get_logger(__name__) |
|
|
| SIGLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = { |
| "google/siglip-base-patch16-224": "https://huggingface.co/google/siglip-base-patch16-224/resolve/main/config.json", |
| } |
|
|
|
|
class SiglipTextConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`SiglipTextModel`]. It is used to instantiate a
    Siglip text encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the text encoder of the Siglip
    [google/siglip-base-patch16-224](https://huggingface.co/google/siglip-base-patch16-224) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 32000):
            Vocabulary size of the Siglip text model. Defines the number of different tokens that can be represented
            by the `input_ids` passed when calling [`SiglipModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        max_position_embeddings (`int`, *optional*, defaults to 64):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        pad_token_id (`int`, *optional*, defaults to 1):
            The id of the padding token in the vocabulary.
        bos_token_id (`int`, *optional*, defaults to 49406):
            The id of the beginning-of-sequence token in the vocabulary.
        eos_token_id (`int`, *optional*, defaults to 49407):
            The id of the end-of-sequence token in the vocabulary.

    Example:

    ```python
    >>> from transformers import SiglipTextConfig, SiglipTextModel

    >>> # Initializing a SiglipTextConfig with google/siglip-base-patch16-224 style configuration
    >>> configuration = SiglipTextConfig()

    >>> # Initializing a SiglipTextModel (with random weights) from the google/siglip-base-patch16-224 style configuration
    >>> model = SiglipTextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "siglip_text_model"
|
|
    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        max_position_embeddings=64,
        hidden_act="gelu_pytorch_tanh",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        pad_token_id=1,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.attention_dropout = attention_dropout
|
|
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
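
        # get the text config dict if we are loading from a full SiglipConfig checkpoint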
        if config_dict.get("model_type") == "siglip":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
|
|
|
|
class SiglipVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`SiglipVisionModel`]. It is used to instantiate a
    Siglip vision encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the vision encoder of the Siglip
    [google/siglip-base-patch16-224](https://huggingface.co/google/siglip-base-patch16-224) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_channels (`int`, *optional*, defaults to 3):
            Number of channels in the input images.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.

    Example:

    ```python
    >>> from transformers import SiglipVisionConfig, SiglipVisionModel

    >>> # Initializing a SiglipVisionConfig with google/siglip-base-patch16-224 style configuration
    >>> configuration = SiglipVisionConfig()

    >>> # Initializing a SiglipVisionModel (with random weights) from the google/siglip-base-patch16-224 style configuration
    >>> model = SiglipVisionModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "siglip_vision_model"
|
|
    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="gelu_pytorch_tanh",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
|
|
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
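
        # get the vision config dict if we are loading from a full SiglipConfig checkpoint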
        if config_dict.get("model_type") == "siglip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
|
|
|
|
class SiglipConfig(PretrainedConfig):
    r"""
    [`SiglipConfig`] is the configuration class to store the configuration of a [`SiglipModel`]. It is used to
    instantiate a Siglip model according to the specified arguments, defining the text model and vision model configs.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the Siglip
    [google/siglip-base-patch16-224](https://huggingface.co/google/siglip-base-patch16-224) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`SiglipTextConfig`].
        vision_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`SiglipVisionConfig`].
        kwargs (*optional*):
            Dictionary of keyword arguments.

    Example:

    ```python
    >>> from transformers import SiglipConfig, SiglipModel

    >>> # Initializing a SiglipConfig with google/siglip-base-patch16-224 style configuration
    >>> configuration = SiglipConfig()

    >>> # Initializing a SiglipModel (with random weights) from the google/siglip-base-patch16-224 style configuration
    >>> model = SiglipModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config

    >>> # We can also initialize a SiglipConfig from a SiglipTextConfig and a SiglipVisionConfig
    >>> from transformers import SiglipTextConfig, SiglipVisionConfig

    >>> # Initializing a SiglipText and SiglipVision configuration
    >>> config_text = SiglipTextConfig()
    >>> config_vision = SiglipVisionConfig()

    >>> config = SiglipConfig.from_text_vision_configs(config_text, config_vision)
    ```"""

    model_type = "siglip"
|
|
    def __init__(self, text_config=None, vision_config=None, **kwargs):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `SiglipTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `SiglipVisionConfig` with default values.")

        self.text_config = SiglipTextConfig(**text_config)
        self.vision_config = SiglipVisionConfig(**vision_config)
|
|
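        # Scaling factor applied when initializing weight matrices; kept at 1.0 and normally only changed for
        # internal initialization testing.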
        self.initializer_factor = 1.0
|
|
    @classmethod
    def from_text_vision_configs(cls, text_config: SiglipTextConfig, vision_config: SiglipVisionConfig, **kwargs):
        r"""
        Instantiate a [`SiglipConfig`] (or a derived class) from siglip text model configuration and siglip vision
        model configuration.

        Returns:
            [`SiglipConfig`]: An instance of a configuration object
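
        Example (mirrors the usage shown in the class docstring):

        ```python
        >>> from transformers import SiglipConfig, SiglipTextConfig, SiglipVisionConfig

        >>> # Combine default text and vision configurations into a single SiglipConfig
        >>> config = SiglipConfig.from_text_vision_configs(SiglipTextConfig(), SiglipVisionConfig())
        ```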
| """ |
|
|
| return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs) |
|
|