# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/aimv2/modular_aimv2.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_aimv2.py file directly. One of our CI checks enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# coding=utf-8
# Copyright 2025 Apple Inc. and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class Aimv2VisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of an [`Aimv2VisionModel`]. It is used to instantiate
    an AIMv2 vision encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the vision encoder of the AIMv2
    [apple/aimv2-large-patch14-224](https://huggingface.co/apple/aimv2-large-patch14-224) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 1024):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 2816):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 24):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_channels (`int`, *optional*, defaults to 3):
            Number of channels in the input images.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 14):
            The size (resolution) of each patch.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the RMS normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        qkv_bias (`bool`, *optional*, defaults to `False`):
            Whether to add a bias to the queries, keys and values.
        mlp_bias (`bool`, *optional*, defaults to `False`):
            Whether to add a bias to the linear layers or not.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        use_head (`bool`, *optional*, defaults to `True`):
            Whether to use an attention pooling head or not.
        is_native (`bool`, *optional*, defaults to `False`):
            Whether to use a checkpoint trained at native image resolution or not.

    Example:

    ```python
    >>> from transformers import Aimv2VisionConfig, Aimv2VisionModel

    >>> # Initializing an Aimv2VisionConfig with apple/aimv2-large-patch14-224 style configuration
    >>> configuration = Aimv2VisionConfig()

    >>> # Initializing an Aimv2VisionModel (with random weights) from the apple/aimv2-large-patch14-224 style configuration
    >>> model = Aimv2VisionModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "aimv2_vision_model"
    base_config_key = "vision_config"

    def __init__(
        self,
        hidden_size: int = 1024,
        intermediate_size: int = 2816,
        num_hidden_layers: int = 24,
        num_attention_heads: int = 8,
        num_channels: int = 3,
        image_size: int = 224,
        patch_size: int = 14,
        rms_norm_eps: float = 1e-5,
        attention_dropout: float = 0.0,
        qkv_bias: bool = False,
        mlp_bias: bool = False,
        hidden_act: str = "silu",
        initializer_range: float = 0.02,
        use_head: bool = True,
        is_native: bool = False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.attention_dropout = attention_dropout
        self.hidden_act = hidden_act
        self.use_head = use_head
        self.initializer_range = initializer_range
        self.mlp_bias = mlp_bias
        self.qkv_bias = qkv_bias
        self.rms_norm_eps = rms_norm_eps
        self.is_native = is_native


class Aimv2TextConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of an [`Aimv2TextModel`]. It is used to instantiate an
    AIMv2 text encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the text encoder of the AIMv2
    [apple/aimv2-large-patch14-224-lit](https://huggingface.co/apple/aimv2-large-patch14-224-lit) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 49408):
            Vocabulary size of the AIMv2 text model. Defines the number of different tokens that can be represented
            by the `input_ids` passed when calling [`Aimv2Model`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 2048):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 6):
            Number of attention heads for each attention layer in the Transformer encoder.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the RMS normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        qkv_bias (`bool`, *optional*, defaults to `False`):
            Whether to add a bias to the queries, keys and values.
        mlp_bias (`bool`, *optional*, defaults to `False`):
            Whether to add a bias to the linear layers or not.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the encoder and pooler.
            If string, `"gelu"`, `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
        pad_token_id (`int`, *optional*):
            The id of the padding token in the vocabulary.
        bos_token_id (`int`, *optional*):
            The id of the beginning-of-sequence token in the vocabulary.
        eos_token_id (`int`, *optional*, defaults to 49407):
            The id of the end-of-sequence token in the vocabulary.
        max_position_embeddings (`int`, *optional*, defaults to 77):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
    """

    model_type = "aimv2_text_model"
    base_config_key = "text_config"

    def __init__(
        self,
        vocab_size: int = 49408,
        hidden_size: int = 768,
        intermediate_size: int = 2048,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 6,
        rms_norm_eps: float = 1e-5,
        attention_dropout: float = 0.0,
        qkv_bias: bool = False,
        mlp_bias: bool = False,
        hidden_act: str = "silu",
        pad_token_id: Optional[int] = None,
        bos_token_id: Optional[int] = None,
        eos_token_id: int = 49407,
        max_position_embeddings: int = 77,
        initializer_range: float = 0.02,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.mlp_bias = mlp_bias
        self.qkv_bias = qkv_bias
        self.rms_norm_eps = rms_norm_eps


class Aimv2Config(PretrainedConfig):
    r"""
    [`Aimv2Config`] is the configuration class to store the configuration of an [`Aimv2Model`]. It is used to
    instantiate an AIMv2 model according to the specified arguments, defining the text model and vision model configs.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the AIMv2
    [apple/aimv2-large-patch14-224-lit](https://huggingface.co/apple/aimv2-large-patch14-224-lit) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`Aimv2TextConfig`].
        vision_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`Aimv2VisionConfig`].
        projection_dim (`int`, *optional*, defaults to 512):
            Dimensionality of the text and vision projection layers.
        logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
            The initial value of the *logit_scale* parameter.
        kwargs (*optional*):
            Dictionary of keyword arguments.

    Example:

    ```python
    >>> from transformers import Aimv2Config, Aimv2Model

    >>> # Initializing an Aimv2Config with apple/aimv2-large-patch14-224-lit style configuration
    >>> configuration = Aimv2Config()

    >>> # Initializing an Aimv2Model (with random weights) from the apple/aimv2-large-patch14-224-lit style configuration
    >>> model = Aimv2Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config

    >>> # We can also initialize an Aimv2Config from an Aimv2TextConfig and an Aimv2VisionConfig
    >>> from transformers import Aimv2TextConfig, Aimv2VisionConfig

    >>> # Initializing AIMv2Text and AIMv2Vision configurations
    >>> config_text = Aimv2TextConfig()
    >>> config_vision = Aimv2VisionConfig()

    >>> config = Aimv2Config.from_text_vision_configs(config_text, config_vision)
    ```"""

    model_type = "aimv2"
    sub_configs = {"text_config": Aimv2TextConfig, "vision_config": Aimv2VisionConfig}

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, **kwargs
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `Aimv2TextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `Aimv2VisionConfig` with default values.")

        self.text_config = Aimv2TextConfig(**text_config)
        self.vision_config = Aimv2VisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.max_logit_scale = 100.0

    @classmethod
    def from_text_vision_configs(cls, text_config: Aimv2TextConfig, vision_config: Aimv2VisionConfig, **kwargs):
        r"""
        Instantiate an [`Aimv2Config`] (or a derived class) from an AIMv2 text model configuration and an AIMv2
        vision model configuration.

        Returns:
            [`Aimv2Config`]: An instance of a configuration object
        """

        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)


__all__ = ["Aimv2Config", "Aimv2VisionConfig", "Aimv2TextConfig"]
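

# Minimal usage sketch (illustrative only, not part of the generated transformers API surface):
# it shows how the three configuration classes defined above compose. Only names defined in this
# module are used; the chosen hyperparameter values are arbitrary examples. Run the file directly
# to try it; nothing below executes on import.
if __name__ == "__main__":
    # Build the sub-configs with a couple of non-default values.
    example_text_config = Aimv2TextConfig(hidden_size=512, num_attention_heads=8)
    example_vision_config = Aimv2VisionConfig(image_size=336)

    # Combine them into a full Aimv2Config; extra kwargs such as `projection_dim` are forwarded.
    example_config = Aimv2Config.from_text_vision_configs(
        example_text_config, example_vision_config, projection_dim=256
    )

    print(example_config.text_config.hidden_size)  # 512
    print(example_config.vision_config.image_size)  # 336
    print(example_config.projection_dim)  # 256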