
"""Idefics3 model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING, AutoConfig


logger = logging.get_logger(__name__)


class Idefics3VisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Idefics3VisionModel`]. It is used to instantiate a
    Idefics3 vision encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the SigLIP checkpoint
    [google/siglip-base-patch16-224](https://huggingface.co/google/siglip-base-patch16-224) used in the Idefics3 model
    [HuggingFaceM4/Idefics3-8B-Llama3](https://huggingface.co/HuggingFaceM4/Idefics3-8B-Llama3).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 1152):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_channels (`int`, *optional*, defaults to 3):
            Number of channels in the input images.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 32):
            The size (resolution) of each patch.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.

    Example:

    ```python
    >>> from transformers.models.idefics3.modeling_idefics3 import Idefics3VisionTransformer
    >>> from transformers.models.idefics3.configuration_idefics3 import Idefics3VisionConfig

    >>> # Initializing an Idefics3VisionConfig with a google/siglip-base-patch16-224 style configuration
    >>> configuration = Idefics3VisionConfig()

    >>> # Initializing an Idefics3VisionTransformer (with random weights) from the google/siglip-base-patch16-224 style configuration
    >>> model = Idefics3VisionTransformer(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
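
    >>> # Illustrative only: overriding a few defaults; these values are an assumption, not a released checkpoint
    >>> custom_configuration = Idefics3VisionConfig(image_size=364, patch_size=14)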
    ```"""

    model_type = "idefics3_vision"
    base_config_key = "vision_config"

    def __init__(
        self,
        hidden_size=1152,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=16,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="gelu_pytorch_tanh",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range


class Idefics3Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Idefics3Model`]. It is used to instantiate a
    Idefics3 model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Idefics3
    [HuggingFaceM4/Idefics3-8B-Llama3](https://huggingface.co/HuggingFaceM4/Idefics3-8B-Llama3) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should cache the key/value pairs of the attention mechanism. Only
            relevant if `config.is_decoder=True`.
        image_token_id (`int`, *optional*, defaults to 128257):
            The id of the "image" token.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether or not to tie the input word embeddings with the output embeddings.
        vision_config (`Idefics3VisionConfig` or `dict`, *optional*, defaults to `Idefics3VisionConfig`):
            Custom vision config or dict for the vision tower.
        text_config (`PretrainedConfig` or `dict`, *optional*, defaults to `LlamaConfig`):
            Custom text config or dict for the text model.
        scale_factor (`int`, *optional*, defaults to 2):
            The scale factor for the image encoder (pixel shuffle downsampling); the number of image hidden states is
            reduced by a factor of `scale_factor**2`.
        pad_token_id (`int`, *optional*, defaults to 128002):
            The id of the padding token.

    Example:
    ```python
    >>> from transformers import Idefics3Model, Idefics3Config
    >>> # Initializing configuration
    >>> configuration = Idefics3Config()
    >>> # Initializing a model from the configuration
    >>> model = Idefics3Model(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
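
    >>> # Illustrative only: composing from explicit sub-config dicts (values are assumptions, not a released checkpoint)
    >>> custom_configuration = Idefics3Config(
    ...     vision_config={"image_size": 364, "patch_size": 14},
    ...     text_config={"model_type": "llama"},
    ...     scale_factor=2,
    ... )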
    ```"""

    model_type = "idefics3"
    sub_configs = {"text_config": AutoConfig, "vision_config": Idefics3VisionConfig}

    def __init__(
        self,
        use_cache=True,
        image_token_id=128257,
        tie_word_embeddings=False,
        vision_config=None,
        text_config=None,
        scale_factor=2,
        pad_token_id=128002,
        **kwargs,
    ):
        self.image_token_id = image_token_id
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings

        # Vision tower: accept an Idefics3VisionConfig instance, a dict, or fall back to the defaults.
        if vision_config is None:
            self.vision_config = Idefics3VisionConfig()
            logger.info("vision_config is None, using default vision config")
        elif isinstance(vision_config, dict):
            self.vision_config = Idefics3VisionConfig(**vision_config)
        elif isinstance(vision_config, Idefics3VisionConfig):
            self.vision_config = vision_config

        # Text backbone: resolve dicts through CONFIG_MAPPING; default to a Llama config.
        if isinstance(text_config, dict):
            text_config["model_type"] = text_config.get("model_type", "llama")
            text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
        elif text_config is None:
            logger.info("text_config is None, using default text config")
            text_config = CONFIG_MAPPING["llama"](
                rms_norm_eps=1e-5,
                pad_token_id=pad_token_id,
                tie_word_embeddings=False,
            )

        self.text_config = text_config
        self.scale_factor = scale_factor

        super().__init__(**kwargs, pad_token_id=pad_token_id, tie_word_embeddings=tie_word_embeddings)


__all__ = ["Idefics3Config", "Idefics3VisionConfig"]