
"""PLBART model configuration"""

from collections import OrderedDict
from collections.abc import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)


class PLBartConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`PLBartModel`]. It is used to instantiate a
    PLBART model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the PLBART
    [uclanlp/plbart-base](https://huggingface.co/uclanlp/plbart-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 50005):
            Vocabulary size of the PLBART model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`PLBartModel`].
        d_model (`int`, *optional*, defaults to 768):
            Dimensionality of the layers and the pooler layer.
        encoder_layers (`int`, *optional*, defaults to 6):
            Number of encoder layers.
        decoder_layers (`int`, *optional*, defaults to 6):
            Number of decoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer decoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
        activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        classifier_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the classifier.
        max_position_embeddings (`int`, *optional*, defaults to 1024):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
            for more details.
        decoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
            for more details.
        scale_embedding (`bool`, *optional*, defaults to `True`):
            Whether to scale the embeddings by sqrt(d_model).
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        forced_eos_token_id (`int`, *optional*, defaults to 2):
            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
            `eos_token_id`.

    Example:

    ```python
    >>> from transformers import PLBartConfig, PLBartModel

    >>> # Initializing a PLBART uclanlp/plbart-base style configuration
    >>> configuration = PLBartConfig()

    >>> # Initializing a model (with random weights) from the uclanlp/plbart-base style configuration
    >>> model = PLBartModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
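
    >>> # Illustrative addition (a sketch, not part of the original example): overriding a couple of the
    >>> # defaults documented in the Args section above; the variable names here are only placeholders
    >>> small_configuration = PLBartConfig(encoder_layers=3, decoder_layers=3)
    >>> small_model = PLBartModel(small_configuration)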
    ```"""

    model_type = "plbart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "encoder_attention_heads",
        "hidden_size": "d_model",
        "initializer_range": "init_std",
    }

    def __init__(
        self,
        vocab_size=50005,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=3072,
        encoder_attention_heads=12,
        decoder_layers=6,
        decoder_ffn_dim=3072,
        decoder_attention_heads=12,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=768,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor is sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class PLBartOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.use_past:
            return OrderedDict(
                [
                    ("last_hidden_state", {0: "batch", 1: "sequence"}),
                    ("past_keys", {0: "batch", 2: "sequence"}),
                    ("encoder_last_hidden_state", {0: "batch", 1: "sequence"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("last_hidden_state", {0: "batch", 1: "sequence"}),
                    ("encoder_last_hidden_state", {0: "batch", 1: "sequence"}),
                ]
            )


__all__ = ["PLBartConfig"]