
from typing import Optional

import torch
from torch import nn

from ...cache_utils import Cache, DynamicCache
from ...configuration_utils import PretrainedConfig
from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask
from ...modeling_outputs import BaseModelOutputWithPast
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring
from ...utils.generic import check_model_inputs
from ..mistral.configuration_mistral import MistralConfig
from ..qwen2.modeling_qwen2 import (
    Qwen2Attention,
    Qwen2DecoderLayer,
    Qwen2ForCausalLM,
    Qwen2ForQuestionAnswering,
    Qwen2ForSequenceClassification,
    Qwen2ForTokenClassification,
    Qwen2MLP,
    Qwen2Model,
    Qwen2PreTrainedModel,
    Qwen2RMSNorm,
    Qwen2RotaryEmbedding,
)


class MinistralConfig(MistralConfig):
    r"""
    This is the configuration class to store the configuration of a [`MinistralModel`]. It is used to instantiate a
    Ministral model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the Ministral-8B-Instruct-2410.

    [mistralai/Ministral-8B-Instruct-2410](https://huggingface.co/mistralai/Ministral-8B-Instruct-2410)

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 32000):
            Vocabulary size of the Ministral model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`MinistralModel`].
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 14336):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_key_value_heads (`int`, *optional*, defaults to 8):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA); otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by mean-pooling all the original heads within that group. For more details, check out [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, defaults to `8` (see the short
            example below the argument list).
        head_dim (`int`, *optional*, defaults to `hidden_size // num_attention_heads`):
            The attention head dimension.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to `4096*32`):
            The maximum sequence length that this model might ever be used with. Ministral's sliding window attention
            allows sequences of up to 4096*32 tokens.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/value states (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*):
            The id of the padding token.
        bos_token_id (`int`, *optional*, defaults to 1):
            The id of the "beginning-of-sequence" token.
        eos_token_id (`int`, *optional*, defaults to 2):
            The id of the "end-of-sequence" token.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether the model's input and output word embeddings should be tied.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        sliding_window (`int`, *optional*, defaults to 4096):
            Sliding window attention window size. If not specified, will default to `4096`.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        layer_types (`list`, *optional*):
            Attention pattern for each layer. Each entry is either `"full_attention"` or `"sliding_attention"`. If
            not provided, every layer defaults to `"sliding_attention"` when `sliding_window` is set, and to
            `"full_attention"` otherwise.
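
    The defaults above interact as follows (a short illustrative check using only the documented default values):
    with `num_attention_heads=32` and `num_key_value_heads=8`, each key/value head is shared by 4 query heads (GQA),
    and because `sliding_window` defaults to 4096, an unspecified `layer_types` resolves to sliding-window attention
    for every layer.

    ```python
    >>> from transformers import MinistralConfig

    >>> config = MinistralConfig()
    >>> config.num_attention_heads // config.num_key_value_heads  # query heads sharing one key/value head
    4
    >>> config.layer_types[0]
    'sliding_attention'
    ```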

    ```python
    >>> from transformers import MinistralModel, MinistralConfig

    >>> # Initializing a Ministral 8B style configuration
    >>> configuration = MinistralConfig()

    >>> # Initializing a model from the Ministral 8B style configuration
    >>> model = MinistralModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "ministral"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=14336,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=8,
        head_dim=None,
        hidden_act="silu",
        max_position_embeddings=4096 * 32,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=None,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        sliding_window=4096,
        attention_dropout=0.0,
        layer_types=None,
        **kwargs,
    ):
        # Call `PretrainedConfig.__init__` directly so every attribute is set explicitly below.
        PretrainedConfig.__init__(
            self,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.sliding_window = sliding_window
        self.head_dim = head_dim

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = 8

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.attention_dropout = attention_dropout
        self.layer_types = layer_types

        if self.layer_types is None:
            # Every layer uses sliding-window attention when a window size is configured.
            self.layer_types = [
                "sliding_attention" if self.sliding_window is not None else "full_attention"
            ] * num_hidden_layers


class MinistralMLP(Qwen2MLP):
    pass


class MinistralAttention(Qwen2Attention):
    def __init__(self, config: MinistralConfig, layer_idx: int):
        super().__init__(config, layer_idx)
        # Unlike Qwen2, Ministral uses no bias on the query/key/value projections.
        self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=False)
        self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
        self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)


class MinistralRMSNorm(Qwen2RMSNorm):
    pass


class MinistralDecoderLayer(Qwen2DecoderLayer):
    pass


class MinistralPreTrainedModel(Qwen2PreTrainedModel):
    pass


class MinistralRotaryEmbedding(Qwen2RotaryEmbedding):
    pass


class MinistralModel(Qwen2Model):
    def __init__(self, config: MinistralConfig):
        super().__init__(config)
        del self.has_sliding_layers

    @check_model_inputs
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPast:
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        if not isinstance(causal_mask_mapping := attention_mask, dict):
            # Build one mask per attention pattern; each layer selects the one matching its `attention_type`.
            mask_kwargs = {
                "config": self.config,
                "input_embeds": inputs_embeds,
                "attention_mask": attention_mask,
                "cache_position": cache_position,
                "past_key_values": past_key_values,
                "position_ids": position_ids,
            }
            causal_mask_mapping = {
                "full_attention": create_causal_mask(**mask_kwargs),
                "sliding_attention": create_sliding_window_causal_mask(**mask_kwargs),
            }

        hidden_states = inputs_embeds
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=causal_mask_mapping[decoder_layer.attention_type],
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
        )


class MinistralForCausalLM(Qwen2ForCausalLM):
    pass


class MinistralForSequenceClassification(Qwen2ForSequenceClassification):
    pass


class MinistralForTokenClassification(Qwen2ForTokenClassification):
    pass


class MinistralForQuestionAnswering(Qwen2ForQuestionAnswering):
    pass


__all__ = [
    "MinistralConfig",
    "MinistralPreTrainedModel",
    "MinistralModel",
    "MinistralForCausalLM",
    "MinistralForSequenceClassification",
    "MinistralForTokenClassification",
    "MinistralForQuestionAnswering",
]
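

# Illustrative usage sketch (not part of the upstream file): build a tiny, randomly initialized
# Ministral model from the classes above and run one forward pass. The small sizes are made up
# for the example; real checkpoints use the defaults documented in `MinistralConfig`.
#
#     import torch
#     from transformers import MinistralConfig, MinistralModel
#
#     config = MinistralConfig(
#         vocab_size=128,
#         hidden_size=64,
#         intermediate_size=128,
#         num_hidden_layers=2,
#         num_attention_heads=4,
#         num_key_value_heads=2,
#     )
#     model = MinistralModel(config)
#     output = model(input_ids=torch.randint(0, config.vocab_size, (1, 8)))
#     print(output.last_hidden_state.shape)  # torch.Size([1, 8, 64])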