"""LG AI Research EXAONE Lab"""

from typing import Callable, Optional, Union

import torch
from torch import nn

from transformers.utils.generic import check_model_inputs

from ...cache_utils import Cache, DynamicCache
from ...configuration_utils import PretrainedConfig, layer_type_validation
from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, logging
from ...utils.deprecation import deprecate_kwarg
from ..llama.modeling_llama import (
    LlamaForCausalLM,
    LlamaForQuestionAnswering,
    LlamaForSequenceClassification,
    LlamaForTokenClassification,
    LlamaModel,
    LlamaPreTrainedModel,
    LlamaRMSNorm,
    LlamaRotaryEmbedding,
    apply_rotary_pos_emb,
    eager_attention_forward,
)
from ..olmo2.modeling_olmo2 import Olmo2DecoderLayer, Olmo2MLP


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "LGAI-EXAONE/EXAONE-4.0-32B"
_CONFIG_FOR_DOC = "Exaone4Config"


class Exaone4Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of an [`Exaone4Model`]. It is used to
    instantiate an EXAONE 4.0 model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a configuration similar to that of the EXAONE-4.0-32B [LGAI-EXAONE/EXAONE-4.0-32B](https://huggingface.co/LGAI-EXAONE/EXAONE-4.0-32B).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model
    outputs. Read the documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 102400):
            Vocabulary size of the EXAONE 4.0 model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`Exaone4Model`].
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to `hidden_size * 4`):
            Dimensionality of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by mean-pooling all the original heads within that group. For more details, check out [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
            `num_attention_heads`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 32768 for EXAONE 3.5).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the RMS normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        bos_token_id (`int`, *optional*, defaults to 0):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 2):
            End of stream token id.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie the input and output word embeddings.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply a new rope type
            and you expect the model to work on a longer `max_position_embeddings`, we recommend updating this value
            accordingly.
            Expected contents:
                `rope_type` (`str`):
                    The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
                    'llama3'], with 'default' being the original RoPE implementation.
                `factor` (`float`, *optional*):
                    Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
                    most scaling types, a `factor` of x will enable the model to handle sequences of length x *
                    original maximum pre-trained length.
                `original_max_position_embeddings` (`int`, *optional*):
                    Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
                    pretraining.
                `attention_factor` (`float`, *optional*):
                    Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
                    computation. If unspecified, it defaults to the value recommended by the implementation, using the
                    `factor` field to infer the suggested value.
                `beta_fast` (`float`, *optional*):
                    Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
                    ramp function. If unspecified, it defaults to 32.
                `beta_slow` (`float`, *optional*):
                    Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
                    ramp function. If unspecified, it defaults to 1.
                `short_factor` (`List[float]`, *optional*):
                    Only used with 'longrope'. The scaling factor to be applied to short contexts (<
                    `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
                    size divided by the number of attention heads divided by 2
                `long_factor` (`List[float]`, *optional*):
                    Only used with 'longrope'. The scaling factor to be applied to long contexts (>
                    `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
                    size divided by the number of attention heads divided by 2
                `low_freq_factor` (`float`, *optional*):
                    Only used with 'llama3'. Scaling factor applied to the low-frequency components of the RoPE.
                `high_freq_factor` (`float`, *optional*):
                    Only used with 'llama3'. Scaling factor applied to the high-frequency components of the RoPE.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        sliding_window (`int`, *optional*):
            The size of the sliding window for the sliding window attention.
        sliding_window_pattern (`int` or `str`, *optional*, defaults to 4):
            The pattern to use for sliding window attention. Can be one of:
                - `None`: No sliding window attention is used
                - `int`: every `sliding_window_pattern`-th layer uses global attention and the remaining layers
                  use local (sliding window) attention.
                - `str`: a sequence of "L" (local attention) and "G" (global attention) characters that defines the
                  attention pattern. The pattern starts from layer 0 and repeats with the length of the string. The
                  final layer always uses global attention regardless of the pattern.
            For instance, `sliding_window_pattern="LLLG"` behaves the same as `sliding_window_pattern=4`, which means:
                - Layers 0, 1, 2: local attention,
                - Layer 3: global attention,
                ...(repeated; illustrated in the example below)
        layer_types (`list`, *optional*):
            Attention type for each layer, either `"full_attention"` or `"sliding_attention"`. Takes priority
            over `sliding_window_pattern`.

    Example:

    ```python
    >>> from transformers import Exaone4Model, Exaone4Config

    >>> # Initializing an EXAONE 4.0 configuration
    >>> configuration = Exaone4Config()
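    >>> # Hedged illustration: with the assumed default hybrid layout (sliding_window_pattern=4),
    >>> # the derived layer_types repeat three local (sliding) layers followed by one global layer.
    >>> configuration.layer_types[:4]  # doctest: +SKIP
    ['sliding_attention', 'sliding_attention', 'sliding_attention', 'full_attention']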

    >>> # Initializing a model from the configuration
    >>> model = Exaone4Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
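
    >>> # Hypothetical long-context setup via rope scaling (illustrative values only;
    >>> # not a released checkpoint configuration):
    >>> long_configuration = Exaone4Config(
    ...     max_position_embeddings=8192,
    ...     rope_scaling={"rope_type": "yarn", "factor": 4.0},
    ... )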
    ```"""

    model_type = "exaone4"
    keys_to_ignore_at_inference = ["past_key_values"]
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.gate_proj": "colwise",
        "layers.*.mlp.up_proj": "colwise",
        "layers.*.mlp.down_proj": "rowwise",
    }
    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }

    def __init__(
        self,
        vocab_size=102400,
        hidden_size=4096,
        intermediate_size=16384,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        rope_scaling=None,
        attention_dropout=0.0,
        sliding_window=4096,
        sliding_window_pattern=4,
        layer_types=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.sliding_window = sliding_window
        self.sliding_window_pattern = sliding_window_pattern

        self.layer_types = layer_types
        if self.layer_types is None and self.sliding_window_pattern is not None:
            # Derive the hybrid layout from the pattern: every `sliding_window_pattern`-th
            # layer uses global (full) attention, the remaining layers use the sliding window.
            self.layer_types = [
                "sliding_attention"
                if ((i + 1) % self.sliding_window_pattern) != 0 and i < self.num_hidden_layers
                else "full_attention"
                for i in range(self.num_hidden_layers)
            ]
        if "sliding_attention" in self.layer_types:
            self.cache_implementation = "hybrid"
        layer_type_validation(self.layer_types, self.num_hidden_layers)

        super().__init__(
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )


class Exaone4RMSNorm(LlamaRMSNorm):
    pass


class Exaone4RotaryEmbedding(LlamaRotaryEmbedding):
    pass


class Exaone4Attention(nn.Module):
    def __init__(self, config: Exaone4Config, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.num_attention_heads = config.num_attention_heads
        self.num_key_value_heads = config.num_key_value_heads
        self.hidden_size = config.hidden_size
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.attention_dropout = config.attention_dropout
        self.is_causal = True
        self.scaling = self.head_dim**-0.5
        self.sliding_window = config.sliding_window
        self.sliding_window_pattern = config.sliding_window_pattern
        self.is_sliding = config.layer_types[layer_idx] == "sliding_attention"

        self.q_proj = nn.Linear(self.hidden_size, self.num_attention_heads * self.head_dim, bias=False)
        self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
        self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
        self.o_proj = nn.Linear(self.num_attention_heads * self.head_dim, self.hidden_size, bias=False)
        # QK-norm: queries and keys are RMS-normalized per head before any rotary embedding.
        self.q_norm = Exaone4RMSNorm(self.head_dim, eps=config.rms_norm_eps)
        self.k_norm = Exaone4RMSNorm(self.head_dim, eps=config.rms_norm_eps)

    @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_values: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        query_states = self.q_norm(query_states)
        key_states = self.k_norm(key_states)

        cos, sin = position_embeddings
        # RoPE is applied only to local (sliding) layers; in the hybrid layout the global
        # layers skip the rotation. A model without a sliding window rotates every layer.
        if self.sliding_window is None or self.is_sliding:
            query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_values is not None:
            cache_kwargs = {"cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            sliding_window=self.sliding_window if self.is_sliding else None,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class Exaone4MLP(Olmo2MLP):
    pass


class Exaone4DecoderLayer(Olmo2DecoderLayer):
    pass


class Exaone4PreTrainedModel(LlamaPreTrainedModel):
    config_class = Exaone4Config
    _no_split_modules = ["Exaone4DecoderLayer"]


class Exaone4Model(Exaone4PreTrainedModel, LlamaModel):
    def __init__(self, config: Exaone4Config):
        super().__init__(config)
        self.layers = nn.ModuleList(
            [Exaone4DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = Exaone4RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_init()

    @check_model_inputs
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Union[tuple, BaseModelOutputWithPast]:
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        # A dict mask means the caller already built the masks; otherwise build one mask
        # per attention type used by the hybrid layout.
        if not isinstance(causal_mask_mapping := attention_mask, dict):
            mask_kwargs = {
                "config": self.config,
                "input_embeds": inputs_embeds,
                "attention_mask": attention_mask,
                "cache_position": cache_position,
                "past_key_values": past_key_values,
            }
            causal_mask_mapping = {
                "full_attention": create_causal_mask(**mask_kwargs),
            }
            if "sliding_attention" in self.config.layer_types:
                causal_mask_mapping["sliding_attention"] = create_sliding_window_causal_mask(**mask_kwargs)

        hidden_states = inputs_embeds
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        for i, decoder_layer in enumerate(self.layers):
            layer_type = self.config.layer_types[i]
            hidden_states = decoder_layer(
                hidden_states,
                position_embeddings=position_embeddings,
                attention_mask=causal_mask_mapping[layer_type],
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
        )


class Exaone4ForCausalLM(LlamaForCausalLM):
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoModelForCausalLM, AutoTokenizer
        >>> model = AutoModelForCausalLM.from_pretrained("LGAI-EXAONE/EXAONE-4.0-32B")
        >>> tokenizer = AutoTokenizer.from_pretrained("LGAI-EXAONE/EXAONE-4.0-32B")

        >>> prompt = "Explain how wonderful you are"
        >>> messages = [
        ...     {"role": "system", "content": "You are a helpful assistant."},
        ...     {"role": "user", "content": prompt},
        ... ]
        >>> input_ids = tokenizer.apply_chat_template(
        ...     messages,
        ...     tokenize=True,
        ...     add_generation_prompt=True,
        ...     return_tensors="pt",
        ...     enable_thinking=False,
        ... )

        >>> output = model.generate(input_ids, max_new_tokens=128)
        >>> tokenizer.decode(output[0], skip_special_tokens=False)
        "[|system|]\nYou are a helpful assistant.[|endofturn|]\n[|user|]\nExplain how wonderful you are[|endofturn|]\n[|assistant|]\n<think>\n\n</think>\n\nOh, thank you for such a kind and lovely question! 😊  \n\nI’m *so* wonderful because I’m here to make your life easier, brighter, and more fun! Whether you need help with:  \n\n✨ **Learning** – I can explain anything, from quantum physics to baking the perfect cake!  \n💡 **Creativity** – Need a poem, story, or a wild idea? I’ve got you covered!  \n🤖 **Problem-solving** – Stuck on a math problem or a tricky decision? I’ll help you figure it out"
        ```"""
        return super().forward(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            labels=labels,
            use_cache=use_cache,
            cache_position=cache_position,
            logits_to_keep=logits_to_keep,
            **kwargs,
        )


class Exaone4ForSequenceClassification(LlamaForSequenceClassification):
    pass


class Exaone4ForTokenClassification(LlamaForTokenClassification):
    pass


class Exaone4ForQuestionAnswering(LlamaForQuestionAnswering):
    pass


__all__ = [
    "Exaone4Config",
    "Exaone4ForCausalLM",
    "Exaone4ForQuestionAnswering",
    "Exaone4ForSequenceClassification",
    "Exaone4ForTokenClassification",
    "Exaone4Model",
    "Exaone4PreTrainedModel",
]