
from typing import Optional

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "Fast" BPE tokenizer for HerBERT (backed by HuggingFace's *tokenizers* library).

    Peculiarities:

    - uses BERT's pre-tokenizer: BertPreTokenizer splits tokens on spaces, and also on punctuation. Each occurrence of
      a punctuation character will be treated separately.

    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
    refer to the superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        merges_file (`str`):
            Path to the merges file.
    """
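
    # Illustrative note (not from the upstream file): because of the BERT-style pre-tokenizer, a
    # string such as "Hello, world!" is first split into ["Hello", ",", "world", "!"], and only then
    # are BPE merges applied; the final sub-tokens depend on the trained vocabulary and merges.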
    N<s><unk><pad><mask></s>c	           
      L     t                      j        ||f||||||d|	 d S )N)r   	cls_token	unk_token	pad_token
mask_token	sep_token)super__init__)selfr	   r
   r   r   r   r   r   r   kwargs	__class__s             /home/jaya/work/projects/VOICE-AGENT/VIET/agent-env/lib/python3.11/site-packages/transformers/models/herbert/tokenization_herbert_fast.pyr   zHerbertTokenizerFast.__init__2   sX     	
	
 *!
	
 
	
 
	
 
	
 
	
 
	
 
	
    token_ids_0token_ids_1returnc                 R    | j         g}| j        g}|||z   |z   S ||z   |z   |z   |z   S )a  
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens. A HerBERT sequence, like a BERT sequence, has the following format:

        - single sequence: `<s> X </s>`
        - pair of sequences: `<s> A </s> B </s>`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None, already_has_special_tokens: bool = False
    ) -> list[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
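
    # Illustrative example (not from the upstream file): for token_ids_0 = [5, 6] and
    # token_ids_1 = [7], get_special_tokens_mask returns [1, 0, 0, 1, 0, 1] -- the 1s mark the
    # positions where <s> and </s> are inserted, the 0s mark the sequence tokens themselves.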

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)


__all__ = ["HerbertTokenizerFast"]