import os
from shutil import copyfile
from typing import Any, Optional

import sentencepiece as spm

from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
from ...utils.import_utils import requires


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]  # fmt: skip


@requires(backends=("sentencepiece",))
class MBartTokenizer(PreTrainedTokenizer):
    """
    Construct an MBART tokenizer.

    Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
    [SentencePiece](https://github.com/google/sentencepiece).

    The tokenization method is `<tokens> <eos> <language code>` for source language documents, and `<language code>
    <tokens> <eos>` for target language documents.

    Examples:

    ```python
    >>> from transformers import MBartTokenizer

    >>> tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
    >>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria"
    >>> expected_translation_romanian = "Şeful ONU declară că nu există o soluţie militară în Siria"
    >>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_romanian, return_tensors="pt")
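    >>> # Source ids end with [</s>, en_XX] and target ids with [</s>, ro_RO]; see
    >>> # `set_src_lang_special_tokens` / `set_tgt_lang_special_tokens` below.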
    ```"""

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: list[int] = []
    suffix_tokens: list[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[dict[str, Any]] = None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = (
            AddedToken(mask_token, lstrip=True, normalized=False) if isinstance(mask_token, str) else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # The original fairseq vocab and the spm vocab must be "aligned": fairseq
        # reserves ids 0-3 for <s>/<pad>/</s>/<unk>, while spm reserves id 0 for <unk>,
        # so every real spm piece is shifted by `fairseq_offset` (the first piece ","
        # has fairseq id 4 but spm id 3).
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        _additional_special_tokens = list(self.lang_code_to_id.keys())
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=None,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=_additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None, already_has_special_tokens: bool = False
    ) -> list[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`list[int]`):
                List of IDs.
            token_ids_1 (`list[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        T)ro   rp   rq   r,   Nr   )r[   get_special_tokens_maskrQ   r"   r#   )r5   ro   rp   rq   Zprefix_onesZsuffix_onesr`   r.   r6   rr      s    z&MBartTokenizer.get_special_tokens_mask)ro   rp   rl   c                 C   s,   |du r| j | | j S | j | | | j S )ab  
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. An MBART sequence has the following format, where `X` represents the sequence:

        - `input_ids` (for encoder) `X [eos, src_lang_code]`
        - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]`

        BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
        separator.

        Args:
            token_ids_0 (`list[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`list[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        N)r"   r#   )r5   ro   rp   r.   r.   r6    build_inputs_with_special_tokens   s    z/MBartTokenizer.build_inputs_with_special_tokensc                 C   sP   | j g}| jg}|du r.t|| | dg S t|| | | | | dg S )a  
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. mBART does not
        make use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (`list[int]`):
                List of IDs.
            token_ids_1 (`list[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `list[int]`: List of zeros.

        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by translation pipeline, to prepare inputs for the generate function"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
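    # A minimal sketch of how the translation pipeline drives the helper above; the
    # checkpoint name is an assumption, not part of this module:
    #
    #     inputs = tokenizer._build_translation_inputs(
    #         "UN Chief Says There Is No Military Solution in Syria",
    #         return_tensors="pt", src_lang="en_XX", tgt_lang="ro_RO",
    #     )
    #     # inputs["forced_bos_token_id"] == tokenizer.convert_tokens_to_ids("ro_RO"),
    #     # which generation uses to force the first decoded token to the target code.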
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> list[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
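    # Worked example of the id round-trip above, assuming the standard mBART
    # sentencepiece model: spm assigns the piece "," id 3, so `_convert_token_to_id(",")`
    # returns 3 + fairseq_offset = 4, the id "," had in the original fairseq vocabulary;
    # the four specials <s>/<pad>/</s>/<unk> bypass spm via `fairseq_tokens_to_ids`.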
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: list[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[list[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

__all__ = ["MBartTokenizer"]