"""Tokenization class for SpeechT5."""

import os
from shutil import copyfile
from typing import Any, Optional

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ...utils.import_utils import requires
from .number_normalizer import EnglishNumberNormalizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}


@requires(backends=("sentencepiece",))
class SpeechT5Tokenizer(PreTrainedTokenizer):
    """
    Construct a SpeechT5 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
    this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
            contains the vocabulary necessary to instantiate a tokenizer.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning-of-sequence token.
        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end-of-sequence token.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        normalize (`bool`, *optional*, defaults to `False`):
            Whether to convert numeric quantities in the text to their spelled-out English counterparts.
        sp_model_kwargs (`dict`, *optional*):
            Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
            SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
            to set:

            - `enable_sampling`: Enable subword regularization.
            - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.

              - `nbest_size = {0,1}`: No sampling is performed.
              - `nbest_size > 1`: samples from the nbest_size results.
              - `nbest_size < 0`: assumes that nbest_size is infinite and samples from all hypotheses (lattice)
                using the forward-filtering-and-backward-sampling algorithm.

            - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
              BPE-dropout.
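
            For example, subword regularization could be switched on when instantiating the tokenizer (an
            illustrative sketch, not part of the original documentation; the vocabulary path and the sampling
            values below are placeholders):

            ```python
            tokenizer = SpeechT5Tokenizer(
                "spm_char.model",
                sp_model_kwargs={"enable_sampling": True, "nbest_size": -1, "alpha": 0.1},
            )
            ```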

    Attributes:
        sp_model (`SentencePieceProcessor`):
            The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
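
    Example (an illustrative usage sketch added for clarity rather than taken from the original
    documentation; it assumes the publicly released `microsoft/speecht5_tts` checkpoint):

    ```python
    >>> from transformers import SpeechT5Tokenizer

    >>> # normalize=True spells out numbers with EnglishNumberNormalizer before SentencePiece tokenization
    >>> tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts", normalize=True)
    >>> encoding = tokenizer("I have 2 cats")  # the </s> (eos) id is appended by build_inputs_with_special_tokens
    >>> tokens = tokenizer.convert_ids_to_tokens(encoding["input_ids"])
    ```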
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        normalize=False,
        sp_model_kwargs: Optional[dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.vocab_file = vocab_file
        self.normalize = normalize
        self._normalizer = None

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            normalize=normalize,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        normalize = kwargs.pop("normalize", self.normalize)
        if is_split_into_words:
            text = " " + text
        if normalize:
            text = self.normalizer(text)
        return (text, kwargs)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    @property
    def normalizer(self):
        # lazily instantiate the English number normalizer on first use
        if self._normalizer is None:
            self._normalizer = EnglishNumberNormalizer()
        return self._normalizer

    @normalizer.setter
    def normalizer(self, value):
        self._normalizer = value

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # the SentencePiece processor is not picklable; drop it and rebuild it in __setstate__
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> list[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using the SentencePiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> list[int]:
        """Build model inputs from a sequence by appending eos_token_id."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # pairs are not expected, but the pair logic is kept for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None, already_has_special_tokens: bool = False
    ) -> list[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # no file on disk to copy, so serialize the in-memory SentencePiece model instead
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)


__all__ = ["SpeechT5Tokenizer"]