"""Tokenization classes for CLIP."""

import json
import os
import unicodedata
from functools import lru_cache
from typing import Optional

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}


@lru_cache()
def bytes_to_unicode():
    """
    Returns a list of utf-8 bytes and a mapping to unicode strings. We specifically avoid mapping to whitespace/control
    characters that the bpe code barfs on.

    The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
    if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
    decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
    tables between utf-8 bytes and unicode strings.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
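
# Illustration (doctest-style; the values follow directly from the construction
# above): printable bytes map to themselves, while bytes such as the space
# character are shifted to printable codepoints above 255, so BPE never has to
# handle raw whitespace or control characters.
#
#     >>> b2u = bytes_to_unicode()
#     >>> b2u[ord("A")], b2u[ord(" ")]
#     ('A', 'Ġ')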


def get_pairs(word):
    """
    Return set of symbol pairs in a word.

    Word is represented as a tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
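
# For example (a direct consequence of the loop above):
#
#     >>> get_pairs(("l", "o", "w", "er</w>")) == {("l", "o"), ("o", "w"), ("w", "er</w>")}
#     True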
    r   r   N)setadd)wordpairsZ	prev_charcharr   r   r   	get_pairs>   s    r+   c                 C   s   t dd| } |  } | S )Nz\s+ )resubstrip)textr   r   r   whitespace_cleanL   s    r1   c                 C   s   |   } | sg S |  }|S )z@Runs basic whitespace cleaning and splitting on a piece of text.)r/   split)r0   tokensr   r   r   whitespace_tokenizeS   s
    r4   c                   @   sN   e Zd ZdZdddZdddZdd	 Zdd
dZdd Zdd Z	dd Z
dS )BasicTokenizera  
    Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).

    Args:
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        never_split (`Iterable`, *optional*):
            Collection of tokens which will never be split during tokenization. Only has an effect when
            `do_basic_tokenize=True`
        tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
            Whether or not to tokenize Chinese characters.

            This should likely be deactivated for Japanese (see this
            [issue](https://github.com/huggingface/transformers/issues/328)).
        strip_accents (`bool`, *optional*):
            Whether or not to strip all accents. If this option is not specified, then it will be determined by the
            value for `lowercase` (as in the original BERT).
        do_split_on_punc (`bool`, *optional*, defaults to `True`):
            In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
            the full context of the words, such as contractions.
    """

    def __init__(
        self,
        do_lower_case=True,
        never_split=None,
        tokenize_chinese_chars=True,
        strip_accents=None,
        do_split_on_punc=True,
    ):
        if never_split is None:
            never_split = []
        self.do_lower_case = do_lower_case
        self.never_split = set(never_split)
        self.tokenize_chinese_chars = tokenize_chinese_chars
        self.strip_accents = strip_accents
        self.do_split_on_punc = do_split_on_punc

    def tokenize(self, text, never_split=None):
        """
        Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.

        Args:
            never_split (`List[str]`, *optional*):
                Kept for backward compatibility purposes. Now implemented directly at the base class level (see
                [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
        """
        # union() returns a new set by concatenating the two sets.
        never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
        text = self._clean_text(text)

        if self.tokenize_chinese_chars:
            text = self._tokenize_chinese_chars(text)
        # prevents treating the same character with different unicode codepoints as different characters
        unicode_normalized_text = unicodedata.normalize("NFC", text)
        orig_tokens = whitespace_tokenize(unicode_normalized_text)
        split_tokens = []
        for token in orig_tokens:
            if token not in never_split:
                if self.do_lower_case:
                    token = token.lower()
                    if self.strip_accents is not False:
                        token = self._run_strip_accents(token)
                elif self.strip_accents:
                    token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token, never_split))

        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens

    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _run_split_on_punc(self, text, never_split=None):
        """Splits punctuation on a piece of text."""
        if not self.do_split_on_punc or (never_split is not None and text in never_split):
            return [text]
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1

        return ["".join(x) for x in output]

    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode blocks.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)
            or (cp >= 0x20000 and cp <= 0x2A6DF)
            or (cp >= 0x2A700 and cp <= 0x2B73F)
            or (cp >= 0x2B740 and cp <= 0x2B81F)
            or (cp >= 0x2B820 and cp <= 0x2CEAF)
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)
        ):
            return True

        return False

    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        output = []
        for char in text:
            cp = ord(char)
            if cp == 0 or cp == 0xFFFD or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)
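
# Fallback-path illustration (CLIPTokenizer only uses this class when ftfy is not
# installed; the output follows from the methods above):
#
#     >>> BasicTokenizer(strip_accents=False, do_split_on_punc=False).tokenize("Hello, WORLD!")
#     ['hello,', 'world!']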


class CLIPTokenizer(PreTrainedTokenizer):
    """
    Construct a CLIP tokenizer. Based on byte-level Byte-Pair-Encoding.

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
    this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        merges_file (`str`):
            Path to the merges file.
        errors (`str`, *optional*, defaults to `"replace"`):
            Paradigm to follow when decoding bytes to UTF-8. See
            [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
        unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        bos_token (`str`, *optional*, defaults to `"<|startoftext|>"`):
            The beginning of sequence token.
        eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The end of sequence token.
        pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The token used for padding, for example when batching sequences of different lengths.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        unk_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token

        try:
            import ftfy

            self.fix_text = ftfy.fix_text
        except ImportError:
            logger.info("ftfy or spacy is not installed using custom BasicTokenizer instead of ftfy.")
            self.nlp = BasicTokenizer(strip_accents=False, do_split_on_punc=False)
            self.fix_text = None

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().strip().split("\n")[1 : 49152 - 256 - 2 + 1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {"<|startoftext|>": "<|startoftext|>", "<|endoftext|>": "<|endoftext|>"}

        self.pat = re.compile(
            r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
            re.IGNORECASE,
        )

        super().__init__(
            errors=errors,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def build_inputs_with_special_tokens(
        self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None
    ) -> list[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
        adding special tokens. A CLIP sequence has the following format:

        - single sequence: `<|startoftext|> X <|endoftext|>`

        Pairs of sequences are not the expected use case, but they will be handled without a separator.

        Args:
            token_ids_0 (`list[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`list[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        N)bos_token_ideos_token_idr<   r   r   rm   rn   r   r   r    build_inputs_with_special_tokensS  s
    z.CLIPTokenizer.build_inputs_with_special_tokensF)r   r   already_has_special_tokensr   c                    sl   |rt  j||ddS |du r8dgdgt|  dg S dgdgt|  dg dg dgt|  dg S )a  
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`list[int]`):
                List of IDs.
            token_ids_1 (`list[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        T)r   r   r   Nr   r   )r   get_special_tokens_maskrQ   )r<   r   r   r   r   r   r   r   n  s    z%CLIPTokenizer.get_special_tokens_maskc                 C   sP   | j g}| jg}|du r.t|| | dg S t|| | | | | dg S )a  
        Create a mask from the two sequences passed. CLIP does not make use of token type ids, therefore a list of
        zeros is returned.

        Args:
            token_ids_0 (`list[int]`):
                List of IDs.
            token_ids_1 (`list[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `list[int]`: List of zeros.
        Nr   )r   r   rQ   r   r   r   r   $create_token_type_ids_from_sequences  s
    z2CLIPTokenizer.create_token_type_ids_from_sequencesc           
         s|  | j v r j | S t|d d |d d f }t|}|sF|d S t| fddd}| jvrhqd|\}}g }d}|t|k r:z|||}	W n* ty   |||d   Y q:Y n0 ||||	  |	}|| |kr"|t|d k r"||d  |kr"|	||  |d7 }qx|	||  |d7 }qxt|}|}t|dkrZqdqFt|}qFd	
|}| j |< |S )
NrO   </w>c                    s    j | tdS )Ninf)r   getfloat)pairr   r   r   <lambda>  r   z#CLIPTokenizer.bpe.<locals>.<lambda>keyr   r      r,   )r   ri   r+   minr   rQ   index
ValueErrorrF   r   rH   )
r<   rI   r(   r)   ZbigramfirstsecondZnew_wordrS   jr   r   r   bpe  sB    


2




zCLIPTokenizer.bpec                    s   g } j du r"d j|}nt  | }t j|D ]B}d fdd|	dD }|
dd  |dD  qB|S )zTokenize a string.Nr,   rK   c                 3   s   | ]} j | V  qd S r6   )r~   )r   r$   r   r   r   	<genexpr>  s   z*CLIPTokenizer._tokenize.<locals>.<genexpr>ra   c                 s   s   | ]
}|V  qd S r6   r   )r   Z	bpe_tokenr   r   r   r     r   )rs   rH   rw   rJ   r1   rD   r-   findallr   encoderF   r   r2   )r<   r0   
bpe_tokensrI   r   r   r   	_tokenize  s    
"zCLIPTokenizer._tokenizec                 C   s   | j || j | jS )z0Converts a token (str) in an id using the vocab.)r{   r   rl   )r<   rI   r   r   r   _convert_token_to_id  s    z"CLIPTokenizer._convert_token_to_idc                 C   s   | j |S )z=Converts an index (integer) in a token (str) using the vocab.)r}   r   )r<   r   r   r   r   _convert_id_to_token  s    z"CLIPTokenizer._convert_id_to_tokenc                    s@   d |}t fdd|D }|jd jddd }|S )z:Converts a sequence of tokens (string) in a single string.rK   c                    s   g | ]} j | qS r   )r   )r   cr   r   r   r     r   z:CLIPTokenizer.convert_tokens_to_string.<locals>.<listcomp>ra   )rk   r   r,   )rH   	bytearraydecoderk   r\   r/   )r<   r3   r0   Z
byte_arrayr   r   r   convert_tokens_to_string  s    
z&CLIPTokenizer.convert_tokens_to_string)save_directoryfilename_prefixr   c           
   	   C   sT  t j|s"td| d d S t j||r6|d ndtd  }t j||rX|d ndtd  }t|ddd	.}|t	j
| jd
dddd  W d    n1 s0    Y  d}t|ddd	v}|d t| j dd dD ]D\}}	||	krtd| d |	}|d|d  |d7 }qW d    n1 sB0    Y  ||fS )NzVocabulary path (z) should be a directory-rK   r   r   wra   rb   r   TF)indent	sort_keysensure_asciirh   r   z#version: 0.2
c                 S   s   | d S )Nr   r   )kvr   r   r   r     r   z/CLIPTokenizer.save_vocabulary.<locals>.<lambda>r   zSaving vocabulary to zZ: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!r,   r   )ospathisdirru   errorrH   VOCAB_FILES_NAMESrx   writery   dumpsr{   sortedr   r|   warning)
r<   r   r   r   Z
merge_filefr   writerr   Ztoken_indexr   r   r   save_vocabulary  s.    <


*zCLIPTokenizer.save_vocabulary)r\   r]   r^   r]   r]   )N)NF)N)N)rW   rX   rY   rZ   r   Zvocab_files_namesZmodel_input_namesr=   propertyr   r   r   intr   r   boolr   r   r   r   r   r   r   rq   ri   r   __classcell__r   r   r   r   r[      sB        1
   *r[   )rZ   ry   r   rB   	functoolsr   typingr   regexr-   Ztokenization_utilsr   r   r   r   r	   utilsr
   Z
get_loggerrW   ru   r   r%   r+   r1   r4   r5   r[   __all__r   r   r   r   <module>   s.   


 "  