from __future__ import annotations

import asyncio
from queue import Queue
from typing import TYPE_CHECKING


if TYPE_CHECKING:
    from ..models.auto import AutoTokenizer


class BaseStreamer:
    """
    Base class from which `.generate()` streamers should inherit.
    """

    def put(self, value):
        """Function that is called by `.generate()` to push new tokens"""
        raise NotImplementedError()

    def end(self):
        """Function that is called by `.generate()` to signal the end of generation"""
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    """
    Simple text streamer that prints the token(s) to stdout as soon as entire words are formed.

    <Tip warning={true}>

    The API for the streamer classes is still under development and may change in the future.

    </Tip>

    Parameters:
        tokenizer (`AutoTokenizer`):
            The tokenizer used to decode the tokens.
        skip_prompt (`bool`, *optional*, defaults to `False`):
            Whether to skip the prompt to `.generate()` or not. Useful e.g. for chatbots.
        decode_kwargs (`dict`, *optional*):
            Additional keyword arguments to pass to the tokenizer's `decode` method.

    Examples:

        ```python
        >>> from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

        >>> tok = AutoTokenizer.from_pretrained("openai-community/gpt2")
        >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
        >>> inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
        >>> streamer = TextStreamer(tok)

        >>> # Despite returning the usual output, the streamer will also print the generated text to stdout.
        >>> _ = model.generate(**inputs, streamer=streamer, max_new_tokens=20)
        An increasing sequence: one, two, three, four, five, six, seven, eight, nine, ten, eleven,
        ```
    Fr   bool)	tokenizerskip_promptc                 K  s(   || _ || _|| _g | _d| _d| _d S )Nr   T)r   r   decode_kwargstoken_cache	print_lennext_tokens_are_prompt)r   r   r   r   r   r   r   __init__K   s    zTextStreamer.__init__c                 C  s  t |jdkr&|jd dkr&tdnt |jdkr<|d }| jrR| jrRd| _dS | j|  | jj	| jfi | j
}|dr|| jd }g | _d| _njt |dkr| t|d r|| jd }|  jt |7  _n*|| j|dd  }|  jt |7  _| | dS )	zm
        Receives tokens, decodes them, and prints them to stdout as soon as they form entire words.
           r   z'TextStreamer only supports batch size 1FN
 )lenshape
ValueErrorr   r   r   extendtolistr   decoder   endswithr   _is_chinese_charordrfindon_finalized_text)r   r   textprintable_textr   r   r   r   U   s&    

zTextStreamer.putc                 C  s^   t | jdkrB| jj| jfi | j}|| jd }g | _d| _nd}d| _| j|dd dS )z;Flushes any remaining cache and prints a newline to stdout.r   N T)
stream_end)r#   r   r   r(   r   r   r   r-   )r   r.   r/   r   r   r   r   w   s    zTextStreamer.endstrr.   r1   c                 C  s   t |d|sdndd dS )zNPrints the new text to stdout. If the stream is ending, also prints a newline.Tr0   N)flushr   )printr   r.   r1   r   r   r   r-      s    zTextStreamer.on_finalized_textc                 C  s   |dkr|dks|dkr |dks|dkr0|dks|dkr@|dks|d	krP|d
ks|dkr`|dks|dkrp|dks|dkr|dkrdS dS )z6Checks whether CP is the codepoint of a CJK character.i N  i  i 4  iM  i   iߦ i  i? i@ i i  i i   i  i  i TFr   )r   cpr   r   r   r*      sD    
zTextStreamer._is_chinese_charN)F)F)	r   r   r   r   r   r   r   r-   r*   r   r   r   r   r   )   s   !
"r   c                      sN   e Zd ZdZddddd fdd	Zdd
ddddZdd Zdd Z  ZS )TextIteratorStreamera  
    Streamer that stores print-ready text in a queue, to be used by a downstream application as an iterator. This is
    useful for applications that benefit from accessing the generated text in a non-blocking way (e.g. in an interactive
    Gradio demo).

    <Tip warning={true}>

    The API for the streamer classes is still under development and may change in the future.

    </Tip>

    Parameters:
        tokenizer (`AutoTokenizer`):
            The tokenizer used to decode the tokens.
        skip_prompt (`bool`, *optional*, defaults to `False`):
            Whether to skip the prompt to `.generate()` or not. Useful e.g. for chatbots.
        timeout (`float`, *optional*):
            The timeout for the text queue. If `None`, the queue will block indefinitely. Useful to handle exceptions
            in `.generate()`, when it is called in a separate thread.
        decode_kwargs (`dict`, *optional*):
            Additional keyword arguments to pass to the tokenizer's `decode` method.

    Examples:

        ```python
        >>> from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
        >>> from threading import Thread

        >>> tok = AutoTokenizer.from_pretrained("openai-community/gpt2")
        >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
        >>> inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
        >>> streamer = TextIteratorStreamer(tok)

        >>> # Run the generation in a separate thread, so that we can fetch the generated text in a non-blocking way.
        >>> generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=20)
        >>> thread = Thread(target=model.generate, kwargs=generation_kwargs)
        >>> thread.start()
        >>> generated_text = ""
        >>> for new_text in streamer:
        ...     generated_text += new_text
        >>> generated_text
        'An increasing sequence: one, two, three, four, five, six, seven, eight, nine, ten, eleven,'
        ```
    FNr   r   float | Noner   r   timeoutc                   s.   t  j||fi | t | _d | _|| _d S N)superr   r   
text_queuestop_signalr;   r   r   r   r;   r   	__class__r   r   r      s    zTextIteratorStreamer.__init__r2   r3   c                 C  s.   | j j|| jd |r*| j j| j| jd dS )\Put the new text in the queue. If the stream is ending, also put a stop signal in the queue.r;   N)r>   r   r;   r?   r6   r   r   r   r-      s    z&TextIteratorStreamer.on_finalized_textc                 C  s   | S r<   r   r   r   r   r   __iter__   s    zTextIteratorStreamer.__iter__c                 C  s*   | j j| jd}|| jkr"t n|S d S NrD   )r>   getr;   r?   StopIterationr
   r   r   r   __next__   s    
zTextIteratorStreamer.__next__)FN)F)	r   r   r   r   r   r-   rE   rI   __classcell__r   r   rA   r   r8      s   . r8   c                      sN   e Zd ZdZddddd fdd	Zdd
ddddZdd Zdd Z  ZS )AsyncTextIteratorStreamera'	  
    Streamer that stores print-ready text in a queue, to be used by a downstream application as an async iterator.
    This is useful for applications that benefit from accessing the generated text asynchronously (e.g. in an
    interactive Gradio demo).

    <Tip warning={true}>

    The API for the streamer classes is still under development and may change in the future.

    </Tip>

    Parameters:
        tokenizer (`AutoTokenizer`):
            The tokenizer used to decode the tokens.
        skip_prompt (`bool`, *optional*, defaults to `False`):
            Whether to skip the prompt to `.generate()` or not. Useful e.g. for chatbots.
        timeout (`float`, *optional*):
            The timeout for the text queue. If `None`, the queue will block indefinitely. Useful to handle exceptions
            in `.generate()`, when it is called in a separate thread.
        decode_kwargs (`dict`, *optional*):
            Additional keyword arguments to pass to the tokenizer's `decode` method.

    Raises:
        TimeoutError: If token generation time exceeds timeout value.

    Examples:

        ```python
        >>> from transformers import AutoModelForCausalLM, AutoTokenizer, AsyncTextIteratorStreamer
        >>> from threading import Thread
        >>> import asyncio

        >>> tok = AutoTokenizer.from_pretrained("openai-community/gpt2")
        >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
        >>> inputs = tok(["An increasing sequence: one,"], return_tensors="pt")

        >>> # Run the generation in a separate thread, so that we can fetch the generated text in a non-blocking way.
        >>> async def main():
        ...     # Important: AsyncTextIteratorStreamer must be initialized inside a coroutine!
        ...     streamer = AsyncTextIteratorStreamer(tok)
        ...     generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=20)
        ...     thread = Thread(target=model.generate, kwargs=generation_kwargs)
        ...     thread.start()
        ...     generated_text = ""
        ...     async for new_text in streamer:
        ...         generated_text += new_text
        ...     print(generated_text)
        >>> asyncio.run(main())
        An increasing sequence: one, two, three, four, five, six, seven, eight, nine, ten, eleven,
        ```
    FNr   r   r9   r:   c                   sF   t  j||fi | t | _d | _|| _t | _t	td| _
d S )Nr;   )r=   r   asyncior   r>   r?   r;   Zget_running_looploophasattrhas_asyncio_timeoutr@   rA   r   r   r     s    

z"AsyncTextIteratorStreamer.__init__r2   r3   c                 C  s.   | j | jj| |r*| j | jj| j dS )rC   N)rM   Zcall_soon_threadsafer>   
put_nowaitr?   r6   r   r   r   r-   (  s    z+AsyncTextIteratorStreamer.on_finalized_textc                 C  s   | S r<   r   r   r   r   r   	__aiter__.  s    z#AsyncTextIteratorStreamer.__aiter__c              	     s   zx| j rZt| j4 I d H & | j I d H }W d   I d H  qv1 I d H sN0    Y  ntj| j | jdI d H }W n tjy   t Y n0 || jkrt n|S d S rF   )	rO   rL   r;   r>   rG   wait_forTimeoutErrorr?   StopAsyncIterationr
   r   r   r   	__anext__1  s    < 
z#AsyncTextIteratorStreamer.__anext__)FN)F)	r   r   r   r   r   r-   rQ   rU   rJ   r   r   rA   r   rK      s   5 
rK   )
__future__r   rL   queuer   typingr   Zmodels.autor   r   r   r8   rK   r   r   r   r   <module>   s   yG