from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput, concatenate_list, make_flat_list_of_images
from ...processing_utils import ImagesKwargs, MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack
from ...tokenization_utils_base import PreTokenizedInput, TextInput
from ...video_utils import VideoInput


class InternVLImagesKwargs(ImagesKwargs, total=False):
    crop_to_patches: Optional[bool]
    min_patches: Optional[int]
    max_patches: Optional[int]


class InternVLProcessorKwargs(ProcessingKwargs, total=False):
    images_kwargs: InternVLImagesKwargs
    _defaults = {
        "text_kwargs": {
            "padding_side": "left",
            "return_mm_token_type_ids": False,
        },
        "images_kwargs": {
            "crop_to_patches": True,
        },
        "videos_kwargs": {},
    }


class InternVLProcessor(ProcessorMixin):
    r"""
    Constructs an InternVL processor which wraps a [`AutoImageProcessor`] and
    [`PreTrainedTokenizerFast`] tokenizer into a single processor that inherits both the image processor and
    tokenizer functionalities. See the [`~InternVLProcessor.__call__`] and [`~InternVLProcessor.decode`] for more information.
    Args:
        image_processor ([`AutoImageProcessor`], *optional*):
            The image processor is a required input.
        tokenizer ([`PreTrainedTokenizer`, `PreTrainedTokenizerFast`], *optional*):
            The tokenizer is a required input.
        video_processor ([`AutoVideoProcessor`], *optional*):
            The video processor is a required input.
        image_seq_length (`int`, *optional*, defaults to 256):
            The number of image tokens to use per image patch. It should be set so that:
            image_seq_length = (config.image_size // config.patch_size) ** 2 * (config.scale_factor**2)
        chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
            in a chat into a tokenizable string.
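
    Example -- a minimal usage sketch (the checkpoint name is illustrative, and the image placeholder is read
    from `processor.image_token` rather than hard-coded, since it varies between checkpoints):

    ```python
    from PIL import Image
    from transformers import AutoProcessor

    processor = AutoProcessor.from_pretrained("OpenGVLab/InternVL3-1B-hf")
    image = Image.new("RGB", (448, 448))
    prompt = f"{processor.image_token}\nDescribe this image."
    inputs = processor(images=image, text=prompt, return_tensors="pt")
    # input_ids now contains image_seq_length image tokens per image patch;
    # pixel_values holds the stacked image patches.
    ```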
    """

    attributes = ["image_processor", "tokenizer", "video_processor"]
    image_processor_class = "AutoImageProcessor"
    video_processor_class = "AutoVideoProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(
        self,
        image_processor=None,
        tokenizer=None,
        video_processor=None,
        image_seq_length: int = 256,
        chat_template=None,
        **kwargs,
    ):
        self.image_seq_length = image_seq_length
        # All special image/video tokens are read off the tokenizer so the processor works with any checkpoint
        self.start_image_token = tokenizer.start_image_token
        self.end_image_token = tokenizer.end_image_token
        self.start_image_token_id = tokenizer.start_image_token_id
        self.end_image_token_id = tokenizer.end_image_token_id
        self.image_token = tokenizer.context_image_token
        self.video_token = tokenizer.video_token
        self.image_token_id = tokenizer.context_image_token_id
        self.image_ids = [self.image_token_id, self.start_image_token_id, self.end_image_token_id]

        super().__init__(image_processor, tokenizer, video_processor, chat_template=chat_template, **kwargs)

    def _insert_media_placeholders(
        self,
        text: list[str],
        image_pixel_values,
        video_pixel_values,
        image_num_patches: list[int],
        video_num_patches: list[int],
        image_num_patches_indices: np.ndarray,
        video_num_patches_indices: np.ndarray,
        video_patch_indices: np.ndarray,
    ):
        """
        Processes interleaved text with <image> and <video> placeholders, replacing them with appropriate
        image and video tokens while keeping track of the patches used.
        """
        image_index = 0
        video_index = 0
        processed_text = []
        image_video_patches = []
        replace_strings = []
        # Support interleaved images and videos in the same prompt: processed patches are collected in
        # `image_video_patches` in the order their placeholders appear in the prompt.
        for prompt in text:
            new_prompt = prompt
            while self.image_token in new_prompt or self.video_token in new_prompt:
                if self.image_token in new_prompt and (
                    self.video_token not in new_prompt
                    or new_prompt.index(self.image_token) < new_prompt.index(self.video_token)
                ):
                    # Slice out the patches belonging to the current image
                    start_index = image_num_patches_indices[image_index - 1] if image_index > 0 else 0
                    end_index = image_num_patches_indices[image_index]
                    image_video_patches.append(image_pixel_values[start_index:end_index])
                    # Replace the image placeholder with the expanded run of image tokens
                    new_prompt = new_prompt.replace(self.image_token, "<placeholder>", 1)
                    replace_strings.append(
                        f"{self.start_image_token}{self.image_token * self.image_seq_length * image_num_patches[image_index]}{self.end_image_token}"
                    )
                    image_index += 1
                else:
                    # Slice out the patches belonging to the current video, accounting for multiple frames
                    # per video and (potentially) multiple patches per frame
                    current_patch_index = video_patch_indices[video_index - 1] if video_index > 0 else 0
                    end_patch_index = video_patch_indices[video_index]
                    start_index = video_num_patches_indices[current_patch_index] if video_index > 0 else 0
                    end_index = video_num_patches_indices[end_patch_index - 1]
                    image_video_patches.append(video_pixel_values[start_index:end_index])
                    # Expand the video placeholder frame by frame, each frame with its own run of image tokens
                    num_patches = list(video_num_patches[current_patch_index:end_patch_index])
                    video_prompt = "\n".join(
                        f"Frame{i + 1}: {self.start_image_token}{self.image_token * self.image_seq_length * num_patches[i]}{self.end_image_token}"
                        for i in range(len(num_patches))
                    )
                    replace_strings.append(video_prompt)
                    new_prompt = new_prompt.replace(self.video_token, "<placeholder>", 1)
                    video_index += 1
            # Swap the temporary placeholders back in, in order of appearance
            while "<placeholder>" in new_prompt:
                replace_str = replace_strings.pop(0)
                new_prompt = new_prompt.replace("<placeholder>", replace_str, 1)
            processed_text.append(new_prompt)

        return processed_text, image_video_patches, image_index, video_index

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]] = None,
        audio=None,
        videos: Optional[VideoInput] = None,
        **kwargs: Unpack[InternVLProcessorKwargs],
    ) -> BatchFeature:
        """
        Main method to prepare for the model one or several sequence(s) and image(s). This method forwards the `text`
        and `kwargs` arguments to PreTrainedTokenizerFast's [`~PreTrainedTokenizerFast.__call__`] to encode the text
        (`text` must not be `None`). To prepare the vision inputs, this method forwards the `images` and `kwargs`
        arguments to GotOcr2ImageProcessor's [`~GotOcr2ImageProcessor.__call__`] if `images` is not `None`.
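
        Example -- a hedged sketch of passing a video (the clip is a dummy 4D array standing in for real
        decoded frames; `processor.video_token` supplies the checkpoint-specific placeholder):

        ```python
        import numpy as np

        clip = np.zeros((8, 448, 448, 3), dtype=np.uint8)  # 8 frames, channels-last
        prompt = f"{processor.video_token}\nWhat happens in this video?"
        inputs = processor(videos=[clip], text=prompt, return_tensors="pt")
        ```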

        Args:
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. Both channels-first and channels-last formats are supported.
            text (`str`, `list[str]`, `list[list[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            videos (`np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`):
                The video or batch of videos to be prepared. Each video can be a 4D NumPy array or PyTorch tensor.
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:
                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.
                - `'jax'`: Return JAX `jnp.ndarray` objects.

        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:

            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
              `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
        """
        if text is None:
            raise ValueError("You have to specify text.")

        output_kwargs = self._merge_kwargs(
            InternVLProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )

        if not isinstance(text, (list, tuple)):
            text = [text]

        # Process images and videos separately, as videos are not cropped into patches
        image_num_patches = []
        video_num_patches = []
        image_videos_inputs = {}
        image_pixel_values = None
        video_pixel_values = None
        image_num_patches_indices = np.array([0])
        video_patch_indices = np.array([0])
        video_num_patches_indices = np.array([0])
        if images is not None:
            images = self.image_processor.fetch_images(images)
            images = make_flat_list_of_images(images)
            image_inputs = self.image_processor(images=images, **output_kwargs["images_kwargs"])
            image_num_patches = image_inputs.pop("num_patches")
            image_pixel_values = image_inputs.pop("pixel_values")
            image_num_patches_indices = np.cumsum(image_num_patches)
        if videos is not None:
            video_inputs = self.video_processor(videos=videos, **output_kwargs["videos_kwargs"])
            video_pixel_values = video_inputs.pop("pixel_values_videos")

            # Obtain per-frame information first, then flatten to (batch_size * num_frames, ...)
            num_frames_per_video = [len(video) for video in video_pixel_values]
            video_num_patches = [1 for frames in num_frames_per_video for _ in range(frames)]
            video_patch_indices = np.cumsum(num_frames_per_video)
            video_num_patches_indices = np.cumsum(video_num_patches)
            video_pixel_values = video_pixel_values.flatten(0, 1)

        if images is not None or videos is not None:
            text, image_video_patches, image_index, video_index = self._insert_media_placeholders(
                text,
                image_pixel_values,
                video_pixel_values,
                image_num_patches,
                video_num_patches,
                image_num_patches_indices,
                video_num_patches_indices,
                video_patch_indices,
            )
            if images is not None and image_index != len(images):
                raise ValueError("Number of image placeholders in the prompt does not match the number of images.")
            if videos is not None and video_index != len(videos):
                raise ValueError("Number of video placeholders in the prompt does not match the number of videos.")

            # Concatenate the interleaved image and video patches (works for lists, NumPy arrays and torch tensors)
            image_videos_inputs = {"pixel_values": concatenate_list(image_video_patches)}

        return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
        return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", None)
        text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])
        self._check_special_mm_tokens(text, text_inputs, modalities=["image"])

        if return_mm_token_type_ids:
            array_ids = np.array(text_inputs["input_ids"])
            mm_token_type_ids = np.zeros_like(array_ids)
            mm_token_type_ids[np.isin(array_ids, self.image_ids)] = 1
            text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()

        return BatchFeature(data={**text_inputs, **image_videos_inputs}, tensor_type=return_tensors)

    def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
        """
        Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.

        Args:
            image_sizes (`list[list[int]]`, *optional*):
                The input sizes formatted as (height, width) for each image.

        Returns:
            `MultiModalData`: A `MultiModalData` object holding the number of tokens for each of the provided
            input modalities, along with other useful data.
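
        Example -- an illustrative sketch (sizes are arbitrary; exact patch counts depend on the image
        processor's cropping settings):

        ```python
        data = processor._get_num_multimodal_tokens(image_sizes=[(448, 448), (896, 448)])
        # Each entry of data["num_image_tokens"] grows with image_seq_length times the patch count,
        # plus the begin/end image tokens.
        ```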
        """
        vision_data = {}
        if image_sizes is not None:
            images_kwargs = InternVLProcessorKwargs._defaults.get("images_kwargs", {})
            images_kwargs.update(kwargs)

            num_image_patches = [
                self.image_processor.get_number_of_image_patches(*image_size, images_kwargs)
                for image_size in image_sizes
            ]
            # Add 2 to account for the begin-of-image and end-of-image tokens wrapped around each image
            num_image_tokens = [2 + self.image_seq_length * num_patches for num_patches in num_image_patches]
            vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches})

        return MultiModalData(**vision_data)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return tokenizer_input_names + image_processor_input_names


__all__ = ["InternVLProcessor"]