"""
Processor class for Pixtral.
    )UnionN   )BatchFeature)
ImageInputis_valid_image)MultiModalDataProcessingKwargsProcessorMixinUnpack)PreTokenizedInput	TextInput)is_vision_availablelogging   )get_resize_output_image_sizec                   @   s"   e Zd Zdddi ddidZdS )PixtralProcessorKwargsF)paddingreturn_mm_token_type_idsreturn_tensorspt)text_kwargsimages_kwargsZcommon_kwargsN)__name__
__module____qualname__	_defaults r   r   j/var/www/html/assistant/venv/lib/python3.9/site-packages/transformers/models/pixtral/processing_pixtral.pyr   *   s   r   F)total)returnc                 C   s   t | to| dS )Nhttp)
isinstancestr
startswith)valr   r   r   is_url8   s    r%   c                 C   s   t | pt| S )N)r%   r   )elemr   r   r   is_image_or_image_url=   s    r'   c                       s~   e Zd ZdZddgZdZdZdeed fddZde	e
eeee ee f ee edddZdddZedd Z  ZS )PixtralProcessorab  
    Constructs a Pixtral processor which wraps a Pixtral image processor and a Pixtral tokenizer into a single processor.

    [`PixtralProcessor`] offers all the functionalities of [`CLIPImageProcessor`] and [`LlamaTokenizerFast`]. See the
    [`~PixtralProcessor.__call__`] and [`~PixtralProcessor.decode`] for more information.

    Args:
        image_processor ([`PixtralImageProcessor`], *optional*):
            The image processor is a required input.
        tokenizer ([`LlamaTokenizerFast`], *optional*):
            The tokenizer is a required input.
        patch_size (`int`, *optional*, defaults to 16):
            Patch size from the vision tower.
        spatial_merge_size (`int`, *optional*, defaults to 1):
            The downsampling factor for the spatial merge operation.
        chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
            in a chat into a tokenizable string.
        image_token (`str`, *optional*, defaults to `"[IMG]"`):
            Special token used to denote image location.
        image_break_token (`str`, *optional*, defaults to `"[IMG_BREAK]"`):
            Special token used to denote the end of a line of pixels in an image.
        image_end_token (`str`, *optional*, defaults to `"[IMG_END]"`):
            Special token used to denote the end of an image input.
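
    Example (an illustrative sketch rather than pinned API: the checkpoint name, image URL and prompt format below
    are assumptions; any Pixtral checkpoint that ships a processor config is used the same way):

    >>> from transformers import AutoProcessor
    >>> from PIL import Image
    >>> import requests

    >>> processor = AutoProcessor.from_pretrained("mistral-community/pixtral-12b")
    >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    >>> image = Image.open(requests.get(url, stream=True).raw)

    >>> # each "[IMG]" placeholder in the prompt is expanded into a full grid of image tokens by `__call__`
    >>> inputs = processor(text="<s>[INST]Describe the image.[IMG][/INST]", images=image, return_tensors="pt")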
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(
        self,
        image_processor=None,
        tokenizer=None,
        patch_size: int = 16,
        spatial_merge_size: int = 1,
        chat_template=None,
        image_token="[IMG]",
        image_break_token="[IMG_BREAK]",
        image_end_token="[IMG_END]",
        **kwargs,
    ):
        self.patch_size = patch_size
        self.spatial_merge_size = spatial_merge_size
        self.image_token = image_token
        self.image_token_id = tokenizer.convert_tokens_to_ids(self.image_token)
        self.image_break_token = image_break_token
        self.image_end_token = image_end_token
        self.image_break_token_id = tokenizer.convert_tokens_to_ids(self.image_break_token)
        self.image_end_token_id = tokenizer.convert_tokens_to_ids(self.image_end_token)
        self.image_ids = [self.image_token_id, self.image_break_token_id, self.image_end_token_id]
        super().__init__(image_processor, tokenizer, chat_template=chat_template)

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
        audio=None,
        videos=None,
        **kwargs: Unpack[PixtralProcessorKwargs],
    ) -> BatchFeature:
        """
        Main method to prepare for the model one or several sequence(s) and image(s). This method forwards the `text`
        and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode
        the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
        CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
        of the above two methods for more information.

        Args:
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. Both channels-first and channels-last formats are supported.
            text (`str`, `list[str]`, `list[list[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:

                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.
                - `'jax'`: Return JAX `jnp.ndarray` objects.

        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:

            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
              `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
        Ztokenizer_init_kwargsNr/   r   r   zAInvalid input text. Please provide a string, or a list of stringsZpixel_valuesimage_sizesc                 S   s   g | ]}|D ]}|qqS r   r   ).0Zsublistitemr   r   r   
<listcomp>       z-PixtralProcessor.__call__.<locals>.<listcomp> z<placeholder>r   r   r   r   Fimage)Z
modalitiesZ	input_idsmm_token_type_ids)dataZtensor_type)Z_merge_kwargsr   r*   Zinit_kwargsr/   r0   r)   r!   r"   list	TypeErrorgetiterr2   nextr3   r4   joinappendreplacepopZ_check_special_mm_tokensnparrayZ
zeros_likeisinr5   tolistr   )r8   r<   r=   ZaudioZvideosr9   Zoutput_kwargsr/   Zimage_inputsZprompt_stringsr>   Zreplace_stringssampleheightwidthnum_height_tokensnum_width_tokensZreplace_tokensZreplace_strr   r   Ztext_inputsZ	array_idsrF   r   r   r   __call__w   s`    )





zPixtralProcessor.__call__c                 K   s   i }|durt jdi }|| |ddp6| jj}| j| j }g }|D ]X\}}	tt	
||	df|d |d f||fd\}
}|
| }|| }||d |  qLdgt| }|||d tf i |S )	a  
        Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.

        Args:
            image_sizes (`list[list[int]]`, *optional*):
                The input sizes formatted as (height, width) for each image.

        Returns:
            `MultiModalData`: A `MultiModalData` object holding the number of tokens for each of the provided
            input modalities, along with other useful data.
        """
        vision_data = {}
        if image_sizes is not None:
            images_kwargs = PixtralProcessorKwargs._defaults.get("images_kwargs", {})
            images_kwargs.update(kwargs)

            size = images_kwargs.get("size", None) or self.image_processor.size
            patch_size = self.patch_size * self.spatial_merge_size

            num_image_tokens = []
            for height, width in image_sizes:
                resized_height, resized_width = get_resize_output_image_size(
                    np.zeros((height, width, 3)),
                    size=(size["longest_edge"], size["longest_edge"]),
                    patch_size=(patch_size, patch_size),
                )
                num_height_tokens = resized_height // patch_size
                num_width_tokens = resized_width // patch_size
                # Every row of patches is followed by one break/end token, hence the `+ 1` per row.
                num_image_tokens.append((num_width_tokens + 1) * num_height_tokens)

            num_image_patches = [1] * len(image_sizes)
            vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches})

        return MultiModalData(**vision_data)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return tokenizer_input_names + image_processor_input_names + ["image_sizes"]


__all__ = ["PixtralProcessor"]
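

# ---------------------------------------------------------------------------
# Self-contained sketch (not part of the library API above): it mirrors the
# token expansion that `PixtralProcessor.__call__` performs for a single image
# so the layout can be inspected without loading a checkpoint. The image size
# below is an arbitrary example, and `patch_size=16` with a spatial merge size
# of 1 matches the documented defaults of this processor.
def _sketch_expand_image_tokens(height: int, width: int, patch_size: int = 16) -> list:
    """Return the flat list of placeholder tokens for one image of the given size."""
    num_height_tokens = height // patch_size
    num_width_tokens = width // patch_size
    # One "[IMG]" per patch column, each row terminated by "[IMG_BREAK]"; the
    # final break is replaced by "[IMG_END]", exactly as in `__call__` above.
    tokens = (["[IMG]"] * num_width_tokens + ["[IMG_BREAK]"]) * num_height_tokens
    tokens[-1] = "[IMG_END]"
    return tokens


if __name__ == "__main__":
    # A 64 x 96 image with 16-pixel patches gives a 4 x 6 grid:
    # 6 "[IMG]" per row plus one break/end token, times 4 rows = 28 tokens.
    expanded = _sketch_expand_image_tokens(64, 96)
    assert len(expanded) == (96 // 16 + 1) * (64 // 16)
    print("".join(expanded))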