from typing import Optional, Union

import numpy as np

from ...feature_extraction_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import (
    ImagesKwargs,
    MultiModalData,
    ProcessingKwargs,
    ProcessorMixin,
    Unpack,
    VideosKwargs,
)
from ...tokenization_utils_base import PreTokenizedInput, TextInput
from ...utils import logging
from ...video_utils import VideoInput


logger = logging.get_logger(__name__)


class Glm4vVideosProcessorKwargs(VideosKwargs, total=False):
    fps: Union[list[float], float]


class Glm4vImagesKwargs(ImagesKwargs):
    patch_size: Optional[int]
    temporal_patch_size: Optional[int]
    merge_size: Optional[int]


class Glm4vProcessorKwargs(ProcessingKwargs, total=False):
    images_kwargs: Glm4vImagesKwargs
    videos_kwargs: Glm4vVideosProcessorKwargs

    _defaults = {
        "text_kwargs": {
            "padding": False,
            "return_mm_token_type_ids": False,
        },
        "videos_kwargs": {"return_metadata": True},
    }


class Glm4vProcessor(ProcessorMixin):
    r"""
    Constructs a GLM-4V processor which wraps a GLM-4V image processor and a GLM-4 tokenizer into a single processor.
    See [`~Glm4vProcessor.__call__`] and [`~Glm4vProcessor.decode`] for more information.
    Args:
        image_processor ([`Glm4vImageProcessor`], *optional*):
            The image processor is a required input.
        tokenizer ([`PreTrainedTokenizerFast`], *optional*):
            The tokenizer is a required input.
        video_processor ([`Glm4vVideoProcessor`], *optional*):
            The video processor is a required input.
        chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
            in a chat into a tokenizable string.
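
    Example (an illustrative sketch, not an official snippet; the checkpoint name below is a placeholder for any
    GLM-4V checkpoint on the Hub that ships this processor):

    >>> from PIL import Image
    >>> from transformers import AutoProcessor

    >>> processor = AutoProcessor.from_pretrained("zai-org/GLM-4.1V-9B-Thinking")  # placeholder checkpoint name
    >>> image = Image.new("RGB", (448, 448))
    >>> prompt = f"{processor.image_token} Describe this image."
    >>> inputs = processor(images=image, text=prompt, return_tensors="pt")
    >>> # `input_ids`, `attention_mask`, `pixel_values` and `image_grid_thw` are returned for this call.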
    """

    attributes = ["image_processor", "tokenizer", "video_processor"]
    image_processor_class = "AutoImageProcessor"
    video_processor_class = "AutoVideoProcessor"
    tokenizer_class = ("PreTrainedTokenizer", "PreTrainedTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, video_processor=None, chat_template=None, **kwargs):
        super().__init__(image_processor, tokenizer, video_processor, chat_template=chat_template)
        self.image_token = "<|image|>" if not hasattr(tokenizer, "image_token") else tokenizer.image_token
        self.video_token = "<|video|>" if not hasattr(tokenizer, "video_token") else tokenizer.video_token
        self.image_token_id = (
            tokenizer.image_token_id
            if getattr(tokenizer, "image_token_id", None)
            else tokenizer.convert_tokens_to_ids(self.image_token)
        )
        self.video_token_id = (
            tokenizer.video_token_id
            if getattr(tokenizer, "video_token_id", None)
            else tokenizer.convert_tokens_to_ids(self.video_token)
        )

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
        videos: VideoInput = None,
        **kwargs: Unpack[Glm4vProcessorKwargs],
    ) -> BatchFeature:
        """
        Main method to prepare for the model one or several sequence(s) and image(s). This method forwards the `text`
        and `kwargs` arguments to PreTrainedTokenizerFast's [`~PreTrainedTokenizerFast.__call__`] if `text` is not `None` to encode
        the text.

        Args:
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. Both channels-first and channels-last formats are supported.
            text (`str`, `List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            videos (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`):
                The video or batch of videos to be prepared. Each video can be a 4D NumPy array or PyTorch
                tensor, or a nested list of 3D frames. Both channels-first and channels-last formats are supported.
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:
                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.
                - `'jax'`: Return JAX `jnp.ndarray` objects.

        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:

            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
              `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
            - **pixel_values_videos** -- Pixel values of videos to be fed to a model. Returned when `videos` is not `None`.
            - **image_grid_thw** -- List of image 3D grid in LLM. Returned when `images` is not `None`.
            - **video_grid_thw** -- List of video 3D grid in LLM. Returned when `videos` is not `None`.
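
        Example (an illustrative sketch; `processor` is assumed to be an already-loaded `Glm4vProcessor` and the
        video below is dummy data, not a real clip):

        >>> import numpy as np

        >>> video = np.zeros((8, 3, 112, 112), dtype=np.uint8)  # (num_frames, channels, height, width)
        >>> prompt = f"{processor.video_token} What happens in this video?"
        >>> inputs = processor(text=prompt, videos=video, return_tensors="pt")
        >>> # `pixel_values_videos` and `video_grid_thw` are returned alongside the tokenized text.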
        """
        output_kwargs = self._merge_kwargs(
            Glm4vProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )

        if images is not None:
            image_inputs = self.image_processor(images=images, **output_kwargs["images_kwargs"])
            image_grid_thw = image_inputs["image_grid_thw"]
        else:
            image_inputs = {}
            image_grid_thw = None

        if videos is not None:
            videos_inputs = self.video_processor(videos=videos, **output_kwargs["videos_kwargs"])
            # Metadata is always requested from the video processor (see `_defaults`); keep it in the returned
            # features only if the caller explicitly asked for it.
            if "return_metadata" not in kwargs:
                video_metadata = videos_inputs.pop("video_metadata")
            else:
                video_metadata = videos_inputs["video_metadata"]
            video_grid_thw = videos_inputs["video_grid_thw"]
        else:
            videos_inputs = {}
            video_grid_thw = None

        if not isinstance(text, list):
            text = [text]
        text = text.copy()  # the loops below modify `text` in place

        if image_grid_thw is not None:
            # Expand each image token into one placeholder per merged patch, then restore the literal token.
            merge_length = self.image_processor.merge_size**2
            index = 0
            for i in range(len(text)):
                while self.image_token in text[i]:
                    num_image_tokens = image_grid_thw[index].prod() // merge_length
                    text[i] = text[i].replace(self.image_token, "<|placeholder|>" * num_image_tokens, 1)
                    index += 1
                text[i] = text[i].replace("<|placeholder|>", self.image_token)

        if video_grid_thw is not None:
            # Videos are rendered frame by frame: every frame becomes
            # `<|begin_of_image|>{image_token}<|end_of_image|>{timestamp}` and its image tokens are then
            # expanded into placeholders exactly like still images.
            merge_length = self.video_processor.merge_size**2
            video_index = 0
            for i in range(len(text)):
                while self.video_token in text[i]:
                    num_frames = video_grid_thw[video_index][0]
                    video_structure = ""

                    metadata = video_metadata[video_index]
                    if metadata.fps is None:
                        logger.warning_once(
                            "GLM-4V requires frame timestamps to construct prompts, but the `fps` of the input video "
                            "could not be inferred. Probably `video_metadata` was missing from inputs and you passed "
                            "pre-sampled frames. Defaulting to `fps=24`. Please provide `video_metadata` for more "
                            "accurate results."
                        )
                    metadata.fps = 24 if metadata.fps is None else metadata.fps
                    # Keep one timestamp per temporal patch (frames are sampled in pairs).
                    timestamps = metadata.timestamps[0::2]

                    unique_timestamps = []
                    for idx in range(0, len(timestamps)):
                        unique_timestamps.append(timestamps[idx])
                    selected_timestamps = unique_timestamps[:num_frames]
                    while len(selected_timestamps) < num_frames:
                        selected_timestamps.append(selected_timestamps[-1] if selected_timestamps else 0)

                    for frame_idx in range(num_frames):
                        timestamp_sec = selected_timestamps[frame_idx]
                        frame_structure = f"<|begin_of_image|>{self.image_token}<|end_of_image|>{timestamp_sec}"
                        video_structure += frame_structure

                    text[i] = text[i].replace(self.video_token, video_structure, 1)
                    num_image_tokens = (
                        video_grid_thw[video_index].prod() // merge_length // video_grid_thw[video_index][0]
                    )
                    for frame_idx in range(num_frames):
                        if self.image_token in text[i]:
                            text[i] = text[i].replace(self.image_token, "<|placeholder|>" * num_image_tokens, 1)
                    video_index += 1
                text[i] = text[i].replace("<|placeholder|>", self.image_token)

        return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
        return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", False)
        text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])
        self._check_special_mm_tokens(text, text_inputs, modalities=["image", "video"])

        if return_mm_token_type_ids:
            array_ids = np.array(text_inputs["input_ids"])
            mm_token_type_ids = np.zeros_like(array_ids)
            mm_token_type_ids[array_ids == self.image_token_id] = 1
            text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()

        return BatchFeature(data={**text_inputs, **image_inputs, **videos_inputs}, tensor_type=return_tensors)

    def _get_num_multimodal_tokens(self, image_sizes=None, video_sizes=None, **kwargs):
        """
        Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
        Args:
            image_sizes (`list[list[int]]`, *optional*):
                The input sizes formatted as (height, width) per each image.
            video_sizes (`list[list[int]]`, *optional*):
                The input sizes formatted as (num_frames, height, width) per each video.
        Returns:
            `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
            input modalities, along with other useful data.
        """
        vision_data = {}
        if image_sizes is not None:
            images_kwargs = Glm4vProcessorKwargs._defaults.get("images_kwargs", {})
            images_kwargs.update(kwargs)
            merge_size = images_kwargs.get("merge_size", None) or self.image_processor.merge_size

            num_image_patches = [
                self.image_processor.get_number_of_image_patches(*image_size, images_kwargs)
                for image_size in image_sizes
            ]
            num_image_tokens = [(num_patches // merge_size**2) for num_patches in num_image_patches]
            vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches})

        if video_sizes is not None:
            videos_kwargs = Glm4vProcessorKwargs._defaults.get("videos_kwargs", {})
            videos_kwargs.update(kwargs)
            # Recompute the merge size here so that video-only calls do not depend on the image branch above.
            merge_size = videos_kwargs.get("merge_size", None) or self.video_processor.merge_size
            num_video_patches = [
                self.video_processor.get_number_of_video_patches(*video_size, videos_kwargs)
                for video_size in video_sizes
            ]
            num_video_tokens = [(num_patches // merge_size**2) for num_patches in num_video_patches]
            vision_data["num_video_tokens"] = num_video_tokens

        return MultiModalData(**vision_data)

    def post_process_image_text_to_text(
        self, generated_outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False, **kwargs
    ):
        """
        Post-process the output of the model to decode the text.

        Args:
            generated_outputs (`torch.Tensor` or `np.ndarray`):
                The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)`
                or `(sequence_length,)`.
            skip_special_tokens (`bool`, *optional*, defaults to `True`):
                Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method.
            clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
                Whether or not to clean up the tokenization spaces. Argument passed to the tokenizer's `batch_decode` method.
            **kwargs:
                Additional arguments to be passed to the tokenizer's `batch_decode method`.

        Returns:
            `list[str]`: The decoded text.
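
        Example (illustrative only; assumes `processor` and `generated_ids` from a prior `model.generate` call):

        >>> decoded = processor.post_process_image_text_to_text(generated_ids)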
        """
        return self.tokenizer.batch_decode(
            generated_outputs,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )


__all__ = ["Glm4vProcessor"]