a
    h                     @   s  d dl Z d dlZd dlZd dlmZ d dlmZ d dlmZm	Z	m
Z
mZ d dlZddlmZ ddlmZmZ ddlmZ dd	lmZmZmZ dd
lmZmZ ddlmZmZmZm Z m!Z!m"Z"m#Z#m$Z$m%Z%m&Z&m'Z'm(Z(m)Z)m*Z* ddl+m,Z, ddl-m.Z. ddl/m0Z0m1Z1m2Z2m3Z3m4Z4m5Z5m6Z6m7Z7m8Z8 e& r0d dl9Z9e( rZe) rNd dl:m;Z< nd dl=m;Z< e*>e?Z@dZAe!deAe.ddG dd deZBe"eBjCeB_CeBjCjDdureBjCjDjEddddeBjC_DdS )    N)deepcopy)partial)AnyCallableOptionalUnion   )custom_object_save)BatchFeatureget_size_dict)BaseImageProcessorFast)ChannelDimensionSizeDictvalidate_kwargs)UnpackVideosKwargs)IMAGE_PROCESSOR_NAMEPROCESSOR_NAMEVIDEO_PROCESSOR_NAME
TensorTypeadd_start_docstrings	copy_funcdownload_urlis_offline_modeis_remote_urlis_torch_availableis_torchcodec_availableis_torchvision_availableis_torchvision_v2_availablelogging)cached_files)requires)	
VideoInputVideoMetadatagroup_videos_by_shapeis_valid_video
load_videomake_batched_metadatamake_batched_videosreorder_videosto_channel_dimension_format)
functionalaQ  
    Args:
        do_resize (`bool`, *optional*, defaults to `self.do_resize`):
            Whether to resize the video's (height, width) dimensions to the specified `size`. Can be overridden by the
            `do_resize` parameter in the `preprocess` method.
        size (`dict`, *optional*, defaults to `self.size`):
            Size of the output video after resizing. Can be overridden by the `size` parameter in the `preprocess`
            method.
        size_divisor (`int`, *optional*, defaults to `self.size_divisor`):
            The size by which to make sure both the height and width can be divided.
        default_to_square (`bool`, *optional*, defaults to `self.default_to_square`):
            Whether to default to a square video when resizing, if size is an int.
        resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
            Resampling filter to use if resizing the video. Only has an effect if `do_resize` is set to `True`. Can be
            overridden by the `resample` parameter in the `preprocess` method.
        do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
            Whether to center crop the video to the specified `crop_size`. Can be overridden by `do_center_crop` in the
            `preprocess` method.
        do_pad (`bool`, *optional*):
            Whether to pad the video to the `(max_height, max_width)` of the videos in the batch.
        crop_size (`dict[str, int]` *optional*, defaults to `self.crop_size`):
            Size of the output video after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess`
            method.
        do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
            Whether to rescale the video by the specified scale `rescale_factor`. Can be overridden by the
            `do_rescale` parameter in the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `self.rescale_factor`):
            Scale factor to use if rescaling the video. Only has an effect if `do_rescale` is set to `True`. Can be
            overridden by the `rescale_factor` parameter in the `preprocess` method.
        do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
            Whether to normalize the video. Can be overridden by the `do_normalize` parameter in the `preprocess`
            method. Can be overridden by the `do_normalize` parameter in the `preprocess` method.
        image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
            Mean to use if normalizing the video. This is a float or list of floats the length of the number of
            channels in the video. Can be overridden by the `image_mean` parameter in the `preprocess` method. Can be
            overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
            Standard deviation to use if normalizing the video. This is a float or list of floats the length of the
            number of channels in the video. Can be overridden by the `image_std` parameter in the `preprocess` method.
            Can be overridden by the `image_std` parameter in the `preprocess` method.
        do_convert_rgb (`bool`, *optional*, defaults to `self.image_std`):
            Whether to convert the video to RGB.
        video_metadata (`VideoMetadata`, *optional*):
            Metadata of the video containing information about total duration, fps and total number of frames.
        do_sample_frames (`int`, *optional*, defaults to `self.do_sample_frames`):
            Whether to sample frames from the video before processing or to process the whole video.
        num_frames (`int`, *optional*, defaults to `self.num_frames`):
            Maximum number of frames to sample when `do_sample_frames=True`.
        fps (`int` or `float`, *optional*, defaults to `self.fps`):
            Target frames to sample per second when `do_sample_frames=True`.
        return_tensors (`str` or `TensorType`, *optional*):
            Returns stacked tensors if set to `pt, otherwise returns a list of tensors.
        data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
            The channel dimension format for the output video. Can be one of:
            - `"channels_first"` or `ChannelDimension.FIRST`: video in (num_channels, height, width) format.
            - `"channels_last"` or `ChannelDimension.LAST`: video in (height, width, num_channels) format.
            - Unset: Use the channel dimension format of the input video.
        input_data_format (`ChannelDimension` or `str`, *optional*):
            The channel dimension format for the input video. If unset, the channel dimension format is inferred
            from the input video. Can be one of:
            - `"channels_first"` or `ChannelDimension.FIRST`: video in (num_channels, height, width) format.
            - `"channels_last"` or `ChannelDimension.LAST`: video in (height, width, num_channels) format.
            - `"none"` or `ChannelDimension.NONE`: video in (height, width) format.
        device (`torch.device`, *optional*):
            The device to process the videos on. If unset, the device is inferred from the input videos.
        return_metadata (`bool`, *optional*):
            Whether to return video metadata or not.
        z!Constructs a base VideoProcessor.)Zvisiontorchvision)backendsc                       s  e Zd ZdZdZdZdZdZdZdZ	dZ
dZdZdZdZdZdZdZdZdZdZdZdZeZdgZee dd fddZed	d
dZdedddZd?e e!e" e!e#e"e$f  dddZ%d@ee#e e&f e!e' e!e( e)d dddZ*dAee!e#e+e,f  e!e+ e)d dddZ-e.e/eee edddZ0dBe)d e'e'e1e!e" e!d e'e1e'e'e$e'e!e#e$e)e$ f  e!e#e$e)e$ f  e!e#e+e2f  edddZ3e4dCe#e+e5j6f e!e#e+e5j6f  e'e'e!e#e+e'f  e+d!d"d#Z7dDe#e+e5j6f e'd$d%d&Z8e4e#e+e5j6f e9e&e+e:f e&e+e:f f d'd(d)Z;e4e&e+e:f d*d+d,Z<e&e+e:f d	d-d.Z=e+d	d/d0Z>e#e+e5j6f d1d2d3Z?d4d5 Z@e4e#e+e5j6f d6d7d8ZAe4dEd:d;ZBdFe#e+e)e+ e)e)e+  f d<d=d>ZC  ZDS )GBaseVideoProcessorNTgp?Fpixel_values_videos)kwargsreturnc                    s6  t    |dd | _| D ]^\}}zt| || W q  ty| } z,td| d| d|   |W Y d }~q d }~0 0 q |d| j	}|d urt
||d| jdnd | _	|d| j}|d urt
|dd	nd | _t| jj | _| jD ]>}||d urt| |||  qt| |tt| |d  qd S )
Nprocessor_classz
Can't set z with value z for sizedefault_to_square)r3   r4   	crop_size)
param_name)super__init__pop_processor_classitemssetattrAttributeErrorloggererrorr3   r   r4   r5   listvalid_kwargs__annotations__keysmodel_valid_processing_keysgetr   getattr)selfr0   keyvalueerrr3   r5   	__class__ _/var/www/html/assistant/venv/lib/python3.9/site-packages/transformers/video_processing_utils.pyr8      s(    

zBaseVideoProcessor.__init__)r1   c                 K   s   | j |fi |S )N)
preprocess)rG   videosr0   rM   rM   rN   __call__   s    zBaseVideoProcessor.__call__ztorch.Tensor)videor1   c                 C   s   t |}|jd dks8|ddddddf dk  s<|S |ddddddf d }d|ddddddf  d |ddddddf |dddddddf   }|S )z
        Converts a video to RGB format.

        Args:
            video (`"torch.Tensor"`):
                The video to convert.

        Returns:
            `torch.Tensor`: The converted video.
           .N   g     o@r   )FZgrayscale_to_rgbshapeany)rG   rR   alpharM   rM   rN   convert_to_rgb   s    
.Tz!BaseVideoProcessor.convert_to_rgb)metadata
num_framesfpsc                 K   s   |dur|durt d|dur$|n| j}|dur6|n| j}|j}|du r~|dur~|du sd|jdu rlt dt||j | }||krt d| d| d|durtd|||  }ntd| }|S )a%  
        Default sampling function which uniformly samples the desired number of frames between 0 and total number of frames.
        If `fps` is passed along with metadata, `fps` frames per second are sampled uniformty. Arguments `num_frames`
        and `fps` are mutually exclusive.

        Args:
            metadata (`VideoMetadata`):
                Metadata of the video containing information about total duration, fps and total number of frames.
            num_frames (`int`, *optional*):
                Maximum number of frames to sample. Defaults to `self.num_frames`.
            fps (`int` or `float`, *optional*):
                Target frames to sample per second. Defaults to `self.fps`.

        Returns:
            np.ndarray:
                Indices to sample video frames.
        Nzc`num_frames`, `fps`, and `sample_indices_fn` are mutually exclusive arguments, please use only one!zAsked to sample `fps` frames per second but no video metadata was provided which is required when sampling with `fps`. Please pass in `VideoMetadata` object or use a fixed `num_frames` per input videoz(Video can't be sampled. The `num_frames=z` exceeds `total_num_frames=z`. r   )
ValueErrorr\   r]   total_num_framesinttorchZarange)rG   r[   r\   r]   r0   r_   indicesrM   rM   rN   sample_frames   s*    z BaseVideoProcessor.sample_frames)rP   video_metadatado_sample_framessample_indices_fnr1   c           
      C   s   t |}t||d}t|d rr|rrg }g }t||D ]0\}}||d}	|	|_|||	  || q6|}|}nNt|d st|d trdd | |D }|rt	dn| j
||d\}}||fS )zB
        Decode input videos and sample frames if needed.
        )rd   r   )r[   c                 S   s$   g | ]}t jd d |D ddqS )c                 S   s   g | ]}t |qS rM   )rV   Zpil_to_tensor).0imagerM   rM   rN   
<listcomp>@      zKBaseVideoProcessor._decode_and_sample_videos.<locals>.<listcomp>.<listcomp>r   dim)ra   stack)rg   ZimagesrM   rM   rN   ri   ?  s   z@BaseVideoProcessor._decode_and_sample_videos.<locals>.<listcomp>zUSampling frames from a list of images is not supported! Set `do_sample_frames=False`.rf   )r(   r'   r%   zipZframes_indicesappend
isinstancer@   Zfetch_imagesr^   fetch_videos)
rG   rP   rd   re   rf   Zsampled_videosZsampled_metadatarR   r[   rb   rM   rM   rN   _decode_and_sample_videos$  s.    

z,BaseVideoProcessor._decode_and_sample_videos)rP   input_data_formatdevicer1   c                 C   sV   g }|D ]H}t |tjr4t|tj|}t| }|durF|	|}|
| q|S )z:
        Prepare the input videos for processing.
        N)rq   npndarrayr*   r   ZFIRSTra   Z
from_numpy
contiguoustorp   )rG   rP   rt   ru   processed_videosrR   rM   rM   rN   _prepare_input_videosL  s    	
z(BaseVideoProcessor._prepare_input_videos)rP   r0   r1   c                 K   s  t | t| jj dg d | jjD ]}||t| |d  q*|d}|d}|d}|d}|rt| j	fi |nd }| j
||||d\}}| j|||d}| jf i |}| jf i | |d	 |d
}	| jf d|i|}
|	r||
d< |
S )Nreturn_tensors)Zcaptured_kwargsZvalid_processor_keysrt   re   ru   rd   )rd   re   rf   )rP   rt   ru   Zdata_formatreturn_metadatarP   )r   rC   r@   rA   rB   
setdefaultrF   r9   r   rc   rs   r{   Z_further_process_kwargsZ_validate_preprocess_kwargs_preprocess)rG   rP   r0   Z
kwarg_namert   re   ru   rd   rf   r}   Zpreprocessed_videosrM   rM   rN   rO   c  s4    






zBaseVideoProcessor.preprocesszF.InterpolationMode)rP   do_convert_rgb	do_resizer3   size_divisorinterpolationdo_center_cropr5   
do_rescaledo_padrescale_factordo_normalize
image_mean	image_stdr|   r1   c              	   K   s   t |\}}i }| D ]4\}}|r.| |}|rD| j||||d}|||< qt||}t |\}}i }| D ]4\}}|r| ||}| ||	||||}|||< qpt||}|rtj|ddn|}t	d|i|dS )N)r3   r   r   r   rk   r/   )dataZtensor_type)
r$   r;   rZ   resizer)   Zcenter_cropZrescale_and_normalizera   rm   r
   )rG   rP   r   r   r3   r   r   r   r5   r   r   r   r   r   r   r|   r0   Zgrouped_videosZgrouped_videos_indexZresized_videos_groupedrW   Zstacked_videosZresized_videosZprocessed_videos_groupedrz   rM   rM   rN   r     s.    




zBaseVideoProcessor._preprocessmain)pretrained_model_name_or_path	cache_dirforce_downloadlocal_files_onlytokenrevisionc           
      K   s   ||d< ||d< ||d< ||d< | dd}|durTtdt |durPtd|}|durd||d	< | j|fi |\}	}| j|	fi |S )
a  
        Instantiate a type of [`~video_processing_utils.VideoProcessorBase`] from an video processor.

        Args:
            pretrained_model_name_or_path (`str` or `os.PathLike`):
                This can be either:

                - a string, the *model id* of a pretrained video hosted inside a model repo on
                  huggingface.co.
                - a path to a *directory* containing a video processor file saved using the
                  [`~video_processing_utils.VideoProcessorBase.save_pretrained`] method, e.g.,
                  `./my_model_directory/`.
                - a path or url to a saved video processor JSON *file*, e.g.,
                  `./my_model_directory/video_preprocessor_config.json`.
            cache_dir (`str` or `os.PathLike`, *optional*):
                Path to a directory in which a downloaded pretrained model video processor should be cached if the
                standard cache should not be used.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force to (re-)download the video processor files and override the cached versions if
                they exist.
            resume_download:
                Deprecated and ignored. All downloads are now resumed by default when possible.
                Will be removed in v5 of Transformers.
            proxies (`dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
            token (`str` or `bool`, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
                the token generated when running `hf auth login` (stored in `~/.huggingface`).
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
                git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
                identifier allowed by git.


                <Tip>

                To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.

                </Tip>

            return_unused_kwargs (`bool`, *optional*, defaults to `False`):
                If `False`, then this function returns just the final video processor object. If `True`, then this
                functions returns a `Tuple(video_processor, unused_kwargs)` where *unused_kwargs* is a dictionary
                consisting of the key/value pairs whose keys are not video processor attributes: i.e., the part of
                `kwargs` which has not been used to update `video_processor` and is otherwise ignored.
            subfolder (`str`, *optional*, defaults to `""`):
                In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
                specify the folder name here.
            kwargs (`dict[str, Any]`, *optional*):
                The values in kwargs of any keys which are video processor attributes will be used to override the
                loaded values. Behavior concerning key/value pairs whose keys are *not* video processor attributes is
                controlled by the `return_unused_kwargs` keyword parameter.

        Returns:
            A video processor of type [`~video_processing_utils.ImagVideoProcessorBase`].

        Examples:

        ```python
        # We can't instantiate directly the base class *VideoProcessorBase* so let's show the examples on a
        # derived class: *LlavaOnevisionVideoProcessor*
        video_processor = LlavaOnevisionVideoProcessor.from_pretrained(
            "llava-hf/llava-onevision-qwen2-0.5b-ov-hf"
        )  # Download video_processing_config from huggingface.co and cache.
        video_processor = LlavaOnevisionVideoProcessor.from_pretrained(
            "./test/saved_model/"
        )  # E.g. video processor (or model) was saved using *save_pretrained('./test/saved_model/')*
        video_processor = LlavaOnevisionVideoProcessor.from_pretrained("./test/saved_model/video_preprocessor_config.json")
        video_processor = LlavaOnevisionVideoProcessor.from_pretrained(
            "llava-hf/llava-onevision-qwen2-0.5b-ov-hf", do_normalize=False, foo=False
        )
        assert video_processor.do_normalize is False
        video_processor, unused_kwargs = LlavaOnevisionVideoProcessor.from_pretrained(
            "llava-hf/llava-onevision-qwen2-0.5b-ov-hf", do_normalize=False, foo=False, return_unused_kwargs=True
        )
        assert video_processor.do_normalize is False
        assert unused_kwargs == {"foo": False}
        ```r   r   r   r   use_auth_tokenNrThe `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.V`token` and `use_auth_token` are both specified. Please set only the argument `token`.r   )r9   warningswarnFutureWarningr^   get_video_processor_dict	from_dict)
clsr   r   r   r   r   r   r0   r   video_processor_dictrM   rM   rN   from_pretrained  s&    Zz"BaseVideoProcessor.from_pretrained)save_directorypush_to_hubc           	      K   s  | dd}|dur>tdt |ddur6td||d< tj|rZt	d| dtj
|dd	 |r| d
d}| d|tjjd }| j|fi |}| |}| jdurt| || d tj|t}| | td|  |r| j|||||dd |gS )aq  
        Save an video processor object to the directory `save_directory`, so that it can be re-loaded using the
        [`~video_processing_utils.VideoProcessorBase.from_pretrained`] class method.

        Args:
            save_directory (`str` or `os.PathLike`):
                Directory where the video processor JSON file will be saved (will be created if it does not exist).
            push_to_hub (`bool`, *optional*, defaults to `False`):
                Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
                repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
                namespace).
            kwargs (`dict[str, Any]`, *optional*):
                Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
        r   Nr   r   r   zProvided path (z#) should be a directory, not a fileT)exist_okcommit_messagerepo_id)configzVideo processor saved in )r   r   )r9   r   r   r   rE   r^   ospathisfileAssertionErrormakedirssplitsepZ_create_repoZ_get_files_timestamps_auto_classr	   joinr   to_json_filer>   infoZ_upload_modified_files)	rG   r   r   r0   r   r   r   Zfiles_timestampsZoutput_video_processor_filerM   rM   rN   save_pretrained2  sB    


z"BaseVideoProcessor.save_pretrained)r   r1   c                 K   sN  | dd}| dd}| dd}| dd}| dd}| dd}| d	d}	| d
d}
| dd}| dd}| dd}|durtdt |durtd|}d|d}|dur||d< t r|	std d}	t|}t	j
|}t	j
|r
|}d}nt|r"|}t|}n|t}z2t|tttg|||||	|||
|dd}|d }W nD tyn    Y n0 ty   td| d| dt dY n0 zRt|ddd}| }W d   n1 s0    Y  t|}|d|}W n& tjy   td | d!Y n0 |r0td"|  ntd"| d#|  ||fS )$a  
        From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a
        video processor of type [`~video_processing_utils.VideoProcessorBase`] using `from_dict`.

        Parameters:
            pretrained_model_name_or_path (`str` or `os.PathLike`):
                The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.
            subfolder (`str`, *optional*, defaults to `""`):
                In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
                specify the folder name here.

        Returns:
            `tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the video processor object.
        r   Nr   Fresume_downloadproxiesr   r   r   r   	subfolder Z_from_pipelineZ
_from_autor   r   video processor)	file_typefrom_auto_classZusing_pipelinez+Offline mode: forcing local_files_only=TrueT)	filenamesr   r   r   r   r   r   
user_agentr   r   Z%_raise_exceptions_for_missing_entriesr   z Can't load video processor for 'z'. If you were trying to load it from 'https://huggingface.co/models', make sure you don't have a local directory with the same name. Otherwise, make sure 'z2' is the correct path to a directory containing a z filerutf-8encodingvideo_processorz"It looks like the config file at 'z' is not a valid JSON file.zloading configuration file z from cache at )r9   r   r   r   r^   r   r>   r   strr   r   isdirr   r   r   r   r    r   r   EnvironmentError	ExceptionOSErroropenreadjsonloadsrE   JSONDecodeError)r   r   r0   r   r   r   r   r   r   r   r   r   Zfrom_pipeliner   r   is_localZresolved_video_processor_fileZvideo_processor_fileZresolved_video_processor_filesreadertextr   rM   rM   rN   r   o  s    





(


z+BaseVideoProcessor.get_video_processor_dict)r   c                 K   s   |  }|dd}d|v r2d|v r2|d|d< d|v rPd|v rP|d|d< | f i |}g }| D ](\}}t||rjt||| || qj|D ]}||d qtd|  |r||fS |S dS )a  
        Instantiates a type of [`~video_processing_utils.VideoProcessorBase`] from a Python dictionary of parameters.

        Args:
            video_processor_dict (`dict[str, Any]`):
                Dictionary that will be used to instantiate the video processor object. Such a dictionary can be
                retrieved from a pretrained checkpoint by leveraging the
                [`~video_processing_utils.VideoProcessorBase.to_dict`] method.
            kwargs (`dict[str, Any]`):
                Additional parameters from which to initialize the video processor object.

        Returns:
            [`~video_processing_utils.VideoProcessorBase`]: The video processor object instantiated from those
            parameters.
        return_unused_kwargsFr3   r5   NzVideo processor )copyr9   r;   hasattrr<   rp   r>   r   )r   r   r0   r   r   Z	to_removerH   rI   rM   rM   rN   r     s$    
zBaseVideoProcessor.from_dictc                 C   s2   t | j}|dd |dd | jj|d< |S )z
        Serializes this instance to a Python dictionary.

        Returns:
            `dict[str, Any]`: Dictionary of all the attributes that make up this video processor instance.
        rD   NZ_valid_kwargs_namesZvideo_processor_type)r   __dict__r9   rL   __name__)rG   outputrM   rM   rN   to_dict  s
    
zBaseVideoProcessor.to_dictc                 C   sb   |   }| D ] \}}t|tjr| ||< q|dd}|durN||d< tj|dddd S )z
        Serializes this instance to a JSON string.

        Returns:
            `str`: String containing all the attributes that make up this feature_extractor instance in JSON format.
        r:   Nr2      T)indent	sort_keys
)	r   r;   rq   rv   rw   tolistr9   r   dumps)rG   
dictionaryrH   rI   r:   rM   rM   rN   to_json_string  s    z!BaseVideoProcessor.to_json_string)json_file_pathc                 C   s@   t |ddd}||   W d   n1 s20    Y  dS )z
        Save this instance to a JSON file.

        Args:
            json_file_path (`str` or `os.PathLike`):
                Path to the JSON file in which this image_processor instance's parameters will be saved.
        wr   r   N)r   writer   )rG   r   writerrM   rM   rN   r   .  s    zBaseVideoProcessor.to_json_filec                 C   s   | j j d|   S )N )rL   r   r   )rG   rM   rM   rN   __repr__9  s    zBaseVideoProcessor.__repr__)	json_filec                 C   sN   t |ddd}| }W d   n1 s,0    Y  t|}| f i |S )a  
        Instantiates a video processor of type [`~video_processing_utils.VideoProcessorBase`] from the path to a JSON
        file of parameters.

        Args:
            json_file (`str` or `os.PathLike`):
                Path to the JSON file containing the parameters.

        Returns:
            A video processor of type [`~video_processing_utils.VideoProcessorBase`]: The video_processor object
            instantiated from that JSON file.
        r   r   r   N)r   r   r   r   )r   r   r   r   r   rM   rM   rN   from_json_file<  s    &
z!BaseVideoProcessor.from_json_fileAutoVideoProcessorc                 C   sD   t |ts|j}ddlm  m} t||s:t| d|| _dS )a	  
        Register this class with a given auto class. This should only be used for custom video processors as the ones
        in the library are already mapped with `AutoVideoProcessor `.

        <Tip warning={true}>

        This API is experimental and may have some slight breaking changes in the next releases.

        </Tip>

        Args:
            auto_class (`str` or `type`, *optional*, defaults to `"AutoVideoProcessor "`):
                The auto class to register this new video processor with.
        r   Nz is not a valid auto class.)	rq   r   r   Ztransformers.models.automodelsautor   r^   r   )r   Z
auto_classZauto_modulerM   rM   rN   register_for_auto_classO  s    

z*BaseVideoProcessor.register_for_auto_class)video_url_or_urlsc                    sP   d}t  std d}t|tr>tt fdd|D  S t|| dS dS )z
        Convert a single or a list of urls into the corresponding `np.array` objects.

        If a single url is passed, the return value will be a single object. If a list is passed a list of objects is
        returned.
        Z
torchcodecz`torchcodec` is not installed and cannot be used to decode the video by default. Falling back to `torchvision`. Note that `torchvision` decoding is deprecated and will be removed in future versions. r,   c                    s   g | ]}j | d qS )rn   )rr   )rg   xrf   rG   rM   rN   ri   y  rj   z3BaseVideoProcessor.fetch_videos.<locals>.<listcomp>)backendrf   N)r   r   r   rq   r@   ro   r&   )rG   r   rf   r   rM   r   rN   rr   i  s    
zBaseVideoProcessor.fetch_videos)NN)NN)NN)N)NFFNr   )F)r   )N)Er   
__module____qualname__r   Zresampler   r   r3   r   r4   r5   r   r   r   r   r   r   r   re   r]   r\   rd   r}   r   rA   Zmodel_input_namesr   r8   r
   rQ   r"   rZ   r#   r   r`   r   floatrc   dictboolr   r@   rs   r   r   r{   r   BASE_VIDEO_PROCESSOR_DOCSTRINGrO   r   r   r   classmethodr   PathLiker   r   tupler   r   r   r   r   r   r   r   r   rr   __classcell__rM   rM   rK   rN   r.      s     9  
+  8 2     q=n,r.   r   r   zvideo processor file)objectZobject_classZobject_files)Fr   r   r   r   r   	functoolsr   typingr   r   r   r   numpyrv   Zdynamic_module_utilsr	   Zimage_processing_utilsr
   r   Zimage_processing_utils_fastr   Zimage_utilsr   r   r   Zprocessing_utilsr   r   utilsr   r   r   r   r   r   r   r   r   r   r   r   r   r   Z	utils.hubr    Zutils.import_utilsr!   Zvideo_utilsr"   r#   r$   r%   r&   r'   r(   r)   r*   ra   Ztorchvision.transforms.v2r+   rV   Ztorchvision.transformsZ
get_loggerr   r>   r   r.   r   __doc__formatrM   rM   rM   rN   <module>   sN   @,
F     h