"""PyTorch ViViT model."""

from typing import Callable, Optional

import torch
import torch.utils.checkpoint
from torch import nn

from ...activations import ACT2FN
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import TransformersKwargs, auto_docstring, logging, torch_int
from ...utils.generic import can_return_tuple, check_model_inputs
from .configuration_vivit import VivitConfig


logger = logging.get_logger(__name__)


class VivitTubeletEmbeddings(nn.Module):
    """
    Construct Vivit Tubelet embeddings.

    This module turns a batch of videos of shape (batch_size, num_frames, num_channels, height, width) into a tensor of
    shape (batch_size, seq_len, hidden_size) to be consumed by a Transformer encoder.

    The seq_len (the number of patches) equals (number of frames // tubelet_size[0]) * (height // tubelet_size[1]) *
    (width // tubelet_size[2]).
    """

    def __init__(self, config: VivitConfig):
        super().__init__()
        self.num_frames = config.num_frames
        self.image_size = config.image_size
        self.patch_size = config.tubelet_size
        self.num_patches = (
            (self.image_size // self.patch_size[2])
            * (self.image_size // self.patch_size[1])
            * (self.num_frames // self.patch_size[0])
        )
        self.embed_dim = config.hidden_size

        self.projection = nn.Conv3d(
            config.num_channels, config.hidden_size, kernel_size=config.tubelet_size, stride=config.tubelet_size
        )

    def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
        batch_size, num_frames, num_channels, height, width = pixel_values.shape
        if not interpolate_pos_encoding and (height != self.image_size or width != self.image_size):
            raise ValueError(
                f"Image image size ({height}*{width}) doesn't match model ({self.image_size}*{self.image_size})."
            )

        # permute to (batch_size, num_channels, num_frames, height, width)
        pixel_values = pixel_values.permute(0, 2, 1, 3, 4)

        x = self.projection(pixel_values)
        # flatten the (frames, height, width) tubelet grid into a single patch sequence
        x = x.flatten(2).transpose(1, 2)
        return x


class VivitEmbeddings(nn.Module):
    """
    Vivit Embeddings.

    Creates embeddings from a video using VivitTubeletEmbeddings, adds CLS token and positional embeddings.
    """

    def __init__(self, config: VivitConfig):
        super().__init__()

        self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
        self.patch_embeddings = VivitTubeletEmbeddings(config)

        self.position_embeddings = nn.Parameter(
            torch.zeros(1, self.patch_embeddings.num_patches + 1, config.hidden_size)
        )
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.patch_size = config.tubelet_size[1:]
        self.config = config

    def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
        """
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
        images. This method is also adapted to support torch.jit tracing.

        Adapted from:
        - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
        - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
        """
        num_patches = embeddings.shape[1] - 1
        num_positions = self.position_embeddings.shape[1] - 1

        # always interpolate when tracing so the exported model works for dynamic input shapes
        if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
            return self.position_embeddings

        class_pos_embed = self.position_embeddings[:, :1]
        patch_pos_embed = self.position_embeddings[:, 1:]

        dim = embeddings.shape[-1]

        new_height = height // self.patch_size[0]
        new_width = width // self.patch_size[1]

        sqrt_num_positions = torch_int(num_positions**0.5)
        patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
        patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)

        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed,
            size=(new_height, new_width),
            mode="bicubic",
            align_corners=False,
        )

        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)

        return torch.cat((class_pos_embed, patch_pos_embed), dim=1)

    def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
        batch_size, num_frames, num_channels, height, width = pixel_values.shape
        embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)

        cls_tokens = self.cls_token.tile([batch_size, 1, 1])
        embeddings = torch.cat((cls_tokens, embeddings), dim=1)

        # add positional encoding to each token
        if interpolate_pos_encoding:
            embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
        else:
            embeddings = embeddings + self.position_embeddings

        embeddings = self.dropout(embeddings)

        return embeddings


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    # Take the dot product between "query" and "key" to get the raw attention scores.
    attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling

    # Normalize the attention scores to probabilities (softmax in float32 for numerical stability).
    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)

    # This is actually dropping out entire tokens to attend to, which might
    # seem a bit unusual, but is taken from the original Transformer paper.
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)

    # Mask heads if we want to
    if attention_mask is not None:
        attn_weights = attn_weights * attention_mask

    attn_output = torch.matmul(attn_weights, value)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights
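

# Shape sketch for eager_attention_forward: query/key/value arrive as
# (batch_size, num_attention_heads, seq_len, attention_head_size), attn_weights is
# (batch_size, num_attention_heads, seq_len, seq_len), and the returned attn_output is transposed
# back to (batch_size, seq_len, num_attention_heads, attention_head_size) so the caller can merge
# the last two dimensions into all_head_size.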
S )	VivitSelfAttentionr   c                    s   t    |j|j dkr>t|ds>td|j d|j d|| _|j| _t|j|j | _| j| j | _	|j
| _| jd | _d| _tj|j| j	|jd| _tj|j| j	|jd| _tj|j| j	|jd| _d S )	Nr   Zembedding_sizezThe hidden size z4 is not a multiple of the number of attention heads .g      F)bias)r   r   r#   num_attention_headshasattrr4   r   rY   attention_head_sizeall_head_sizeZattention_probs_dropout_probdropout_probr`   	is_causalr   LinearZqkv_biasr\   r]   r^   r'   r)   r+   r,   r      s"    

zVivitSelfAttention.__init__Nhidden_states	head_maskr0   c              
   C   s   |j d }|d| j| jf}| |j| dd}| |j| dd}| |j| dd}t}| j	j
dkr~t| j	j
 }|| ||||| j| j| jsdn| jd\}	}
|	 d d | jf }|	|}	|	|
fS )	Nr   rN   r   r   eagerrZ   )rr   r`   rL   ra   )r3   rm   ro   r]   rV   r7   r^   r\   ri   r   Z_attn_implementationr   rr   r`   rd   rq   rO   rp   rT   )r(   ru   rv   r8   Z	new_shapeZ	key_layerZvalue_layerZquery_layerZattention_interfaceZcontext_layerZattention_probsZnew_context_layer_shaper+   r+   r,   r<      s*    


zVivitSelfAttention.forward)N)r=   r>   r?   r   r   rA   rB   r   tupler<   rD   r+   r+   r)   r,   rj      s    rj   c                       s>   e Zd ZdZed fddZejejejdddZ  Z	S )VivitSelfOutputz


class VivitSelfOutput(nn.Module):
    """
    The residual connection is defined in VivitLayer instead of here (as is the case with other models), due to the
    layernorm applied before each block.
    """

    def __init__(self, config: VivitConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)

        return hidden_states
ej	 ej	dd	d
Z  ZS )VivitAttentionr   c                    s*   t    t|| _t|| _t | _d S rz   )r   r   rj   	attentionry   outputsetpruned_headsr'   r)   r+   r,   r     s    


zVivitAttention.__init__)headsc                 C   s   t |dkrd S t|| jj| jj| j\}}t| jj|| j_t| jj|| j_t| jj	|| j_	t| j
j|dd| j
_| jjt | | j_| jj| jj | j_| j|| _d S )Nr   r   rQ   )lenr   r   rm   ro   r   r   r\   r]   r^   r   r{   rp   union)r(   r   indexr+   r+   r,   prune_heads  s    zVivitAttention.prune_headsNrt   c                 C   s    |  ||\}}| ||}|S rz   )r   r   )r(   ru   rv   Zself_attn_output_r   r+   r+   r,   r<      s    zVivitAttention.forward)N)r=   r>   r?   r   r   r   rY   r   rA   rB   r   r<   rD   r+   r+   r)   r,   r     s   r   c                       s6   e Zd Zed fddZejejdddZ  ZS )VivitIntermediater   c                    sP   t    t|j|j| _t|j| _	t
|jtrDt|j | _n|j| _d S rz   )r   r   r   rs   r#   intermediate_sizer{   rJ   rK   rL   
isinstanceZ
hidden_actstrr   intermediate_act_fnr'   r)   r+   r,   r   '  s    
zVivitIntermediate.__init__ru   r0   c                 C   s"   |  |}| |}| |}|S rz   )r{   r   rL   )r(   ru   r+   r+   r,   r<   0  s    


zVivitIntermediate.forward	r=   r>   r?   r   r   rA   rB   r<   rD   r+   r+   r)   r,   r   &  s   	r   c                       s:   e Zd Zed fddZejejejdddZ  ZS )VivitOutputr   c                    s.   t    t|j|j| _t|j| _	d S rz   )
r   r   r   rs   r   r#   r{   rJ   rK   rL   r'   r)   r+   r,   r   9  s    
zVivitOutput.__init__r|   c                 C   s    |  |}| |}|| }|S rz   r~   r   r+   r+   r,   r<   >  s    

zVivitOutput.forwardr   r+   r+   r)   r,   r   8  s   r   c                       sD   e Zd ZdZed fddZd	ejeej ejdddZ	  Z
S )



class VivitLayer(GradientCheckpointingLayer):
    """This corresponds to the EncoderBlock class in the scenic/vivit implementation."""

    def __init__(self, config: VivitConfig) -> None:
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = VivitAttention(config)
        self.intermediate = VivitIntermediate(config)
        self.output = VivitOutput(config)
        self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        # in Vivit, layernorm is applied before self-attention
        hidden_states_norm = self.layernorm_before(hidden_states)
        attention_output = self.attention(hidden_states_norm, head_mask)

        # first residual connection
        hidden_states = attention_output + hidden_states

        # in Vivit, layernorm is also applied after self-attention
        layer_output = self.layernorm_after(hidden_states)
        layer_output = self.intermediate(layer_output)

        # second residual connection is done here
        layer_output = self.output(layer_output, hidden_states)

        return layer_output


class VivitEncoder(nn.Module):
    def __init__(self, config: VivitConfig) -> None:
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([VivitLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
    ) -> BaseModelOutput:
        for i, layer_module in enumerate(self.layer):
            layer_head_mask = head_mask[i] if head_mask is not None else None
            hidden_states = layer_module(hidden_states, layer_head_mask)

        return BaseModelOutput(last_hidden_state=hidden_states)


class VivitPooler(nn.Module):
    def __init__(self, config: VivitConfig):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # "pool" the model by simply taking the hidden state corresponding to the first (CLS) token
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output
dZdZeedZdd ZdS )	VivitPreTrainedModelr   vivitr.   T)ru   
attentionsc                 C   s   t |tjtjfr@|jjjd| jjd |j	dur|j	j
  nt |tjr|jjjd| jjd |jdur|jj|j 
  nJt |tjr|j	j
  |jjd n"t |tr|jj
  |jj
  dS )zInitialize the weightsrZ   )meanZstdNg      ?)r   r   rs   r$   weightdataZnormal_r   Zinitializer_rangerl   Zzero_Z	EmbeddingZpadding_idxr   Zfill_rE   rG   rI   )r(   r[   r+   r+   r,   _init_weights  s    


z"VivitPreTrainedModel._init_weightsN)r=   r>   r?   r   __annotations__Zbase_model_prefixZmain_input_nameZsupports_gradient_checkpointingZ_no_split_modulesZ_supports_sdpaZ_supports_flash_attnZ_supports_flex_attnZ_supports_attention_backendr   rj   Z_can_record_outputsr   r+   r+   r+   r,   r     s   


@auto_docstring
class VivitModel(VivitPreTrainedModel):
    def __init__(self, config: VivitConfig, add_pooling_layer: bool = True):
        r"""
        add_pooling_layer (bool, *optional*, defaults to `True`):
            Whether to add a pooling layer
        """
        super().__init__(config)
        self.config = config

        self.embeddings = VivitEmbeddings(config)
        self.encoder = VivitEncoder(config)

        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.pooler = VivitPooler(config) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model.

        Args:
            heads_to_prune:
                dict of {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @check_model_inputs
    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        interpolate_pos_encoding: bool = False,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPooling:
        r"""
        Examples:

        ```python
        >>> import av
        >>> import numpy as np

        >>> from transformers import VivitImageProcessor, VivitModel
        >>> from huggingface_hub import hf_hub_download

        >>> np.random.seed(0)


        >>> def read_video_pyav(container, indices):
        ...     '''
        ...     Decode the video with PyAV decoder.
        ...     Args:
        ...         container (`av.container.input.InputContainer`): PyAV container.
        ...         indices (`list[int]`): List of frame indices to decode.
        ...     Returns:
        ...         result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
        ...     '''
        ...     frames = []
        ...     container.seek(0)
        ...     start_index = indices[0]
        ...     end_index = indices[-1]
        ...     for i, frame in enumerate(container.decode(video=0)):
        ...         if i > end_index:
        ...             break
        ...         if i >= start_index and i in indices:
        ...             frames.append(frame)
        ...     return np.stack([x.to_ndarray(format="rgb24") for x in frames])


        >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
        ...     '''
        ...     Sample a given number of frame indices from the video.
        ...     Args:
        ...         clip_len (`int`): Total number of frames to sample.
        ...         frame_sample_rate (`int`): Sample every n-th frame.
        ...         seg_len (`int`): Maximum allowed index of sample's last frame.
        ...     Returns:
        ...         indices (`list[int]`): List of sampled frame indices
        ...     '''
        ...     converted_len = int(clip_len * frame_sample_rate)
        ...     end_idx = np.random.randint(converted_len, seg_len)
        ...     start_idx = end_idx - converted_len
        ...     indices = np.linspace(start_idx, end_idx, num=clip_len)
        ...     indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
        ...     return indices


        >>> # video clip consists of 300 frames (10 seconds at 30 FPS)
        >>> file_path = hf_hub_download(
        ...     repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
        ... )
        >>> container = av.open(file_path)

        >>> # sample 32 frames
        >>> indices = sample_frame_indices(clip_len=32, frame_sample_rate=1, seg_len=container.streams.video[0].frames)
        >>> video = read_video_pyav(container=container, indices=indices)

        >>> image_processor = VivitImageProcessor.from_pretrained("google/vivit-b-16x2-kinetics400")
        >>> model = VivitModel.from_pretrained("google/vivit-b-16x2-kinetics400")

        >>> # prepare video for the model
        >>> inputs = image_processor(list(video), return_tensors="pt")

        >>> # forward pass
        >>> outputs = model(**inputs)
        >>> last_hidden_states = outputs.last_hidden_state
        >>> list(last_hidden_states.shape)
        [1, 3137, 768]
        ```"""
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        # Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)

        encoder_outputs = self.encoder(embedding_output, head_mask=head_mask)
        sequence_output = encoder_outputs.last_hidden_state
        sequence_output = self.layernorm(sequence_output)
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
        )


@auto_docstring(
    custom_intro="""
        ViViT Transformer model with a video classification head on top (a linear layer on top of the final hidden state of the
    [CLS] token) e.g. for Kinetics-400.

        <Tip>

            Note that it's possible to fine-tune ViT on higher resolution images than the ones it has been trained on, by
            setting `interpolate_pos_encoding` to `True` in the forward of the model. This will interpolate the pre-trained
            position embeddings to the higher resolution.

        </Tip>
    )Zcustom_introc                
       sZ   e Zd Zed fddZeed	eej	 eej	 eej
 eee edddZ  ZS )
VivitForVideoClassificationr   c                    sR   t  | |j| _t|dd| _|jdkr<t|j|jnt | _	| 
  d S )NF)r   r   )r   r   Z
num_labelsr   r   r   rs   r#   ZIdentity
classifierr   r'   r)   r+   r,   r   7  s
    $z$VivitForVideoClassification.__init__NF)r.   rv   labelsr/   rh   r0   c           
      K   sr   | j |f||d|}|j}| |dddddf }d}	|dur^| j||| jfi |}	t|	||j|jdS )a  
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).

        Examples:

        ```python
        >>> import av
        >>> import numpy as np
        >>> import torch

        >>> from transformers import VivitImageProcessor, VivitForVideoClassification
        >>> from huggingface_hub import hf_hub_download

        >>> np.random.seed(0)


        >>> def read_video_pyav(container, indices):
        ...     '''
        ...     Decode the video with PyAV decoder.
        ...     Args:
        ...         container (`av.container.input.InputContainer`): PyAV container.
        ...         indices (`list[int]`): List of frame indices to decode.
        ...     Returns:
        ...         result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
        ...     '''
        ...     frames = []
        ...     container.seek(0)
        ...     start_index = indices[0]
        ...     end_index = indices[-1]
        ...     for i, frame in enumerate(container.decode(video=0)):
        ...         if i > end_index:
        ...             break
        ...         if i >= start_index and i in indices:
        ...             frames.append(frame)
        ...     return np.stack([x.to_ndarray(format="rgb24") for x in frames])


        >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
        ...     '''
        ...     Sample a given number of frame indices from the video.
        ...     Args:
        ...         clip_len (`int`): Total number of frames to sample.
        ...         frame_sample_rate (`int`): Sample every n-th frame.
        ...         seg_len (`int`): Maximum allowed index of sample's last frame.
        ...     Returns:
        ...         indices (`list[int]`): List of sampled frame indices
        ...     '''
        ...     converted_len = int(clip_len * frame_sample_rate)
        ...     end_idx = np.random.randint(converted_len, seg_len)
        ...     start_idx = end_idx - converted_len
        ...     indices = np.linspace(start_idx, end_idx, num=clip_len)
        ...     indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
        ...     return indices


        >>> # video clip consists of 300 frames (10 seconds at 30 FPS)
        >>> file_path = hf_hub_download(
        ...     repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
        ... )
        >>> container = av.open(file_path)

        >>> # sample 32 frames
        >>> indices = sample_frame_indices(clip_len=32, frame_sample_rate=4, seg_len=container.streams.video[0].frames)
        >>> video = read_video_pyav(container=container, indices=indices)

        >>> image_processor = VivitImageProcessor.from_pretrained("google/vivit-b-16x2-kinetics400")
        >>> model = VivitForVideoClassification.from_pretrained("google/vivit-b-16x2-kinetics400")

        >>> inputs = image_processor(list(video), return_tensors="pt")

        >>> with torch.no_grad():
        ...     outputs = model(**inputs)
        ...     logits = outputs.logits

        >>> # model predicts one of the 400 Kinetics-400 classes
        >>> predicted_label = logits.argmax(-1).item()
        >>> print(model.config.id2label[predicted_label])
        LABEL_116
        ```"""
        outputs = self.vivit(
            pixel_values,
            head_mask=head_mask,
            interpolate_pos_encoding=interpolate_pos_encoding,
            **kwargs,
        )
        sequence_output = outputs.last_hidden_state

        logits = self.classifier(sequence_output[:, 0, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(labels, logits, self.config, **kwargs)

        return ImageClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = ["VivitModel", "VivitPreTrainedModel", "VivitForVideoClassification"]
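

# Minimal smoke-test sketch (assumes a small, randomly initialized VivitConfig; the expected
# sequence length follows the tubelet arithmetic documented in VivitTubeletEmbeddings).
if __name__ == "__main__":
    config = VivitConfig(
        image_size=32,
        num_frames=4,
        tubelet_size=[2, 8, 8],
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=64,
    )
    model = VivitModel(config)
    pixel_values = torch.randn(1, config.num_frames, config.num_channels, config.image_size, config.image_size)
    with torch.no_grad():
        outputs = model(pixel_values)
    # (4 // 2) * (32 // 8) * (32 // 8) tubelets + 1 CLS token = 33 positions
    print(outputs.last_hidden_state.shape)  # torch.Size([1, 33, 32])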