"""PyTorch ViTDet backbone."""

import collections.abc
import math
from typing import Optional, Union

import torch
import torch.utils.checkpoint
from torch import nn

from ...activations import ACT2FN
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BackboneOutput, BaseModelOutput
from ...modeling_utils import PreTrainedModel
from ...utils import auto_docstring, logging
from ...utils.backbone_utils import BackboneMixin
from .configuration_vitdet import VitDetConfig


logger = logging.get_logger(__name__)


class VitDetEmbeddings(nn.Module):
    """
    This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
    `hidden_states` (patch embeddings) to be consumed by a Transformer.
    """

    def __init__(self, config):
        super().__init__()
        image_size, patch_size = config.pretrain_image_size, config.patch_size
        num_channels, hidden_size = config.num_channels, config.hidden_size

        image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_patches = num_patches

        if config.use_absolute_position_embeddings:
            # Initialize absolute positional embedding with pretrain image size.
            num_positions = num_patches + 1
            self.position_embeddings = nn.Parameter(torch.zeros(1, num_positions, config.hidden_size))
        else:
            self.position_embeddings = None

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
 zVitDetEmbeddings.__init__c                 C   s   |r|ddddf }|j d }tt|}|| |krDtdtj s^||ks^||krtj	j
|d||ddddd||fdd	d
}|ddddS |d||dS dS )a  
        Calculate absolute positional embeddings. If needed, resize embeddings and remove cls_token dimension for the
        original embeddings.

        Args:
            abs_pos_embeddings (`torch.Tensor`):
                Absolute positional embeddings with (1, num_position, num_channels).
            has_cls_token (`bool`):
                If true, has 1 embedding in abs_pos_embeddings for cls token.
            height (`int`):
                Height of input image tokens.
            width (`int`):
                Width of input image tokens.

        Returns:
            Absolute positional embeddings after processing with shape (1, height, width, num_channels)
        Nr   z5Absolute position embeddings must be a square number.r   r      ZbicubicF)sizemodeZalign_corners)shapeintmathsqrt
ValueErrorr   jit
is_tracingr   
functionalinterpolatereshapepermute)r"   Zabs_pos_embeddingsZhas_cls_tokenheightwidthZnum_positionr*   Znew_abs_pos_embeddingsr&   r&   r'   get_absolute_positionsA   s    
z'VitDetEmbeddings.get_absolute_positions)pixel_valuesreturnc                 C   s   |j d }|| jkr,td| j d| d| |}| jd ur|dddd}|| | jd|j d |j d  }|dddd}|S )	Nr   zoMake sure that the channel dimension of the pixel values match with the one set in the configuration. Expected z	 but got .r   r)   r   T)r,   r   r0   r!   r   r6   r9   )r"   r:   r   
embeddingsr&   r&   r'   forwardg   s"    



zVitDetEmbeddings.forward)
__name__
__module____qualname____doc__r   r9   r   Tensorr>   __classcell__r&   r&   r$   r'   r   %   s   &r   c                 C   s   t dt| | d }|jd |krftjj|d|jd dddd|dd}|d|dd}n|}t	| dddf t||  d }t	|dddf t| | d }|| |d t| | d  }||
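

# Illustrative shape walk-through for `VitDetEmbeddings` (a sketch, not part of the original
# module; assumes the default `VitDetConfig` with num_channels=3, patch_size=16, hidden_size=768):
#
#     config = VitDetConfig()
#     patch_embed = VitDetEmbeddings(config)
#     out = patch_embed(torch.randn(1, 3, 224, 224))
#     # 224 / 16 = 14 patches per side -> out.shape == (1, 768, 14, 14)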
  S )	a  
    Get relative positional embeddings according to the relative positions of query and key sizes.

    Args:
        q_size (`int`):
            Size of query q.
        k_size (`int`):
            Size of key k.
        rel_pos (`torch.Tensor`):
            Relative position embeddings (num_embeddings, num_channels).

    Returns:
        Extracted positional embeddings according to relative positions.
    r)   r   r   r(   Zlinear)r*   r+   N      ?)r-   maxr,   r   r3   r4   r5   r6   r   Zarangelong)q_sizek_sizeZrel_posZmax_rel_distZrel_pos_resizedZq_coordsZk_coordsZrelative_coordsr&   r&   r'   get_rel_pos}   s    $$rJ   c                 C   s   |\}}|\}}	t |||}
t ||	|}|j\}}}|||||}td||
}
td||}| |||||	|
dddddddddf  |dddddddddf  ||| ||	 } | S )a  
    Calculate decomposed Relative Positional Embeddings as introduced in
    [MViT2](https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py).

    Args:
        attn (`torch.Tensor`):
            Attention map.
        queries (`torch.Tensor`):
            Query q in the attention layer with shape (batch_size, queries_height * queries_width, num_channels).
        rel_pos_h (`torch.Tensor`):
            Relative position embeddings (Lh, num_channels) for height axis.
        rel_pos_w (`torch.Tensor`):
            Relative position embeddings (Lw, num_channels) for width axis.
        q_size (`tuple[int]`):
            Spatial sequence size of query q with (queries_height, queries_width).
        k_size (`tuple[int]`):
            Spatial sequence size of key k with (keys_height, keys_width).

    Returns:
        attn (Tensor): attention map with added relative positional embeddings.
    zbhwc,hkc->bhwkzbhwc,wkc->bhwkN)rJ   r,   r5   r   Zeinsumview)Zattnqueries	rel_pos_h	rel_pos_wrH   rI   Zqueries_heightZqueries_widthZkeys_heightZ
keys_widthZrelative_heightZrelative_width
batch_size_dimZr_qZrelative_weightr&   r&   r'   !add_decomposed_relative_positions   s"      rR   c                       s,   e Zd ZdZd fdd	Zd	ddZ  ZS )
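

# Shape intuition for the decomposed relative positions above (a sketch, not part of the
# original module): for equal query/key grid sizes, `get_rel_pos` gathers one embedding per
# (query_index, key_index) pair from a table of 2 * size - 1 relative offsets.
#
#     rel_pos = torch.randn(27, 64)         # table for size 14: 2 * 14 - 1 = 27 offsets
#     table = get_rel_pos(14, 14, rel_pos)  # -> (14, 14, 64), one vector per (q, k) pair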


class VitDetAttention(nn.Module):
    """Multi-head Attention block with relative position embeddings."""

    def __init__(self, config, input_size=None):
        """
        Args:
            config (`VitDetConfig`):
                Model configuration.
            input_size (`tuple[int]`, *optional*):
                Input resolution, only required in case relative position embeddings are added.
        """
        super().__init__()

        dim = config.hidden_size
        num_heads = config.num_attention_heads

        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim**-0.5

        self.qkv = nn.Linear(dim, dim * 3, bias=config.qkv_bias)
        self.proj = nn.Linear(dim, dim)

        self.use_relative_position_embeddings = config.use_relative_position_embeddings
        if self.use_relative_position_embeddings:
            # initialize relative positional embeddings
            self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim))
            self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim))

    def forward(self, hidden_state, output_attentions=False):
        batch_size, height, width, _ = hidden_state.shape
        # qkv with shape (3, batch_size, num_heads, height * width, num_channels)
        qkv = self.qkv(hidden_state).reshape(batch_size, height * width, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
        # queries, keys and values have shape (batch_size * num_heads, height * width, num_channels)
        queries, keys, values = qkv.reshape(3, batch_size * self.num_heads, height * width, -1).unbind(0)

        attention_scores = (queries * self.scale) @ keys.transpose(-2, -1)

        if self.use_relative_position_embeddings:
            attention_scores = add_decomposed_relative_positions(
                attention_scores, queries, self.rel_pos_h, self.rel_pos_w, (height, width), (height, width)
            )

        attention_probs = attention_scores.softmax(dim=-1)

        hidden_state = attention_probs @ values
        hidden_state = hidden_state.view(batch_size, self.num_heads, height, width, -1)
        hidden_state = hidden_state.permute(0, 2, 3, 1, 4)
        hidden_state = hidden_state.reshape(batch_size, height, width, -1)
        hidden_state = self.proj(hidden_state)

        if output_attentions:
            attention_probs = attention_probs.reshape(
                batch_size, self.num_heads, attention_probs.shape[-2], attention_probs.shape[-1]
            )
            outputs = (hidden_state, attention_probs)
        else:
            outputs = (hidden_state,)

        return outputs


def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """
    Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
    however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
    layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
    argument.
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output


class VitDetDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return f"p={self.drop_prob}"
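

# Behavior sketch for `VitDetDropPath` (illustrative, not part of the original module): in eval
# mode the layer is the identity; in train mode it zeroes a residual branch per sample with
# probability p and rescales the survivors by 1 / (1 - p) so the expected value is unchanged:
#
#     layer = VitDetDropPath(drop_prob=0.1)
#     layer.eval()
#     x = torch.randn(4, 14, 14, 768)
#     assert torch.equal(layer(x), x)  # no-op outside training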


class VitDetLayerNorm(nn.Module):
    """
    A LayerNorm variant, popularized by Transformers, that performs point-wise mean and variance normalization over the
    channel dimension for inputs that have shape (batch_size, channels, height, width).
    https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119
    """

    def __init__(self, normalized_shape, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(normalized_shape))
        self.bias = nn.Parameter(torch.zeros(normalized_shape))
        self.eps = eps
        self.normalized_shape = (normalized_shape,)

    def forward(self, x):
        u = x.mean(1, keepdim=True)
        s = (x - u).pow(2).mean(1, keepdim=True)
        x = (x - u) / torch.sqrt(s + self.eps)
        x = self.weight[:, None, None] * x + self.bias[:, None, None]
        return x
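

# Usage sketch for `VitDetLayerNorm` (illustrative, not part of the original module): it
# normalizes over dim 1 of a channels-first (batch_size, channels, height, width) map, which
# is what the convolutional residual blocks below need; `nn.LayerNorm` would instead expect
# the channel dimension last.
#
#     norm = VitDetLayerNorm(256)
#     y = norm(torch.randn(2, 256, 14, 14))  # same shape, per-pixel normalization over channels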


class VitDetResBottleneckBlock(nn.Module):
    """
    The standard bottleneck residual block without the last activation layer. It contains 3 conv layers with kernels
    1x1, 3x3, 1x1.
    """

    def __init__(self, config, in_channels, out_channels, bottleneck_channels):
        """
        Args:
            config (`VitDetConfig`):
                Model configuration.
            in_channels (`int`):
                Number of input channels.
            out_channels (`int`):
                Number of output channels.
            bottleneck_channels (`int`):
                Number of output channels for the 3x3 "bottleneck" conv layers.
        """
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels, bottleneck_channels, 1, bias=False)
        self.norm1 = VitDetLayerNorm(bottleneck_channels)
        self.act1 = ACT2FN[config.hidden_act]

        self.conv2 = nn.Conv2d(bottleneck_channels, bottleneck_channels, 3, padding=1, bias=False)
        self.norm2 = VitDetLayerNorm(bottleneck_channels)
        self.act2 = ACT2FN[config.hidden_act]

        self.conv3 = nn.Conv2d(bottleneck_channels, out_channels, 1, bias=False)
        self.norm3 = VitDetLayerNorm(out_channels)

    def forward(self, x):
        out = x
        for layer in self.children():
            out = layer(out)

        out = x + out
        return out


class VitDetMlp(nn.Module):
    def __init__(self, config, in_features: int, hidden_features: int) -> None:
        super().__init__()
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = ACT2FN[config.hidden_act]
        self.fc2 = nn.Linear(hidden_features, in_features)
        self.drop = nn.Dropout(config.dropout_prob)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


def window_partition(hidden_state, window_size):
    """
    Partition into non-overlapping windows with padding if needed.

    Args:
        hidden_state (`torch.Tensor`):
            Input tokens with [batch_size, height, width, num_channels].
        window_size (`int`):
            Window size.

    Returns:
        `tuple(torch.FloatTensor)` comprising various elements:
        - windows: windows after partition with [batch_size * num_windows, window_size, window_size, num_channels].
        - (padded_height, padded_width): padded height and width before partition
    """
    batch_size, height, width, num_channels = hidden_state.shape

    pad_height = (window_size - height % window_size) % window_size
    pad_width = (window_size - width % window_size) % window_size
    hidden_state = nn.functional.pad(hidden_state, (0, 0, 0, pad_width, 0, pad_height))
    padded_height, padded_width = height + pad_height, width + pad_width

    hidden_state = hidden_state.view(
        batch_size, padded_height // window_size, window_size, padded_width // window_size, window_size, num_channels
    )
    windows = hidden_state.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels)
    return windows, (padded_height, padded_width)


def window_unpartition(windows, window_size, pad_height_width, height_width):
    """
    Window unpartition into original sequences and removing padding.

    Args:
        windows (`torch.Tensor`):
            Input tokens with [batch_size * num_windows, window_size, window_size, num_channels].
        window_size (`int`):
            Window size.
        pad_height_width (`tuple[int]`):
            Padded height and width (padded_height, padded_width).
        height_width (`tuple[int]`):
            Original height and width before padding.

    Returns:
        hidden_state: unpartitioned sequences with [batch_size, height, width, num_channels].
    """
    padded_height, padded_width = pad_height_width
    height, width = height_width
    batch_size = windows.shape[0] // (padded_height * padded_width // window_size // window_size)
    hidden_state = windows.view(
        batch_size, padded_height // window_size, padded_width // window_size, window_size, window_size, -1
    )
    hidden_state = hidden_state.permute(0, 1, 3, 2, 4, 5).contiguous()
    hidden_state = hidden_state.view(batch_size, padded_height, padded_width, -1)

    hidden_state = hidden_state[:, :height, :width, :].contiguous()
    return hidden_state
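

# Round-trip sanity check for the two window helpers (a sketch, not part of the original
# module; assumes a 14x14 map and window_size=7, so no padding is actually added):
#
#     hidden_state = torch.randn(2, 14, 14, 768)
#     windows, pad_hw = window_partition(hidden_state, 7)  # -> (2 * 2 * 2, 7, 7, 768), pad_hw == (14, 14)
#     restored = window_unpartition(windows, 7, pad_hw, (14, 14))
#     assert torch.equal(restored, hidden_state)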
ee	j
 eeee	j
e	j
f ee	j
 f dd	d
Z  ZS )VitDetLayerzCThis corresponds to the Block class in the original implementation.r   FN)r#   drop_path_rater   use_residual_blockr;   c           	         s  t    |j}|j}t|ttfr(|n||f}|j}t|ttfrH|n||f}|d |d  |d |d  f}tj	||j
d| _t||dkr|n||fd| _|dkrt|nt | _tj	||j
d| _t||t||j d| _|| _|| _| jrt||||d d| _d S )	Nr   r   )rz   )r\   re   )r#   r   r   r)   )r#   r   r   r   )r   r   r   r   r   listtupler   r   	LayerNormZlayer_norm_epsr   rS   	attentionro   ZIdentityrn   r   r   r-   Z	mlp_ratiomlpr   r   r   residual)	r"   r#   r   r   r   rQ   r   r   r\   r$   r&   r'   r     s.    
 zVitDetLayer.__init__)rq   	head_maskr`   r;   c           
      C   s   | dddd}|}| |}| jdkrN|jd |jd  }}t|| j\}}| j||d}|d }|dd  }	| jdkrt|| j|||f}|| | }|| | | 	| }| dddd}| j
r| |}|f|	 }	|	S )Nr   r)   r   r   )r`   )r6   r   r   r,   r   r   r   rn   r   r   r   r   )
r"   rq   r   r`   Zshortcutr7   r8   r   Zself_attention_outputsrc   r&   r&   r'   r>     s*    




zVitDetLayer.forward)r   r   F)NF)r?   r@   rA   rB   r   ru   r-   boolr   r   rC   r   r   r   r>   rD   r&   r&   r$   r'   r     s    &  r   c                	       sN   e Zd Zedd fddZd
ejeej eeee	e
ef ddd	Z  ZS )VitDetEncoderN)r#   r;   c              	      s   t    || _|j}dd tjd|j|ddD }g }t|D ]4}|t	||| ||j
v rb|jnd||jv d q@t|| _d| _d S )Nc                 S   s   g | ]}|  qS r&   )item).0r~   r&   r&   r'   
<listcomp>      z*VitDetEncoder.__init__.<locals>.<listcomp>r   cpu)rj   )r   r   r   F)r   r   r#   num_hidden_layersr   Zlinspacer   rangeappendr   Zwindow_block_indicesr   Zresidual_block_indicesr   Z
ModuleListr   Zgradient_checkpointing)r"   r#   depthr   Zlayersir$   r&   r'   r     s     
	zVitDetEncoder.__init__FT)rq   r   r`   output_hidden_statesreturn_dictr;   c                 C   s   |rdnd }|rdnd }t | jD ]P\}}	|r8||f }|d urH|| nd }
|	||
|}|d }|r"||d f }q"|r||f }|stdd |||fD S t|||dS )Nr&   r   r   c                 s   s   | ]}|d ur|V  qd S rp   r&   )r   vr&   r&   r'   	<genexpr>?  r   z(VitDetEncoder.forward.<locals>.<genexpr>Zlast_hidden_staterq   
attentions)	enumerater   r   r	   )r"   rq   r   r`   r   r   Zall_hidden_statesZall_self_attentionsr   Zlayer_moduleZlayer_head_maskZlayer_outputsr&   r&   r'   r>   #  s&    

zVitDetEncoder.forward)NFFT)r?   r@   rA   r   r   r   rC   r   r   r   r   r	   r>   rD   r&   r&   r$   r'   r     s       
r   moduler;   c                 C   s2   t jj| jddd | jdur.t j| jd dS )a  


def caffe2_msra_fill(module: nn.Module) -> None:
    """
    Initialize `module.weight` using the "MSRAFill" implemented in Caffe2. Also initializes `module.bias` to 0.

    Source: https://detectron2.readthedocs.io/en/latest/_modules/fvcore/nn/weight_init.html.

    Args:
        module (torch.nn.Module): module to initialize.
    """
    nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
    if module.bias is not None:
        nn.init.constant_(module.bias, 0)
je
je
jf ddddZdS )	VitDetPreTrainedModelr#   Zvitdetr:   TNr   c                 C   s  t |tjtjfr\tjj|jjt	j
d| jjd|jj|j_|jdurX|jj  n@t |tjr|jj  |jjd nt |trtjj|jjt	j
d| jjd|jj|j_nt |tr&| jjr&tjj|jjt	j
d| jjd|j_tjj|jjt	j
d| jjd|j_nvt |tr|j|j|jfD ]}t| qB|j|jfD ] }|jjd |jj  q^|jjj  |jjj  dS )zInitialize the weightsre   )r|   ZstdNrE   ) r   r   rX   r    r   Ztrunc_normal_ry   datator   Zfloat32r#   Zinitializer_rangeri   rU   Zzero_r   Zfill_r   r   rS   r[   rM   rN   r   r   r   r   r   r   r   r   )r"   r   r   r&   r&   r'   _init_weights]  sL    



z#VitDetPreTrainedModel._init_weights)r?   r@   rA   r   __annotations__Zbase_model_prefixZmain_input_nameZsupports_gradient_checkpointingZ_no_split_modulesr   r   rX   r    r   r   r&   r&   r&   r'   r   U  s   
r   c                
       s   e Zd Zed fddZedddZeee	e f ddd	d
Z
edeej eej ee ee ee eeef dddZ  ZS )VitDetModelr#   c                    s2   t  | || _t|| _t|| _|   d S rp   )r   r   r#   r   r=   r   encoder	post_initr"   r#   r$   r&   r'   r     s
    

zVitDetModel.__init__rr   c                 C   s   | j jS rp   r=   r!   rs   r&   r&   r'   get_input_embeddings  s    z VitDetModel.get_input_embeddingsN)heads_to_pruner;   c                 C   s*   |  D ]\}}| jj| j| qdS )z
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        N)itemsr   r   r   Zprune_heads)r"   r   r   Zheadsr&   r&   r'   _prune_heads  s    zVitDetModel._prune_heads)r:   r   r`   r   r   r;   c           	      C   s   |dur|n| j j}|dur |n| j j}|dur4|n| j j}|du rLtd| || j j}| |}| j|||||d}|d }|s|f|dd  S t	||j
|jdS )a  
        r"""
        Examples:

        ```python
        >>> from transformers import VitDetConfig, VitDetModel
        >>> import torch

        >>> config = VitDetConfig()
        >>> model = VitDetModel(config)

        >>> pixel_values = torch.randn(1, 3, 224, 224)

        >>> with torch.no_grad():
        ...     outputs = model(pixel_values)

        >>> last_hidden_states = outputs.last_hidden_state
        >>> list(last_hidden_states.shape)
        [1, 768, 14, 14]
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        # Prepare head mask if needed: 1.0 in head_mask indicates we keep the head.
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output,) + encoder_outputs[1:]

        return BaseModelOutput(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


@auto_docstring(
    custom_intro="""
    ViTDet backbone, to be used with frameworks like Mask R-CNN.
    """
)
class VitDetBackbone(VitDetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.embeddings = VitDetEmbeddings(config)
        self.encoder = VitDetEncoder(config)
        self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)]

        # initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> VitDetEmbeddings:
        return self.embeddings.projection

    @auto_docstring
    def forward(
        self,
        pixel_values: torch.Tensor,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> BackboneOutput:
        r"""
        Examples:

        ```python
        >>> from transformers import VitDetConfig, VitDetBackbone
        >>> import torch

        >>> config = VitDetConfig()
        >>> model = VitDetBackbone(config)

        >>> pixel_values = torch.randn(1, 3, 224, 224)

        >>> with torch.no_grad():
        ...     outputs = model(pixel_values)

        >>> feature_maps = outputs.feature_maps
        >>> list(feature_maps[-1].shape)
        [1, 768, 14, 14]
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        embedding_output = self.embeddings(pixel_values)

        outputs = self.encoder(
            embedding_output,
            output_hidden_states=True,
            output_attentions=output_attentions,
            return_dict=return_dict,
        )

        hidden_states = outputs.hidden_states if return_dict else outputs[1]

        feature_maps = ()
        for stage, hidden_state in zip(self.stage_names, hidden_states):
            if stage in self.out_features:
                feature_maps += (hidden_state,)

        if not return_dict:
            if output_hidden_states:
                output = (feature_maps,) + outputs[1:]
            else:
                output = (feature_maps,) + outputs[2:]
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=outputs.attentions,
        )


__all__ = ["VitDetModel", "VitDetPreTrainedModel", "VitDetBackbone"]