"""PyTorch IdeficsVision model: a copy of CLIPVisionModel using a simpler config object"""

import math
from dataclasses import dataclass
from typing import Callable, Optional, Union

import torch
import torch.utils.checkpoint
from torch import nn

from ...activations import ACT2FN
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from ...utils import ModelOutput, can_return_tuple, logging
from .configuration_idefics import IdeficsVisionConfig


logger = logging.get_logger(__name__)


@dataclass
class IdeficsVisionModelOutput(ModelOutput):
    """
    Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states.

    Args:
        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
            The image embeddings obtained by applying the projection layer to the pooler_output.
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    image_embeds: Optional[torch.FloatTensor] = None
    last_hidden_state: Optional[torch.FloatTensor] = None
    hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[tuple[torch.FloatTensor, ...]] = None


class IdeficsVisionEmbeddings(nn.Module):
    def __init__(self, config: IdeficsVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size

        self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))

        self.patch_embedding = nn.Conv2d(
            in_channels=config.num_channels,
            out_channels=self.embed_dim,
            kernel_size=self.patch_size,
            stride=self.patch_size,
            bias=False,
        )

        self.num_patches = (self.image_size // self.patch_size) ** 2
        self.num_positions = self.num_patches + 1
        self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
        self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)

    def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
        """
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher
        resolution images.

        Source:
        https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174
        """

        num_patches = embeddings.shape[1] - 1
        pos_embed = self.position_embedding(self.position_ids)
        num_positions = pos_embed.shape[1] - 1
        if num_patches == num_positions and height == width:
            return pos_embed
        class_pos_embed = pos_embed[:, 0]
        patch_pos_embed = pos_embed[:, 1:]

        embed_dim = embeddings.shape[-1]
        num_h_patches = height // self.config.patch_size
        num_w_patches = width // self.config.patch_size
        # add a small offset to avoid floating point errors in the interpolation
        num_h_patches, num_w_patches = num_h_patches + 0.1, num_w_patches + 0.1
        sqrt_num_positions = math.sqrt(num_positions)
        patch_pos_embed = patch_pos_embed.reshape(1, int(sqrt_num_positions), int(sqrt_num_positions), embed_dim)
        patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)

        fp32_upcasting = patch_pos_embed.dtype == torch.bfloat16
        if fp32_upcasting:
            logger.warning_once(
                "Upcasting patch_pos_embed to fp32 for interpolation since `upsample_bicubic2d_out_frame` in "
                "nn.functional.interpolate is not implemented for 'torch.bfloat16' dtype. This will result in a "
                "slight overhead."
            )
            patch_pos_embed = patch_pos_embed.to(torch.float)
        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed,
            scale_factor=(num_h_patches / sqrt_num_positions, num_w_patches / sqrt_num_positions),
            mode="bicubic",
            align_corners=False,
        )
        if fp32_upcasting:
            patch_pos_embed = patch_pos_embed.to(torch.bfloat16)

        if int(num_h_patches) != patch_pos_embed.shape[-2] or int(num_w_patches) != patch_pos_embed.shape[-1]:
            raise ValueError(
                f"Number of patches for images ({int(num_h_patches), int(num_w_patches)}) don't match the "
                f"shape of position embedding ({patch_pos_embed.shape[-2], patch_pos_embed.shape[-1]})"
            )

        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, embed_dim)
        return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)

    def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
        batch_size, num_channels, height, width = pixel_values.shape
        if not interpolate_pos_encoding:
            if height != self.image_size or width != self.image_size:
                raise ValueError(
                    f"Input image size ({height}*{width}) doesn't match model"
                    f" ({self.image_size}*{self.image_size}). You should try to set `interpolate_pos_encoding=True`"
                )

        target_dtype = self.patch_embedding.weight.dtype
        patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))  # shape = [*, embed_dim, grid, grid]
        patch_embeds = patch_embeds.flatten(2).transpose(1, 2)

        class_embeds = self.class_embedding.expand(batch_size, 1, -1)
        embeddings = torch.cat([class_embeds, patch_embeds], dim=1)

        # add positional encoding to each token
        if interpolate_pos_encoding:
            embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
        else:
            embeddings = embeddings + self.position_embedding(self.position_ids)

        return embeddings
def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling
    if attention_mask is not None:
        attn_weights = attn_weights + attention_mask

    # softmax in fp32 for numerical stability, then cast back to the query dtype
    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)

    attn_output = torch.matmul(attn_weights, value)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights
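
# Shape sketch for the helper above (dims are illustrative): query/key/value enter as
# (batch, num_heads, seq_len, head_dim); attn_weights has shape
# (batch, num_heads, seq_len, seq_len); the returned attn_output is transposed to
# (batch, seq_len, num_heads, head_dim) so the caller can flatten the last two
# dimensions back into (batch, seq_len, embed_dim).
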
class IdeficsVisionAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        self.scale = self.head_dim**-0.5
        self.dropout = config.attention_dropout
        self.is_causal = False

        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Input shape: Batch x Time x Channel"""

        batch_size, seq_length, embed_dim = hidden_states.shape

        queries = self.q_proj(hidden_states)
        keys = self.k_proj(hidden_states)
        values = self.v_proj(hidden_states)

        queries = queries.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
        keys = keys.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
        values = values.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)

        # merge the causal mask into `attention_mask`, except for flash attention,
        # where causality is signalled through the `is_causal` flag instead
        if self.config._attn_implementation != "flash_attention_2":
            if attention_mask is not None and causal_attention_mask is not None:
                attention_mask = attention_mask + causal_attention_mask
            elif causal_attention_mask is not None:
                attention_mask = causal_attention_mask
        else:
            self.is_causal = causal_attention_mask is not None

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and output_attentions:
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. "
                    "Falling back to eager attention. This warning can be removed using the argument "
                    '`attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            queries,
            keys,
            values,
            attention_mask,
            is_causal=self.is_causal,
            scaling=self.scale,
            dropout=0.0 if not self.training else self.dropout,
        )

        attn_output = attn_output.reshape(batch_size, seq_length, embed_dim).contiguous()
        attn_output = self.out_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights
t|j	|j| _d S N)r(   r)   r#   r   Z
hidden_actactivation_fnr   rq   r*   Zintermediate_sizefc1fc2r6   r8   r   r    r)     s
    
zIdeficsVisionMLP.__init__)r   r=   c                 C   s"   |  |}| |}| |}|S r}   )r   r~   r   )r7   r   r   r   r    rX     s    


zIdeficsVisionMLP.forward)r   r   r   r)   r   rY   rX   r[   r   r   r8   r    r|     s   r|   c                       sJ   e Zd Zed fddZdejejejee e	ej
 dddZ  ZS )	IdeficsVisionEncoderLayerr"   c                    sR   t    |j| _t|| _tj| j|jd| _	t
|| _tj| j|jd| _d S N)eps)r(   r)   r*   r+   rl   	self_attnr   	LayerNormlayer_norm_epslayer_norm1r|   mlplayer_norm2r6   r8   r   r    r)   "  s    


z"IdeficsVisionEncoderLayer.__init__Frv   c                 C   sd   |}|  |}| j||||d\}}|| }|}| |}| |}|| }|f}|r`||f7 }|S )aI  
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
                `(config.encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        # pre-LayerNorm residual block: norm -> attention -> residual
        residual = hidden_states

        hidden_states = self.layer_norm1(hidden_states)
        hidden_states, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
        )
        hidden_states = residual + hidden_states

        # norm -> MLP -> residual
        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs


class IdeficsVisionEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`IdeficsVisionEncoderLayer`].

    Args:
        config: IdeficsVisionConfig
    """

    def __init__(self, config: IdeficsVisionConfig):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([IdeficsVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    @can_return_tuple
    def forward(
        self,
        inputs_embeds,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutput]:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Causal mask for the text model. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_states = inputs_embeds
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            layer_outputs = encoder_layer(
                hidden_states,
                attention_mask,
                causal_attention_mask,
                output_attentions=output_attentions,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )


class IdeficsVisionTransformer(nn.Module):
    def __init__(self, config: IdeficsVisionConfig):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size

        self.embeddings = IdeficsVisionEmbeddings(config)
        # NOTE: `pre_layrnorm` (sic) matches the attribute name used by the released
        # checkpoints and is kept for state-dict compatibility.
        self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
        self.encoder = IdeficsVisionEncoder(config)
        self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)

    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: Optional[bool] = False,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
        hidden_states = self.pre_layrnorm(hidden_states)

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        last_hidden_state = encoder_outputs[0]
        # pool by taking the hidden state of the class token
        pooled_output = last_hidden_state[:, 0, :]
        pooled_output = self.post_layernorm(pooled_output)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
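

# Minimal usage sketch (not part of the original module): run one random image through the
# vision tower. Default `IdeficsVisionConfig()` values are assumed here; in practice the
# config comes from a pretrained checkpoint.
#
#     import torch
#     from transformers.models.idefics.configuration_idefics import IdeficsVisionConfig
#     from transformers.models.idefics.vision import IdeficsVisionTransformer
#
#     config = IdeficsVisionConfig()
#     model = IdeficsVisionTransformer(config).eval()
#     pixel_values = torch.randn(1, config.num_channels, config.image_size, config.image_size)
#     with torch.no_grad():
#         outputs = model(pixel_values)
#     print(outputs.last_hidden_state.shape)  # (1, (image_size // patch_size) ** 2 + 1, hidden_size)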