from typing import Optional, Union

import torch
import torch.utils.checkpoint
from torch import nn

from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationConfig
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...processing_utils import Unpack
from ...utils import auto_docstring, can_return_tuple, logging
from ..idefics3.configuration_idefics3 import Idefics3Config, Idefics3VisionConfig
from ..idefics3.image_processing_idefics3 import Idefics3ImageProcessor
from ..idefics3.image_processing_idefics3_fast import Idefics3ImageProcessorFast
from ..idefics3.modeling_idefics3 import (
    Idefics3BaseModelOutputWithPast,
    Idefics3ForConditionalGeneration,
    Idefics3Model,
    Idefics3PreTrainedModel,
    Idefics3VisionTransformer,
)


logger = logging.get_logger(__name__)


class SmolVLMVisionConfig(Idefics3VisionConfig):
    r"""
    This is the configuration class to store the configuration of a [`SmolVLMVisionModel`]. It is used to instantiate a
    SmolVLM vision encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the SigLIP checkpoint
    [google/siglip-so400m-patch14-384](https://huggingface.co/google/siglip-so400m-patch14-384) used in SmolVLM
    [HuggingFaceTB/SmolVLM2-2.2B-Instruct](https://huggingface.co/HuggingFaceTB/SmolVLM2-2.2B-Instruct).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 1152):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_channels (`int`, *optional*, defaults to 3):
            Number of channels in the input images.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 32):
            The size (resolution) of each patch.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.

    Example:

    ```python
    >>> from transformers.models.smolvlm.modeling_smolvlm import SmolVLMVisionTransformer
    >>> from transformers.models.smolvlm.configuration_smolvlm import SmolVLMVisionConfig

    >>> # Initializing a SmolVLMVisionConfig with google/siglip-so400m-patch14-384 style configuration
    >>> configuration = SmolVLMVisionConfig()

    >>> # Initializing a SmolVLMVisionTransformer (with random weights) from the google/siglip-so400m-patch14-384 style configuration
    >>> model = SmolVLMVisionTransformer(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "smolvlm_vision"


class SmolVLMPreTrainedModel(Idefics3PreTrainedModel):
    pass


class SmolVLMVisionTransformer(Idefics3VisionTransformer):
    pass


class SmolVLMConfig(Idefics3Config):
    r"""
    This is the configuration class to store the configuration of a [`SmolVLMModel`]. It is used to instantiate a
    SmolVLM model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the model of the SmolVLM
    [HuggingFaceTB/SmolVLM2-2.2B-Instruct](https://huggingface.co/HuggingFaceTB/SmolVLM2-2.2B-Instruct) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should cache the key/value pairs of the attention mechanism. Only
            relevant if `config.is_decoder=True`.
        image_token_id (`int`, *optional*, defaults to 128257):
            The id of the "image" token.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether or not to tie the word embeddings with the token embeddings.
        vision_config (`SmolVLMVisionConfig` or `dict`, *optional*, defaults to `SmolVLMVisionConfig`):
            Custom vision config or dict for the vision tower
        text_config (`PretrainedConfig` or `dict`, *optional*, defaults to `LlamaConfig`):
            Custom text config or dict for the text model
        scale_factor (`int`, *optional*, defaults to 2):
            The scale factor for the image encoder.
        pad_token_id (`int`, *optional*, defaults to 128002):
            The id of the padding token.

    Example:
    ```python
    >>> from transformers import SmolVLMModel, SmolVLMConfig
    >>> # Initializing configuration
    >>> configuration = SmolVLMConfig()
    >>> # Initializing a model from the configuration
    >>> model = SmolVLMModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "smolvlm"


class SmolVLMImageProcessor(Idefics3ImageProcessor):
    pass


class SmolVLMImageProcessorFast(Idefics3ImageProcessorFast):
    pass


class SmolVLMBaseModelOutputWithPast(Idefics3BaseModelOutputWithPast):
    pass


class SmolVLMModel(Idefics3Model):
    """
    A subclass of Idefics3Model. We do *not* remove or block the call to inputs_merger
    in forward. Instead, we override inputs_merger here with custom logic.
    """

    def inputs_merger(
        self, input_ids: torch.LongTensor, inputs_embeds: torch.Tensor, image_hidden_states: torch.Tensor
    ):
        # Scatter the image-patch embeddings into the positions occupied by <image>
        # placeholder tokens. A standalone toy illustration of this block/offset
        # arithmetic is sketched at the bottom of this file.
        _, patch_size, _ = image_hidden_states.shape

        if input_ids is None:
            image_mask = inputs_embeds == self.get_input_embeddings()(
                torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
            )
            image_mask = image_mask[..., 0]  # slice off the hidden dim
        else:
            image_mask = input_ids == self.config.image_token_id

        num_image_tokens = image_mask.sum(dim=1)
        if not torch.all(num_image_tokens % patch_size == 0):
            raise ValueError("At least one sample has <image> tokens not divisible by patch_size.")

        blocks_per_sample = num_image_tokens // patch_size

        offsets = torch.nn.functional.pad(blocks_per_sample.cumsum(dim=0), (1, 0), value=0)
        block_offset = offsets[:-1]
        row_cum = image_mask.cumsum(dim=-1)
        chunk_idx = (row_cum - 1) // patch_size
        local_idx = (row_cum - 1) % patch_size
        block_idx = block_offset.unsqueeze(1) + chunk_idx

        image_embeds = torch.zeros_like(inputs_embeds)
        image_embeds[image_mask] = image_hidden_states[block_idx[image_mask], local_idx[image_mask], :]

        merged_embeds = torch.where(image_mask.unsqueeze(-1), image_embeds, inputs_embeds)
        return merged_embeds

    def get_image_features(self, pixel_values: torch.FloatTensor, pixel_attention_mask: torch.LongTensor = None):
        """
        Encodes images into continuous embeddings that can be forwarded to the language model.

        Args:
            pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
                The tensors corresponding to the input images.
            pixel_attention_mask (`torch.LongTensor`, *optional*):
                The attention mask indicating padded regions in the image.
        """
        batch_size, num_images, num_channels, height, width = pixel_values.shape
        pixel_values = pixel_values.to(dtype=self.dtype)  # fp16 compatibility
        pixel_values = pixel_values.view(batch_size * num_images, *pixel_values.shape[2:])

        # Remove padding images, which are entirely zero (see the sketch at the bottom of this file).
        nb_values_per_image = pixel_values.shape[1:].numel()
        real_images_inds = (pixel_values == 0.0).sum(dim=(-1, -2, -3)) != nb_values_per_image

        if not any(real_images_inds):
            # If no images are present, keep a single empty image placeholder.
            real_images_inds[0] = True

        pixel_values = pixel_values[real_images_inds].contiguous()

        # Handle the vision attention mask
        if pixel_attention_mask is None:
            pixel_attention_mask = torch.ones(
                size=[pixel_values.shape[i] for i in (0, 2, 3)],
                dtype=torch.bool,
                device=pixel_values.device,
            )
        else:
            # Remove padding images from the mask
            pixel_attention_mask = pixel_attention_mask.view(batch_size * num_images, *pixel_attention_mask.shape[2:])
            pixel_attention_mask = pixel_attention_mask[real_images_inds].contiguous()

        patch_size = self.config.vision_config.patch_size
        patches_subgrid = pixel_attention_mask.unfold(dimension=1, size=patch_size, step=patch_size)
        patches_subgrid = patches_subgrid.unfold(dimension=2, size=patch_size, step=patch_size)
        patch_attention_mask = (patches_subgrid.sum(dim=(-1, -2)) > 0).bool()

        # Get the sequence from the vision encoder
        image_hidden_states = self.vision_model(
            pixel_values=pixel_values, patch_attention_mask=patch_attention_mask
        ).last_hidden_state

        # Modality projection & resampling
        image_hidden_states = self.connector(image_hidden_states)
        return image_hidden_states

    @can_return_tuple
    @auto_docstring(
        custom_intro="""
        Inputs fed to the model can have an arbitrary number of images. To account for this, pixel_values fed to
        the model have image padding -> (batch_size, max_num_images, 3, max_heights, max_widths) where
        max_num_images is the maximum number of images among the batch_size samples in the batch.
        Padding images are not needed beyond padding the pixel_values at the entrance of the model.
        For efficiency, we only pass through the vision_model's forward the real images by
        discarding the padding images i.e. pixel_values of size (image_batch_size, 3, height, width) where
        image_batch_size would be 7 when num_images_per_sample=[1, 3, 1, 2] and max_num_images would be 3.
        """
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        pixel_attention_mask: Optional[torch.BoolTensor] = None,
        image_hidden_states: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Union[tuple, SmolVLMBaseModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if self.training and self.text_model.gradient_checkpointing and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
            )
            use_cache = False

        if input_ids is not None:
            batch_size, seq_length = input_ids.shape
        elif inputs_embeds is not None:
            batch_size, seq_length, _ = inputs_embeds.shape
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if inputs_embeds is None:
            inputs_embeds = self.get_input_embeddings()(input_ids).to(self.device)

        # Merge the vision features into the text embedding stream
        if pixel_values is not None and image_hidden_states is not None:
            raise ValueError("You cannot specify both pixel_values and image_hidden_states at the same time")
        elif pixel_values is not None:
            image_hidden_states = self.get_image_features(pixel_values, pixel_attention_mask)
        elif image_hidden_states is not None:
            image_hidden_states = image_hidden_states.to(dtype=self.dtype, device=input_ids.device)

        if image_hidden_states is not None:
            inputs_embeds = self.inputs_merger(
                input_ids=input_ids, inputs_embeds=inputs_embeds, image_hidden_states=image_hidden_states
            )

        outputs = self.text_model(
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
            return_dict=True,
            **kwargs,
        )

        return SmolVLMBaseModelOutputWithPast(
            last_hidden_state=outputs.last_hidden_state,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=image_hidden_states,
        )


class SmolVLMForConditionalGeneration(Idefics3ForConditionalGeneration):
    def __init__(self, config):
        super().__init__(config)
        self.model = SmolVLMModel(config)
        self.model.generation_config = GenerationConfig.from_model_config(config)
        self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
        self.post_init()

    def forward(self, **super_kwargs):
        r"""
        pixel_attention_mask (`torch.Tensor` of shape `(batch_size, image_size, image_size)`, *optional*):
            Mask to avoid performing attention on padding pixel indices.
        image_hidden_states (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
            The hidden states of the image encoder after modality projection.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or `model.image_token_id`. Tokens with indices set to `model.image_token_id` are
            ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> import requests
        >>> import torch
        >>> from PIL import Image
        >>> from io import BytesIO

        >>> from transformers import AutoProcessor, AutoModelForImageTextToText
        >>> from transformers.image_utils import load_image

        >>> # Note that passing the image urls (instead of the actual pil images) to the processor is also possible
        >>> image1 = load_image("https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg")
        >>> image2 = load_image("https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg")
        >>> image3 = load_image("https://cdn.britannica.com/68/170868-050-8DDE8263/Golden-Gate-Bridge-San-Francisco.jpg")

        >>> processor = AutoProcessor.from_pretrained("HuggingFaceTB/SmolVLM2-2.2B-Instruct")
        >>> model = AutoModelForImageTextToText.from_pretrained("HuggingFaceTB/SmolVLM2-2.2B-Instruct", dtype=torch.bfloat16, device_map="auto")

        >>> # Create inputs
        >>> messages = [
        ...     {
        ...         "role": "user",
        ...         "content": [
        ...             {"type": "video", "path": "path/to/video"},
        ...             {"type": "text", "text": "What is happening in this video?"},
        ...         ]
        ...     }
        ... ]

        >>> inputs = processor.apply_chat_template([messages], add_generation_prompt=True)

        >>> # Generate
        >>> generated_ids = model.generate(**inputs, max_new_tokens=256)
        >>> generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True)

        >>> print(generated_texts)
        ```"""
        super().forward(**super_kwargs)


__all__ = [
    "SmolVLMVisionConfig",
    "SmolVLMConfig",
    "SmolVLMImageProcessor",
    "SmolVLMImageProcessorFast",
    "SmolVLMForConditionalGeneration",
    "SmolVLMPreTrainedModel",
    "SmolVLMModel",
    "SmolVLMVisionTransformer",
]