a
    h*                 
   @   sD  d dl Z d dlmZ d dlmZmZmZmZ d dlZd dl	m
Z
 d dlm
  mZ d dl	mZ ddlmZ ddlmZmZ ddlmZ dd	lmZ dd
lmZ ddlmZ ddlmZ ddlmZm Z  ddl!m"Z"m#Z# ddl$m%Z%m&Z& ddl'm(Z( ddl)m*Z*m+Z+m,Z,m-Z-m.Z. ddl/m0Z0 ddl1m2Z2m3Z3m4Z4 e.5e6Z7edG dd de
j8Z9G dd de
j8Z:G dd de
j8Z;G dd de
j8Z<G dd de
j8Z=G d d! d!e
j8Z>d"d# Z?ej@ej@ej@ej@eAej@ej@f d$d%d&ZBej@eCej@d'd(d)ZDdPe
j8ej@ej@ej@eej@ eEeEe(e* d+d,d-ZFG d.d/ d/e
j8ZGG d0d1 d1eZHG d2d3 d3e
j8ZId4d5 ZJdQd6d7ZKG d8d9 d9e
j8ZLG d:d; d;e
j8ZMG d<d= d=eZNee+d>d?G d@dA dAe ZOe+G dBdC dCe&ZPG dDdE dEePZQe+G dFdG dGePZRe+G dHdI dIePZSee+dJd?G dKdL dLe ZTG dMdN dNePeZUg dOZVdS )R    N)	dataclass)AnyCallableOptionalUnion)	LayerNorm   )ACT2FN)CacheDynamicCache)GenerationMixin)use_kernel_forward_from_hub)create_causal_mask)FlashAttentionKwargs)GradientCheckpointingLayer)BaseModelOutputWithPastModelOutput)ROPE_INIT_FUNCTIONSdynamic_rope_update)ALL_ATTENTION_FUNCTIONSPreTrainedModel)Unpack)TransformersKwargsauto_docstringcan_return_tupleis_torchdynamo_compilinglogging)check_model_inputs   )Glm4vConfigGlm4vTextConfigGlm4vVisionConfigZRMSNormc                       s.   e Zd Zd fdd	Zdd Zdd Z  ZS )	Glm4vRMSNormư>c                    s&   t    tt|| _|| _dS )z;
        Glm4vRMSNorm is equivalent to T5LayerNorm
        N)super__init__nn	Parametertorchonesweightvariance_epsilon)selfhidden_sizeeps	__class__ d/var/www/html/assistant/venv/lib/python3.9/site-packages/transformers/models/glm4v/modeling_glm4v.pyr%   3   s    
zGlm4vRMSNorm.__init__c                 C   sJ   |j }|tj}|djddd}|t|| j  }| j|| S )N   TZkeepdim)	dtypetor(   float32powmeanZrsqrtr+   r*   )r,   hidden_statesZinput_dtypeZvariancer1   r1   r2   forward;   s
    zGlm4vRMSNorm.forwardc                 C   s   t | jj d| j S )Nz, eps=)tupler*   shaper+   r,   r1   r1   r2   
extra_reprB   s    zGlm4vRMSNorm.extra_repr)r#   )__name__
__module____qualname__r%   r<   r@   __classcell__r1   r1   r/   r2   r"   1   s   r"   c                       s,   e Zd Zded fddZdd Z  ZS )Glm4VisionMlpFbiasc                    sl   t    |j| _|j| _tj| j| j|d| _tj| j| j|d| _tj| j| j|d| _	t
|j | _d S NrF   )r$   r%   r-   out_hidden_sizeintermediate_sizer&   Linear	gate_projup_proj	down_projr	   
hidden_actact_fn)r,   configrG   r/   r1   r2   r%   G   s    
zGlm4VisionMlp.__init__c                 C   s    |  | | || | S N)rN   rP   rL   rM   r,   hidden_stater1   r1   r2   r<   P   s    zGlm4VisionMlp.forward)F)rA   rB   rC   boolr%   r<   rD   r1   r1   r/   r2   rE   F   s   	rE   c                       s8   e Zd Zedd fddZejejdddZ  ZS )Glm4vVisionPatchEmbedNrQ   returnc                    sV   t    |j| _|j| _|j| _|j| _| j| j| jg}tj| j| j||d| _	d S )N)kernel_sizestride)
r$   r%   
patch_sizetemporal_patch_sizein_channelsr-   	embed_dimr&   ZConv3dproj)r,   rQ   rY   r/   r1   r2   r%   U   s    
zGlm4vVisionPatchEmbed.__init__r;   rX   c                 C   sD   | j jj}|d| j| j| j| j}|  |j|dd| j}|S )Nr4   r6   )	r_   r*   r6   viewr]   r\   r[   r7   r^   )r,   r;   Ztarget_dtyper1   r1   r2   r<   _   s    
zGlm4vVisionPatchEmbed.forward	rA   rB   rC   r!   r%   r(   Tensorr<   rD   r1   r1   r/   r2   rV   T   s   
rV   c                       sF   e Zd ZU ejed< d
eedd fddZeejddd	Z	  Z
S )Glm4vVisionRotaryEmbeddinginv_freq     @N)dimthetarX   c                    s>   t    d|tjd|dtjd|   }| jd|dd d S )N      ?r   r3   ra   rf   F
persistent)r$   r%   r(   arangefloatregister_buffer)r,   rh   ri   rf   r/   r1   r2   r%   k   s    
 z#Glm4vVisionRotaryEmbedding.__init__)seqlenrX   c                 C   s*   t j|| jj| jjd}t || j}|S )Ndevicer6   )r(   rm   rf   rr   r6   outer)r,   rp   seqfreqsr1   r1   r2   r<   p   s    z"Glm4vVisionRotaryEmbedding.forward)rg   )rA   rB   rC   r(   rd   __annotations__intrn   r%   r<   rD   r1   r1   r/   r2   re   h   s   

re   c                       s@   e Zd Zd	eeeedd fddZejejdddZ	  Z
S )
Glm4vVisionPatchMergerFN)rh   context_dimrO   rG   rX   c                    st   t    tj|||d| _t|| _tj|||d| _tj|||d| _tj|||d| _	t
 | _t| | _d S rH   )r$   r%   r&   rK   r_   r   post_projection_normrL   rM   rN   ZGELUact1r	   rP   )r,   rh   ry   rO   rG   r/   r1   r2   r%   w   s    


zGlm4vVisionPatchMerger.__init__)rT   rX   c                 C   s:   |  |}| | |}| | | || | S rR   )r_   r{   rz   rN   rP   rL   rM   rS   r1   r1   r2   r<      s    
zGlm4vVisionPatchMerger.forward)F)rA   rB   rC   rw   strrU   r%   r(   rd   r<   rD   r1   r1   r/   r2   rx   v   s   
rx   c                       s2   e Zd Zed fddZejdddZ  ZS )Glm4vVisionEmbeddingsrQ   c                    sv   t    || _|j| _|j| _|j| _| j| j d | _| j| _t	
| j| j| _| jdt| jddd d S )Nr3   position_ids)r   r4   Frk   )r$   r%   rQ   r-   r^   Z
image_sizer[   Znum_patchesZnum_positionsr&   	Embeddingposition_embeddingro   r(   rm   expandr,   rQ   r/   r1   r2   r%      s    
zGlm4vVisionEmbeddings.__init__rX   c                    s  | j j}|jd }|jd }|j}	||	||	 }}|dkrXtjd||	|jd}
n|tt	rttj
|	tjdt tjstj
 |	tjd |jd }t|d }||||ddddj|	tjd}t fddttD j|	tjd}t fddttD j|	tjd}|j|	tjd}|j|	tjd}|d | d d }|d | d d }tj||fd	d
dd}tj||dddd}|dd	dd}||j|j}
||
 }|S )a  
        Forward pass with integrated position encoding adaptation using 2D interpolation.

        Args:
            embeddings: Input embeddings tensor
            lengths (torch.Tensor): Sequence lengths for each image in the batch.
            image_shapes (torch.Tensor): Tensor of shape [batch_size, 3] representing the image shapes (t, h, w).
            h_coords (torch.Tensor): Tensor of shape [total_seq] representing the h coordinate for each patch.
            w_coords (torch.Tensor): Tensor of shape [total_seq] representing the w coordinate for each patch.

        Returns:
            torch.Tensor: Embeddings with adapted position encoding added.
        r   r   rq   g      ?r3   c                    s"   g | ]} |d f  | qS )r   repeat.0iimage_shapeslengthsr1   r2   
<listcomp>       z1Glm4vVisionEmbeddings.forward.<locals>.<listcomp>c                    s"   g | ]} |d f  | qS )r3   r   r   r   r1   r2   r      r   r4   rh   ZbicubicFborder)modeZalign_cornersZpadding_mode)r   r*   r>   rr   r7   r(   emptyr6   
isinstancelisttensorlongrd   rw   rb   permute	unsqueezer8   catrangelenstackFZgrid_sampleZsqueeze)r,   
embeddingsr   r   Zh_coordsZw_coordsZpos_embed_weightr-   Z	total_seqrr   Zadapted_pos_embedZorig_size_sqZ	orig_sizeZpos_embed_2dZtarget_hZtarget_wZnorm_wZnorm_hgridZinterpolated_embed_fp32Zadapted_pos_embed_fp32r1   r   r2   r<      sN    



""
zGlm4vVisionEmbeddings.forwardrc   r1   r1   r/   r2   r}      s   r}   c                 C   sH   | dd| j d d f }| d| j d d df }tj| |fddS )*Rotates half the hidden dims of the input..Nr4   r3   r   )r>   r(   r   xx1Zx2r1   r1   r2   rotate_half   s    r   )qkcossinrX   c                 C   s   | j }|j }|  |  } }|d |d  }}| | t| |  }|| t||  }||}||}||fS )N)r6   rn   r   r   r7   )r   r   r   r   Zorig_q_dtypeZorig_k_dtypeq_embedk_embedr1   r1   r2   apply_rotary_pos_emb_vision   s    

r   )r;   n_reprX   c                 C   s^   | j \}}}}|dkr| S | dddddddddf |||||} | ||| ||S )z
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    r   N)r>   r   reshape)r;   r   batchnum_key_value_headsslenhead_dimr1   r1   r2   	repeat_kv   s
    0r           )modulequerykeyvalueattention_maskscalingdropoutkwargsc                 K   s   t || j}t || j}	t||dd| }
|d urf|d d d d d d d |jd f }|
| }
tjj|
dtj	d
|j}
tjj|
|| jd}
t|
|	}|dd }||
fS )Nr3   r   r   r4   rh   r6   )ptrainingr   )r   num_key_value_groupsr(   matmul	transposer>   r&   
functionalZsoftmaxr8   r7   r6   r   r   
contiguous)r   r   r   r   r   r   r   r   
key_statesvalue_statesattn_weightscausal_maskattn_outputr1   r1   r2   eager_attention_forward   s    
&r   c                       sX   e Zd Zedd fddZdejejeej eeejejf  ejdddZ	  Z
S )	Glm4vVisionAttentionNrW   c                    s   t    |j| _|j| _| j| j | _d| _tj|j|jd |j	d| _
tj|j|jdd| _| jd | _|| _|j| _d| _d S )Nr   r   rF   F      )r$   r%   r-   rh   	num_headsr   r   r&   rK   Zattention_biasqkvr_   r   rQ   attention_dropout	is_causalr   r/   r1   r2   r%     s    
zGlm4vVisionAttention.__init__r;   
cu_seqlensrotary_pos_embposition_embeddingsrX   c                    s  |j d }||djdddddd\}}}	|d u rptd tj	||fdd}
|

 }|
 }n|\}}t||||\}}|ddd}|ddd}|	ddd}	t jjdkrtjj  jjd	krD|dd  |d d   } |||	fd jjs$d
nj||||dd\}}nX|dd  |d d  fdd|||	fD } fddt| D }tj	|dd}||d }|}|S )Nr   r   r4   r   r3   aS  The attention layers in this model are transitioning from computing the RoPE embeddings internally through `rotary_pos_emb` (2D tensor of RoPE theta values), to using externally computed `position_embeddings` (Tuple of tensors, containing cos and sin). In v4.54 `rotary_pos_emb` will be removed and `position_embeddings` will be mandatory.r   eagerZflash_attention_2r   F)r   r   r   Zcu_seq_lens_qZcu_seq_lens_kZmax_length_qZmax_length_kr   c                    s    g | ]}t j|  d dqS )r3   r   )r(   splittolist)r   r   )r   r1   r2   r   ^  s   z0Glm4vVisionAttention.forward.<locals>.<listcomp>c              	      sD   g | ]<\}}} |||fd j js*dnjddd qS )Nr   F)r   r   r   r   r   )r   r   r   )r   r   r   v)attention_interfacer   r,   r1   r2   r   b  s    	
)r>   r   r   r   r   ZunbindloggerZwarning_oncer(   r   r   r   r   r   r   r   rQ   _attn_implementationr   maxr   r   r   zipr   r_   )r,   r;   r   r   r   r   
seq_lengthquery_statesr   r   embr   r   Z
max_seqlenr   _ZsplitsZattn_outputsr1   )r   r   r   r,   r2   r<   '  s`    
(


zGlm4vVisionAttention.forward)NN)rA   rB   rC   r!   r%   r(   rd   r   r=   r<   rD   r1   r1   r/   r2   r     s     r   c                       sV   e Zd Zdd fddZdejejeej eeejejf  ejdddZ  Z	S )	Glm4vVisionBlockNr   c                    sJ   t    t|j|jd| _t|j|jd| _t|| _t	|dd| _
d S )Nr.   FrF   )r$   r%   r"   r-   rms_norm_epsnorm1norm2r   attnrE   mlpr   r/   r1   r2   r%   x  s
    

zGlm4vVisionBlock.__init__r   c                 K   s<   || j | |f|||d| }|| | | }|S )N)r   r   r   )r   r   r   r   )r,   r;   r   r   r   r   r1   r1   r2   r<     s    zGlm4vVisionBlock.forward)NN)
rA   rB   rC   r%   r(   rd   r   r=   r<   rD   r1   r1   r/   r2   r   w  s     r   c                       sD   e Zd ZU ejed< ded fddZe e	dd Z
  ZS )	Glm4vTextRotaryEmbeddingrf   Nr~   c                    s   t    t|dr8|jd ur8|jd|jd| _nd| _|j| _|j| _|| _	t
| j | _| | j	|\}| _| jd|dd | j| _d S )Nrope_scaling	rope_typetypedefaultrf   Frk   )r$   r%   hasattrr   getr   Zmax_position_embeddingsZmax_seq_len_cachedZoriginal_max_seq_lenrQ   r   Zrope_init_fnattention_scalingro   rf   Zoriginal_inv_freq)r,   rQ   rr   rf   r/   r1   r2   r%     s    
z!Glm4vTextRotaryEmbedding.__init__c           
      C   s  | j d d d d d f  d|jd dd}|d d d d d d d f  }t|jjtrn|jjdkrn|jjnd}tj	|ddV | |  
dd}tj||fdd	}| | j }| | j }	W d    n1 s0    Y  |j|jd
|	j|jd
fS )Nr   r   r4   ZmpscpuF)device_typeZenabledr3   r   ra   )rf   rn   r   r>   r   rr   r   r|   r(   Zautocastr   r   r   r   r   r7   r6   )
r,   r   r   Zinv_freq_expandedZposition_ids_expandedr   ru   r   r   r   r1   r1   r2   r<     s    , &,z Glm4vTextRotaryEmbedding.forward)N)rA   rB   rC   r(   rd   rv   r    r%   Zno_gradr   r<   rD   r1   r1   r/   r2   r     s
   

r   c                 C   s>   | ddddf }| ddddf }t j| |fdddS )	r   .r   Nr3   r   r4   r   r   )r(   r   flattenr   r1   r1   r2   rotate_half_llm  s    r   c                 C   sJ  |d }t jdd t|j|ddD dd|}t jdd t|j|ddD dd|}|dd|jd d f jddd}|dd|jd d f jddd}|jd }| dd|f | d|df  }}|dd|f |d|df  }	}
|| t||  }|	| t|	|  }t j||gdd}t j||
gdd}||fS )	aX  Applies Rotary Position Embedding with Multimodal Sections to the query and key tensors (https://qwenlm.github.io/blog/qwen2-vl/).

    Explanation:
        Multimodal 3D rotary position embedding is an extension to 1D rotary position embedding. The input embedding
        sequence contains vision (images / videos) embedding and text embedding or just contains text embedding. For
        vision embedding part, we apply rotary position embedding on temporal, height and width dimension separately.
        Here we split the channel dimension to 3 chunks for the temporal, height and width rotary position embedding.
        For text embedding part, we just apply 1D rotary position embedding. The three rotary position index (temporal,
        height and width) of text embedding is always the same, so the text embedding rotary position embedding has no
        difference with modern LLMs.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        mrope_section(`List(int)`):
            Multimodal rope section is for channel dimension of temporal, height and width in rope calculation.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    r3   c                 S   s   g | ]\}}||d   qS r   r1   r   r   mr1   r1   r2   r     r   z3apply_multimodal_rotary_pos_emb.<locals>.<listcomp>r4   r   c                 S   s   g | ]\}}||d   qS r   r1   r   r1   r1   r2   r     r   .N)r(   r   	enumerater   r   r>   repeat_interleaver   )r   r   r   r   mrope_sectionZunsqueeze_dimZ
rotary_dimZq_rotZq_passZk_rotZk_passr   r   r1   r1   r2   apply_multimodal_rotary_pos_emb  s"    &&$$
""r   c                       s   e Zd ZdZd	eee d fddZd
ej	e
ej	ej	f eej	 eej ee eej ee e
ej	eej	 ee
ej	  f dddZ  ZS )Glm4vTextAttentionz
    Multi-headed attention from 'Attention Is All You Need' paper.
    and "Generating Long Sequences with Sparse Transformers".
    NrQ   	layer_idxc                    s   t    || _|| _|j| _|j| _| j| j | _|j| _| j| j | _	d| _
|j| _|j| _| jd | _tj| j| j| j dd| _tj| j| j| j dd| _tj| j| j| j dd| _tj| j| j | jdd| _d S )NTr   rF   F)r$   r%   rQ   r   r-   Znum_attention_headsr   r   r   r   r   r   r   r   r&   rK   q_projk_projv_projo_projr,   rQ   r   r/   r1   r2   r%     s     
zGlm4vTextAttention.__init__)r;   r   r   r   past_key_valuescache_positionr   rX   c                 K   s6  |  \}}	}
| |}| |}| |}|||	d| jdd}|||	d| jdd}|||	d| jdd}|\}}t||||| jd \}}|d ur|||d}|	||| j
|\}}t}| jjdkrt| jj }|| ||||f| jsdn| j| jd|\}}|||	d }| |}||fS )	Nr4   r   r3   r   )r   r   r  r   r   )r   r   )sizer   r   r  rb   r   r   r   r   updater   r   rQ   r   r   r   r   r   r   r   r  )r,   r;   r   r   r   r  r  r   ZbszZq_lenr   r   r   r   r   r   Zcache_kwargsr   r   r   r1   r1   r2   r<     s@    





zGlm4vTextAttention.forward)N)NNNN)rA   rB   rC   __doc__r    r   rw   r%   r(   rd   r=   
LongTensorr
   r   r   r<   rD   r1   r1   r/   r2   r     s        r   c                       s0   e Zd Z fddZejejdddZ  ZS )Glm4vTextMLPc                    sP   t    || _tj|jd|j dd| _tj|j|jdd| _t	|j
 | _d S )Nr3   FrF   )r$   r%   rQ   r&   rK   r-   rJ   gate_up_projrN   r	   rO   activation_fnr   r/   r1   r2   r%   D  s
    
zGlm4vTextMLP.__init__r`   c                 C   s4   |  |}|jddd\}}|| | }| |S )Nr3   r4   r   )r  chunkr  rN   )r,   r;   Z	up_statesZgater1   r1   r2   r<   L  s    
zGlm4vTextMLP.forward)rA   rB   rC   r%   r(   FloatTensorr<   rD   r1   r1   r/   r2   r
  C  s   r
  c                       s   e Zd Zeed fddZd	ejeejejf e	ej e	ej
 e	eej  e	e e	e e	ej
 eeje	eejejf  f d	ddZ  ZS )
Glm4vTextDecoderLayerr   c                    st   t    |j| _t||| _t|| _t|j|jd| _	t|j|jd| _
t|j|jd| _t|j|jd| _d S )Nr   )r$   r%   r-   r   	self_attnr
  r   r"   r   input_layernormpost_attention_layernormpost_self_attn_layernormpost_mlp_layernormr  r/   r1   r2   r%   V  s    

zGlm4vTextDecoderLayer.__init__NF)	r;   r   r   r   r  output_attentions	use_cacher  rX   c	                 K   st   |}
|  |}| jf ||||||||d|	\}}| |}|
| }|}
| |}| |}| |}|
| }|S )N)r;   r   r   r   r  r  r  r  )r  r  r  r  r   r  )r,   r;   r   r   r   r  r  r  r  r   Zresidualr   r1   r1   r2   r<   `  s,    
	




zGlm4vTextDecoderLayer.forward)NNNFFN)rA   rB   rC   r    rw   r%   r(   rd   r=   r   r	  rU   r  r<   rD   r1   r1   r/   r2   r  U  s$         r  zJ
    Base class for Llava outputs, with hidden states and attentions.
    )Zcustom_introc                   @   st   e Zd ZU dZdZejed< dZe	e
ej  ed< dZe	eej  ed< dZe	eej  ed< dZe	ej ed< dS )Glm4vModelOutputWithPasta  
    past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
        Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
        `(batch_size, num_heads, sequence_length, embed_size_per_head)`)

        Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
        `past_key_values` input) to speed up sequential decoding.
    rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
        The rope index difference between sequence length and multimodal rope.
    Nlast_hidden_stater  r;   
attentionsrope_deltas)rA   rB   rC   r  r  r(   r  rv   r  r   r   r;   r=   r  r  r	  r1   r1   r1   r2   r    s   
r  c                   @   sD   e Zd ZU eed< dZdZddgZdZdZ	dZ
dZdZeedZdS )	Glm4vPreTrainedModelrQ   modelTr  r   r  )r;   r  N)rA   rB   rC   r   rv   base_model_prefixZsupports_gradient_checkpointing_no_split_modulesZ_skip_keys_device_placementZ_supports_flash_attnZ_supports_sdpaZ_can_compile_fullgraphZ_supports_attention_backendr  r   Z_can_record_outputsr1   r1   r1   r2   r    s   
r  c                       sR   e Zd ZU eed< dgZdd fddZdd Zej	ej	ej	d	d
dZ
  ZS )Glm4vVisionModelrQ   r   Nr   c                    s   t     j| _ j| _t | _t | _ j j	 }t
|d | _t fddt jD | _t j j jd| _t j jd| _tj j j j jd| _t j jd| _d| _|   d S )Nr3   c                    s   g | ]}t  qS r1   )r   )r   r   r~   r1   r2   r     r   z-Glm4vVisionModel.__init__.<locals>.<listcomp>)rh   ry   rO   r   )r]   Zout_channelsrY   rZ   F)r$   r%   spatial_merge_sizer[   r}   r   rV   patch_embedr-   r   re   r   r&   
ModuleListr   depthblocksrx   rI   rJ   rO   mergerr"   r   post_conv_layernormZConv2d
downsamplepost_layernormgradient_checkpointing	post_init)r,   rQ   r   r/   r~   r2   r%     s*    

 zGlm4vVisionModel.__init__c                 C   s  g }|D ]\}}}t |dd|}||| j | j|| j | j}|dddd}| }t |d|d}||| j | j|| j | j}|dddd}| }|t j	||gdd
|d qt j|dd}|d d dd f  }| |}	|	| d}
|
|fS )Nr   r4   r   r3   r   r   )r(   rm   r   r   r   r   r   r   appendr   r   r   r   r   )r,   grid_thwZpos_idsthwZhpos_idsZwpos_idsZmax_grid_sizeZrotary_pos_emb_fullr   r1   r1   r2   rot_pos_emb  s4    "
zGlm4vVisionModel.rot_pos_emb)r;   r,  rX   c           
   	   C   s\  |  |}| |}| |\}}tj||fdd}| | f}t|dddf |dddf  |dddf jdtj	
 r|jntjd}tj|ddd	}|dd |dd   }| ||||dddf |dddf }| jD ]}	|	|||d
}q| |}|d| j| j|jd }|dddd}| |d| jj}| |}|S )az  
        Args:
            hidden_states (`torch.Tensor` of shape `(seq_len, hidden_size)`):
                The final hidden states of the model.
            grid_thw (`torch.Tensor` of shape `(num_images_or_videos, 3)`):
                The temporal, height and width of feature shape of each image in LLM.

        Returns:
            `torch.Tensor`: hidden_states.
        r4   r   Nr   r3   r   r   )r   r   )r   )r   r   r   )r!  r&  r0  r(   r   r   r   r   cumsumjit
is_tracingr6   Zint32r   padr   r   r$  r(  rb   r   r>   r   r'  rQ   rI   r%  )
r,   r;   r,  r   Zimage_type_idsr   r   r   ZseqlensZblkr1   r1   r2   r<     s4    

4*


zGlm4vVisionModel.forward)rA   rB   rC   r!   rv   r  r%   r0  r(   rd   r<   rD   r1   r1   r/   r2   r    s
   
r  c                       s   e Zd ZU eed< ed fddZeed	ee	j
 ee	j ee	j
 eee	j  ee	j ee ee	j
 ee eeef d	ddZ  ZS )
Glm4vTextModelrQ   r~   c                    s   t     j| _ j| _t j j| j| _t	 fddt
 jD | _t j jd| _t d| _d| _|   d S )Nc                    s   g | ]}t  |qS r1   )r  )r   r   r~   r1   r2   r   .  r   z+Glm4vTextModel.__init__.<locals>.<listcomp>r   r~   F)r$   r%   Zpad_token_idZpadding_idx
vocab_sizer&   r   r-   embed_tokensr"  r   Znum_hidden_layerslayersr"   r   normr   
rotary_embr)  r*  r   r/   r~   r2   r%   '  s    zGlm4vTextModel.__init__N)		input_idsr   r   r  inputs_embedsr  r  r   rX   c              	   K   s8  |d u |d uA rt d|r:|d u r:tj s:t| jd}|d u rL| |}|d u r|d urd| nd}	tj|	|	|j	d  |j
d}|d u r|dddd|j	d d}n$| dkr|d	 d|j	d d}t| j|||||d
}
|}| ||}| jD ]$}||f||
|||d|}|}q| |}t||dS )N:You must specify exactly one of input_ids or inputs_embedsr~   r   r   rr   r4   r   r3   )N.)rQ   Zinput_embedsr   r  r  r   )r   r   r   r  r  )r  r  )
ValueErrorr(   r2  r3  r   rQ   r7  get_seq_lengthrm   r>   rr   rb   r   rh   r   r:  r8  r9  r   )r,   r;  r   r   r  r<  r  r  r   Zpast_seen_tokensr   r;   r   Zdecoder_layerZlayer_outputsr1   r1   r2   r<   7  sV    
 	
	
zGlm4vTextModel.forward)NNNNNNN)rA   rB   rC   r    rv   r%   r   r   r   r(   r	  rd   r   r  rU   r   r   r   r=   r   r<   rD   r1   r1   r/   r2   r5  #  s,   
       
r5  c                       sf  e Zd ZU dZi ZdZeed< ddgZ fddZ	dd	 Z
d
d Zdd Zdd Zd eej eej eej eej eejejf dddZd!ejeej dddZd"ejeej dddZd#ejejejejdddZeed$ejeej eej eeej  eej eej eej eej eej eej eej ee eeef dddZ  Z S )%
Glm4vModel FrQ   r  r   c                    s:   t  | t|j| _t|j| _d | _	| 
  d S rR   )r$   r%   r  _from_configvision_configvisualr5  text_configlanguage_modelr  r*  r   r/   r1   r2   r%     s
    zGlm4vModel.__init__c                 C   s
   | j  S rR   )rG  get_input_embeddingsr?   r1   r1   r2   rH    s    zGlm4vModel.get_input_embeddingsc                 C   s   | j | d S rR   )rG  set_input_embeddingsr,   r   r1   r1   r2   rI    s    zGlm4vModel.set_input_embeddingsc                 C   s
   || _ d S rR   rG  r,   decoderr1   r1   r2   set_decoder  s    zGlm4vModel.set_decoderc                 C   s   | j S rR   rK  r?   r1   r1   r2   get_decoder  s    zGlm4vModel.get_decoderN)r;  image_grid_thwvideo_grid_thwr   rX   c           ,   
   C   s*  | j jj}| j j}| j j}| j j}g }	|durT|dusB|durT|}
|du rXt|
}tjd|j	d |j	d |j
|jd}d\}}d}||
j}t|
D ]\}}||| dk }| }g }d}|D ]`}||krd}n||krd}||kr|s|d	 q||kr |r |d
 q|d qg }tt|dd D ]>\}}t|}|d d }|d d d }||||f qDg }d}|D ]N\}}}t|dkr|d  d nd}|d	kr|| d || d || d   }} }!| |  | |! |   }"}#}$t|"ddd|#|$  }%t|#ddd|"d|$ }&t|$ddd|"|#d }'|t|%|&|'g|  |d7 }d}q|d
kr||| d || d   }} }!||  | |! |   }"}#}$t|"D ]}(t|(ddd|#|$  }%t|#ddddd|$ }&t|$dddd|#d }'|t|%|&|'g|  q|d7 }||| d kr|d7 }d}|d7 }n0|| })|t|)dddd|  d}qtj|dddd}*|*|j|d||| dkf< |	|* d t|
|   qtj|	|jdd}	||	fS |dur|  !dd }|"|dkd |dddd|j}|jdddd jdddd }+|+d |j	d  }	nNtj|j	d |jddddd|j	d d}tj#|j	d dg|j|j
d}	||	fS dS )aU  
        Calculate the 3D rope index based on image and video's temporal, height and width in LLM.

        Explanation:
            Each embedding sequence contains vision embedding and text embedding or just contains text embedding.

            For pure text embedding sequence, the rotary position embedding has no difference with modern LLMs.
            Examples:
                input_ids: [T T T T T], here T is for text.
                temporal position_ids: [0, 1, 2, 3, 4]
                height position_ids: [0, 1, 2, 3, 4]
                width position_ids: [0, 1, 2, 3, 4]

            For vision and text embedding sequence, we calculate 3D rotary position embedding for vision part
            and 1D rotary position embedding for text part.
            Examples:
                Temporal (Time): 3 patches, representing different segments of the video in time.
                Height: 2 patches, dividing each frame vertically.
                Width: 2 patches, dividing each frame horizontally.
                We also have some important parameters:
                fps (Frames Per Second): The video's frame rate, set to 1. This means one frame is processed each second.
                tokens_per_second: This is a crucial parameter. It dictates how many "time-steps" or "temporal tokens" are conceptually packed into a one-second interval of the video. In this case, we have 25 tokens per second. So each second of the video will be represented with 25 separate time points. It essentially defines the temporal granularity.
                temporal_patch_size: The number of frames that compose one temporal patch. Here, it's 2 frames.
                interval: The step size for the temporal position IDs, calculated as tokens_per_second * temporal_patch_size / fps. In this case, 25 * 2 / 1 = 50. This means that each temporal patch will be have a difference of 50 in the temporal position IDs.
                input_ids: [V V V V V V V V V V V V T T T T T], here V is for vision.
                vision temporal position_ids: [0, 0, 0, 0, 50, 50, 50, 50, 100, 100, 100, 100]
                vision height position_ids: [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1]
                vision width position_ids: [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
                text temporal position_ids: [101, 102, 103, 104, 105]
                text height position_ids: [101, 102, 103, 104, 105]
                text width position_ids: [101, 102, 103, 104, 105]
                Here we calculate the text start position_ids as the max vision position_ids plus 1.

        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
                it.
            image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
                The temporal, height and width of feature shape of each image in LLM.
            video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
                The temporal, height and width of feature shape of each video in LLM.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

        Returns:
            position_ids (`torch.LongTensor` of shape `(3, batch_size, sequence_length)`)
            mrope_position_deltas (`torch.Tensor` of shape `(batch_size)`)
        Nr   r   r   r6   rr   )r   r   FTimageZvideotextc                 S   s   | d S )Nr   r1   )r   r1   r1   r2   <lambda>  r   z+Glm4vModel.get_rope_index.<locals>.<lambda>r4   r3   r   .r>  r5   rq   )$rQ   rD  r   image_token_idvideo_start_token_idvideo_end_token_idr(   Z	ones_liker)   r>   r6   rr   r7   r   r   r+  	itertoolsgroupbyr   r   r   itemrm   rb   r   r   r   r   r   r   r   r   r   r1  Zmasked_fill_Zzeros),r,   r;  rP  rQ  r   r   rV  rW  rX  Zmrope_position_deltasZtotal_input_idsr   Zimage_indexZvideo_indexZvideo_group_indexr   Zinput_tokensZinput_token_typeZvideo_check_flgtokenZinput_type_groupr   groupstart_indexZ	end_indexZllm_pos_ids_listZvideo_frame_numZmodality_typeZ	start_idxZend_idxZst_idxr-  r.  r/  Z
llm_grid_tZ
llm_grid_hZ
llm_grid_wZt_indexZh_indexZw_indexZt_idxZtext_lenZllm_positionsZmax_position_idsr1   r1   r2   get_rope_index  s    ;

"







"""






"""
$ 
 zGlm4vModel.get_rope_indexpixel_values_videosrQ  c                 C   s   | | jj}g }|D ]:\}}}td| | gd|d}|| qtj	|dd}| j||d}	|
d| jjd   }
t|	|
}	|	S )a  
        Encodes videos into continuous embeddings that can be forwarded to the language model.

        Args:
            pixel_values_videos (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
                The tensors corresponding to the input videos.
            video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
                The temporal, height and width of feature shape of each video in LLM.
        r   r   r   r,  r4   r3   )r   rE  r6   r(   r   r[  r   r   r+  r   prodr   r   r   )r,   ra  rQ  Ztemp_frames_hwr-  r.  r/  Zrepeated_rowZflattened_video_grid_thwvideo_embedssplit_sizesr1   r1   r2   get_video_featuresX  s    &zGlm4vModel.get_video_featurespixel_valuesrP  c                 C   sF   | | jj}| j||d}|d| jjd   }t||}|S )a  
        Encodes images into continuous embeddings that can be forwarded to the language model.

        Args:
            pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
                The tensors corresponding to the input images.
            image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
                The temporal, height and width of feature shape of each image in LLM.
        rb  r4   r3   )r   rE  r6   rc  r   r   r(   r   )r,   rh  rP  image_embedsre  r1   r1   r2   get_image_featuresp  s
    
zGlm4vModel.get_image_features)r;  r<  image_featuresvideo_featuresc           	      C   s6  |du rf||   tj| jjtj|jdk}|d}||   tj| jjtj|jdk}|d}n|| jjk}|| jjk}|	 }|
d||j}|dur||  | krtd| d|jd  |	 }|
d||j}|dur.||  | kr.td| d|jd  ||fS )z
        Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
        equal to the length of multimodal features. If the lengths are different, an error is raised.
        NrR  r4   z6Image features and image tokens do not match: tokens: z, features r   z7Videos features and video tokens do not match: tokens: )rH  r(   r   rQ   rV  r   rr   allZvideo_token_idsumr   Z	expand_asr7   Znumelr?  r>   )	r,   r;  r<  rk  rl  Zspecial_image_maskZspecial_video_maskZn_image_tokensZn_video_tokensr1   r1   r2   get_placeholder_mask  s0    
 zGlm4vModel.get_placeholder_mask)r;  r   r   r  r<  rh  ra  rP  rQ  r  r  r   rX   c              	   K   s  |du |duA rt d|du r,|  |}|durz| ||}tj|dd|j|j}| j|||d\}}|	||}|dur| 
||	}tj|dd|j|j}| j|||d\}}|	||}|du r~t|ts|n|d }|durB|jdkrBtj|dddf d	d
d}|jjrB|t|jj }d|  }t oz|durd|jd	 d	kpz|duoz|jd	 d	k}t  o|dur|d dkp|du p| dk}|s|s| jdu r| j|||	|d\}}
|
| _n|j\}}}|dur|d | j |jnd}tj||jd}|d	d|d}|dur`|j||jd  dd}||}|dddd}| jf d|||||d|}t|j |j!|j"|j#| jdS )a  
        image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
            The temporal, height and width of feature shape of each image in LLM.
        video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
            The temporal, height and width of feature shape of each video in LLM.
        rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
            The rope index difference between sequence length and multimodal rope.
        Nr=  r   r   )rk  )rl  Zfull_attention   r   r3   )Zdim1Zdim2rj   )r   r>  r4   r   )r;  r   r   r  r<  r  )r  r  r;   r  r  )$r?  rH  rj  r(   r   r7   rr   r6   ro  Zmasked_scatterrf  r   dictndimZdiagonalZis_floating_pointZfinfominrw   r   r>   r@  r  r_  rm   rb   r   r   addr   rG  r  r  r  r;   r  )r,   r;  r   r   r  r<  rh  ra  rP  rQ  r  r  r   ri  Z
image_maskr   rd  Z
video_maskZattention_mask_tensorZprefill_compiled_stageZprefill_noncompiled_stageZ
batch_sizer   deltaoutputsr1   r1   r2   r<     s    






zGlm4vModel.forward)NNNN)N)N)NN)NNNNNNNNNNN)!rA   rB   rC   r  _checkpoint_conversion_mappingaccepts_loss_kwargsr   rv   r  r%   rH  rI  rN  rO  r   r(   r	  rd   r=   r_  r  rf  rj  ro  r   r   r   r   r   r   r  r<   rD   r1   r1   r/   r2   rA  |  s~   
	     @   )           
rA  zQ
    Base class for Glm4v causal language model (or autoregressive) outputs.
    c                   @   s   e Zd ZU dZdZeej ed< dZ	eej ed< dZ
eeej  ed< dZeeej  ed< dZeeej  ed< dZeej ed< dS )	Glm4vCausalLMOutputWithPasta  
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Language modeling loss (for next-token prediction).
    logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
        Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
    past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
        Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
        `(batch_size, num_heads, sequence_length, embed_size_per_head)`)

        Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
        `past_key_values` input) to speed up sequential decoding.
    rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
        The rope index difference between sequence length and multimodal rope.
    Nlosslogitsr  r;   r  r  )rA   rB   rC   r  rz  r   r(   r  rv   r{  r  r   r;   r=   r  r  r	  r1   r1   r1   r2   ry    s   
ry  c                       s  e Zd Zi ZdgZdZ fddZdd Zdd Zd	d
 Z	dd Z
d&ejeej dddZd'ejeej dddZedd Zedd Zeed(ejeej eej eeej  eej eej eej eej eej eej eej eej eeejf ee eeef dddZd) fdd	Zd*eej eej eejejf dd d!Z d+ee!eej eeje"e#e$f f d#d$d%Z%  Z&S ),Glm4vForConditionalGenerationzlm_head.weightFc                    s<   t  | t|| _tj|jj|jjdd| _	| 
  d S )NFrF   )r$   r%   rA  r  r&   rK   rF  r-   r6  lm_headr*  r   r/   r1   r2   r%   9  s    
z&Glm4vForConditionalGeneration.__init__c                 C   s
   | j  S rR   )r  rH  r?   r1   r1   r2   rH  @  s    z2Glm4vForConditionalGeneration.get_input_embeddingsc                 C   s   | j | d S rR   )r  rI  rJ  r1   r1   r2   rI  C  s    z2Glm4vForConditionalGeneration.set_input_embeddingsc                 C   s   | j | d S rR   )r  rN  rL  r1   r1   r2   rN  F  s    z)Glm4vForConditionalGeneration.set_decoderc                 C   s
   | j  S rR   )r  rO  r?   r1   r1   r2   rO  I  s    z)Glm4vForConditionalGeneration.get_decoderNr`  c                 C   s   | j ||S rR   )r  rf  )r,   ra  rQ  r1   r1   r2   rf  L  s    z0Glm4vForConditionalGeneration.get_video_featuresrg  c                 C   s   | j ||S rR   )r  rj  )r,   rh  rP  r1   r1   r2   rj  Q  s    z0Glm4vForConditionalGeneration.get_image_featuresc                 C   s   | j jS rR   )r  rG  r?   r1   r1   r2   rG  U  s    z,Glm4vForConditionalGeneration.language_modelc                 C   s   | j jS rR   )r  rE  r?   r1   r1   r2   rE  Y  s    z$Glm4vForConditionalGeneration.visualr   )r;  r   r   r  r<  labelsrh  ra  rP  rQ  r  r  logits_to_keepr   rX   c                 K   s   | j f ||||	|
|||||d
|}|d }t|trDt| dn|}| |dd|ddf }d}|dur| j||| jjjd}t	|||j
|j|j|jdS )a  
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
            The temporal, height and width of feature shape of each image in LLM.
        video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
            The temporal, height and width of feature shape of each video in LLM.
        rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
            The rope index difference between sequence length and multimodal rope.

        Example:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, Glm4vForConditionalGeneration

        >>> model = Glm4vForConditionalGeneration.from_pretrained("THUDM/GLM-4.1V-9B-Thinking")
        >>> processor = AutoProcessor.from_pretrained("THUDM/GLM-4.1V-9B-Thinking")

        >>> messages = [
            {
                "role": "user",
                "content": [
                    {"type": "image"},
                    {"type": "text", "text": "What is shown in this image?"},
                ],
            },
        ]
        >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        >>> inputs = processor(text=[text], images=[image], vision_infos=[vision_infos])

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "The image shows a street scene with a red stop sign in the foreground. In the background, there is a large red gate with Chinese characters ..."
        ```)
r;  rh  ra  rP  rQ  r   r   r  r<  r  r   N)r{  r~  r6  )rz  r{  r  r;   r  r  )r  r   rw   slicer}  Zloss_functionrQ   rF  r6  ry  r  r;   r  r  )r,   r;  r   r   r  r<  r~  rh  ra  rP  rQ  r  r  r  r   rv  r;   Zslice_indicesr{  rz  r1   r1   r2   r<   ]  s8    =z%Glm4vForConditionalGeneration.forwardTc                    sR   t  j|f|||||||	|
||d
|}d |d< |d dkrNd |d< d |d< |S )N)
r  r   r<  r  r   rh  ra  rP  rQ  r  r   r   rh  ra  )r$   prepare_inputs_for_generation)r,   r;  r  r   r<  r  r   r  rh  ra  rP  rQ  r   Zmodel_inputsr/   r1   r2   r    s*    z;Glm4vForConditionalGeneration.prepare_inputs_for_generation)r;  r<  rX   c                 C   s   |dur||   tj| jjtj|jdkd }||   tj| jjtj|jdkd }||   tj| jjtj|jdkd }n$|| jjk}|| jjk}|| jjk}tj	|
 |
  dd}|dk}|| @ }|jdd}	|jdd}
|	|
fS )aa  
        Get the number of images and videos for each sample to calculate the separation length of the sample tensor.
        These parameters are not passed through the processor to avoid unpredictable impacts from interface modifications.

        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary.

        Returns:
            image_nums (`torch.LongTensor` of shape `(batch_size, num_images_sample)`)
            video_nums (`torch.LongTensor` of shape `(batch_size, num_videos_sample)`)
        NrR  ).r   r   r   r   )rH  r(   r   rQ   Zimage_start_token_idr   rr   rW  rX  r1  rw   rn  )r,   r;  r<  Zis_imageZis_video_startZis_video_endZvideo_levelZinside_videoZstandalone_imagesZimage_countsZvideo_countsr1   r1   r2   _get_image_nums_and_video_nums  s>    
z<Glm4vForConditionalGeneration._get_image_nums_and_video_numsr   )expand_sizeis_encoder_decoderr;  rX   c                    s    dkrfS g d fdd} fdd}|d urVj  dd||rd	d u rxtd
|d	 d	< fS )Nr   )rh  rP  ra  rQ  second_per_grid_tsc           	         s8   dd } dd }j dd d\}}dd }| D ]}|dkrt|t|}dd	 |D }|| | | d
| |< q>|dkrt|}|| | | d
| |< q>|dkrt|t|}dd	 |D }|| | | d
| |< q>|dkrt|}|| | | d
| |< q>|dkr>|| | t| d
| |< q>| S )NrP  rQ  r<  )r<  c                    sD   t | |}|gdg|  d    t j fdd|D dd}|S )Nr   c                    s   g | ]}|j   qS r1   r   r   sampleZrepeat_argsr1   r2   r   7  r   zGlm4vForConditionalGeneration._expand_inputs_for_generation.<locals>._expand_dict_for_generation_visual.<locals>._repeat_interleave_samples.<locals>.<listcomp>r   r   )r(   r   rh   r   )r   r   repeat_timessamplesresultr1   r  r2   _repeat_interleave_samples4  s    zGlm4vForConditionalGeneration._expand_inputs_for_generation.<locals>._expand_dict_for_generation_visual.<locals>._repeat_interleave_samplesrh  c                 S   s   g | ]}t j|d d qS r   r   r(   rc  rn  r  r1   r1   r2   r   ?  r   z{Glm4vForConditionalGeneration._expand_inputs_for_generation.<locals>._expand_dict_for_generation_visual.<locals>.<listcomp>)r   r  ra  c                 S   s   g | ]}t j|d d qS r  r  r  r1   r1   r2   r   K  r   r  )r   r  r(   r   r   )	dict_to_expandrP  rQ  Z
image_numsZ
video_numsr  r   r  r   )r  r;  model_kwargsr,   r1   r2   "_expand_dict_for_generation_visual-  sD    





zgGlm4vForConditionalGeneration._expand_inputs_for_generation.<locals>._expand_dict_for_generation_visualc                    sP   | D ]F}|dkr| | d urt | | tjr|vr| | j dd| |< q| S )Nr  r   r   )r   r(   rd   r   )r  r   )r  visual_keysr1   r2   _expand_dict_for_generationZ  s    
z`Glm4vForConditionalGeneration._expand_inputs_for_generation.<locals>._expand_dict_for_generationr   r   Zencoder_outputszMIf `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined.)r   r   r?  )r,   r  r  r;  r  r  r  r1   )r  r;  r  r,   r  r2   _expand_inputs_for_generation  s    -z;Glm4vForConditionalGeneration._expand_inputs_for_generation)N)N)NNNNNNNNNNNNr   )
NNNNNTNNNN)N)r   FN)'rA   rB   rC   rw  Z_tied_weights_keysrx  r%   rH  rI  rN  rO  r(   r  r   r	  rf  rj  propertyrG  rE  r   r   rd   r   r   rw   r   r   r=   ry  r<   r  r  rU   rq  r|   r   r  rD   r1   r1   r/   r2   r|  3  s    

             
_          , :   r|  )r|  rA  r  r5  )r   )r   )WrY  dataclassesr   typingr   r   r   r   r(   Ztorch.nnr&   Ztorch.nn.functionalr   r   r   Zactivationsr	   Zcache_utilsr
   r   Z
generationr   Zintegrationsr   Zmasking_utilsr   Zmodeling_flash_attention_utilsr   Zmodeling_layersr   Zmodeling_outputsr   r   Zmodeling_rope_utilsr   r   Zmodeling_utilsr   r   Zprocessing_utilsr   utilsr   r   r   r   r   Zutils.genericr   Zconfiguration_glm4vr   r    r!   Z
get_loggerrA   r   Moduler"   rE   rV   re   rx   r}   r   rd   r=   r   rw   r   rn   r   r   r   r   r   r   r   r
  r  r  r  r  r5  rA  ry  r|  __all__r1   r1   r1   r2   <module>   s   
W ^&
9K5nX     C