# coding=utf-8
# PyTorch GLM model (transformers/models/glm/modeling_glm.py).

from typing import Callable, Optional, Union

import torch
import torch.nn as nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...integrations import use_kernel_forward_from_hub
from ...masking_utils import create_causal_mask
from ...modeling_layers import (
    GenericForSequenceClassification,
    GenericForTokenClassification,
    GradientCheckpointingLayer,
)
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...utils.deprecation import deprecate_kwarg
from ...utils.generic import check_model_inputs
from .configuration_glm import GlmConfig


class GlmMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config

        self.gate_up_proj = nn.Linear(config.hidden_size, 2 * config.intermediate_size, bias=False)
        self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
        self.activation_fn = ACT2FN[config.hidden_act]

    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
        up_states = self.gate_up_proj(hidden_states)

        gate, up_states = up_states.chunk(2, dim=-1)
        up_states = up_states * self.activation_fn(gate)

        return self.down_proj(up_states)
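
# Illustrative sketch (not from the original file): the fused gate_up_proj above packs the gate
# and up projections of a gated MLP into a single matmul, and chunk(2, dim=-1) splits them again.
# With hypothetical sizes hidden_size=4 and intermediate_size=6:
#
#   mlp = GlmMLP(GlmConfig(hidden_size=4, intermediate_size=6, hidden_act="silu"))
#   y = mlp(torch.randn(2, 3, 4))   # gate_up_proj output is (2, 3, 12); y has shape (2, 3, 4)
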
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs: Unpack[TransformersKwargs],
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., 0::2]
    x2 = x[..., 1::2]
    return torch.stack((-x2, x1), dim=-1).flatten(-2)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)

    # Interleave them instead of the usual half-split layout
    cos = cos[..., : cos.shape[-1] // 2].repeat_interleave(2, dim=-1)
    sin = sin[..., : sin.shape[-1] // 2].repeat_interleave(2, dim=-1)

    # Keep half or full tensor for later concatenation
    rotary_dim = cos.shape[-1]
    q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
    k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]

    # Apply rotary embeddings on the first half or full tensor
    q_embed = (q_rot * cos) + (rotate_half(q_rot) * sin)
    k_embed = (k_rot * cos) + (rotate_half(k_rot) * sin)

    # Concatenate back to full shape
    q_embed = torch.cat([q_embed, q_pass], dim=-1)
    k_embed = torch.cat([k_embed, k_pass], dim=-1)
    return q_embed, k_embed
""rj   c                       s   e Zd ZdZdeee d fddZedddd	de	j
ee	j
e	j
f ee	j
 ee ee	j ee ee	j
e	j
f d
ddZ  ZS )GlmAttentionz=Multi-headed attention from 'Attention Is All You Need' paperNr"   	layer_idxc                    s   t    || _|| _t|d|j|j | _|j|j | _	| jd | _
|j| _d| _tj|j|j| j |jd| _tj|j|j| j |jd| _tj|j|j| j |jd| _tj|j| j |jdd| _d S )NrC   g      Tr   F)r    r!   r"   rm   getattrr%   Znum_attention_headsrC   rA   rR   rK   attention_dropoutZ	is_causalr#   r$   Zattention_biasq_projk_projv_projo_projr*   r"   rm   r+   r-   r.   r!      s$    
zGlmAttention.__init__past_key_valuepast_key_values4.58new_nameversion)r/   position_embeddingsrJ   rv   cache_positionrM   r0   c                 K   s$  |j d d }g |d| jR }| ||dd}	| ||dd}
| ||dd}|\}}t|	|
||\}	}
|d ur|||d}||
|| j	|\}
}t
}| jjdkrt| jj }|| |	|
||f| jsdn| j| jd|\}}|jg |dR   }| |}||fS )Nr1   r   r   )rh   rg   r|   eagerrE   )rL   rK   )r=   rC   rp   viewrT   rq   rr   rj   updaterm   r]   r"   Z_attn_implementationr   rQ   ro   rK   r?   rW   rs   )r*   r/   r{   rJ   rv   r|   rM   Zinput_shapeZhidden_shapeZquery_statesrX   rY   rg   rh   Zcache_kwargsZattention_interfacer\   rZ   r-   r-   r.   r5      s8    


zGlmAttention.forward)N)NN)r6   r7   r8   __doc__r   r   intr!   r   r9   Tensortupler   
LongTensorr   r   r5   r;   r-   r-   r+   r.   rk      s     rk   ZRMSNormc                       s.   e Zd Zd fdd	Zdd Zdd Z  ZS )	
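
# Illustrative note (not from the original file): the attention backend used by the layer above
# is resolved at call time from config._attn_implementation. For example, loading a hypothetical
# checkpoint name while requesting SDPA:
#
#   model = GlmForCausalLM.from_pretrained("some/glm-checkpoint", attn_implementation="sdpa")
#
# makes attention_interface resolve to ALL_ATTENTION_FUNCTIONS["sdpa"] instead of the
# eager_attention_forward fallback defined earlier in this file.
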
@use_kernel_forward_from_hub("RMSNorm")
class GlmRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        GlmRMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"


class GlmRotaryEmbedding(nn.Module):
    inv_freq: torch.Tensor

    def __init__(self, config: GlmConfig, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict):
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


class GlmDecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: GlmConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size

        self.self_attn = GlmAttention(config=config, layer_idx=layer_idx)

        self.mlp = GlmMLP(config)
        self.input_layernorm = GlmRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = GlmRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        # Self Attention
        hidden_states, _ = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states
        return hidden_states


@auto_docstring
class GlmPreTrainedModel(PreTrainedModel):
    config: GlmConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["GlmDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _can_compile_fullgraph = True
    _supports_attention_backend = True
    _can_record_outputs = {
        "hidden_states": GlmDecoderLayer,
        "attentions": GlmAttention,
    }


@auto_docstring
class GlmModel(GlmPreTrainedModel):
    def __init__(self, config: GlmConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [GlmDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = GlmRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = GlmRotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    @check_model_inputs
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        cache_position: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPast:
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = create_causal_mask(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        hidden_states = inputs_embeds
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )


@auto_docstring
class GlmForCausalLM(GlmPreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]
    _tp_plan = {"lm_head": "colwise_rep"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = GlmModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> CausalLMOutputWithPast:
        r"""
        Example:

        ```python
        >>> from transformers import AutoTokenizer, GlmForCausalLM

        >>> model = GlmForCausalLM.from_pretrained("meta-glm/Glm-2-7b-hf")
        >>> tokenizer = AutoTokenizer.from_pretrained("meta-glm/Glm-2-7b-hf")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


class GlmForSequenceClassification(GenericForSequenceClassification, GlmPreTrainedModel):
    pass


class GlmForTokenClassification(GenericForTokenClassification, GlmPreTrainedModel):
    pass


__all__ = [
    "GlmPreTrainedModel",
    "GlmModel",
    "GlmForCausalLM",
    "GlmForSequenceClassification",
    "GlmForTokenClassification",
]