from typing import Callable, Optional, Union

import torch
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...integrations import use_kernel_forward_from_hub
from ...masking_utils import create_causal_mask
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ...utils.deprecation import deprecate_kwarg
from ...utils.generic import check_model_inputs
from .configuration_granite import GraniteConfig


logger = logging.get_logger(__name__)


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights
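# Shape walk-through for the eager path above. This is an illustrative sketch only; the
# concrete sizes below are made-up example values, not Granite defaults:
#
#   query:          (batch, num_attention_heads,  q_len, head_dim)   e.g. (1, 32, 16, 64)
#   key / value:    (batch, num_key_value_heads, kv_len, head_dim)   e.g. (1,  8, 16, 64)
#   repeat_kv(...): (batch, num_attention_heads, kv_len, head_dim)   ->   (1, 32, 16, 64)
#   attn_weights:   (batch, num_attention_heads,  q_len, kv_len)
#   attn_output:    (batch, q_len, num_attention_heads, head_dim) after the final transpose
#
# Note that `scaling` here is Granite's `config.attention_multiplier` (see GraniteAttention
# below) rather than the usual 1 / sqrt(head_dim).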
class GraniteAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: GraniteConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = config.attention_multiplier
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        self.q_proj = nn.Linear(
            config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(
            config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
        )

    @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_values: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_values is not None:
            # sin and cos are specific to RoPE models; cache_position is needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


@use_kernel_forward_from_hub("RMSNorm")
class GraniteRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        GraniteRMSNorm is equivalent to T5LayerNorm
        N)rX   rY   r   	Parameterr"   Zonesweightvariance_epsilon)rc   r[   epsrd   r&   r'   rY      s    
zGraniteRMSNorm.__init__c                 C   sJ   |j }|tj}|djddd}|t|| j  }| j|| S )Nr   r   T)Zkeepdim)	rD   rK   r"   rJ   powmeanZrsqrtr   r~   )rc   r0   Zinput_dtypeZvariancer&   r&   r'   rq      s
    zGraniteRMSNorm.forwardc                 C   s   t | jj d| j S )Nz, eps=)rx   r~   r!   r   )rc   r&   r&   r'   
extra_repr   s    zGraniteRMSNorm.extra_repr)r|   )rr   rs   rt   rY   rq   r   rz   r&   r&   rd   r'   r{      s   r{   c                       s$   e Zd Z fddZdd Z  ZS )
GraniteMLPc                    sx   t    || _|j| _|j| _tj| j| j|jd| _tj| j| j|jd| _	tj| j| j|jd| _
t|j | _d S )NrW   )rX   rY   rU   r[   Zintermediate_sizer   r]   Zmlp_bias	gate_projup_proj	down_projr   Z
hidden_actact_fnrc   rU   rd   r&   r'   rY      s    
zGraniteMLP.__init__c                 C   s$   |  | | || | }|S )N)r   r   r   r   )rc   r$   r   r&   r&   r'   rq      s     zGraniteMLP.forward)rr   rs   rt   rY   rq   rz   r&   r&   rd   r'   r      s   
r   c                       s   e Zd Zeed fddZedddddeje	ej e	ej
 e	e e	e e	e e	ej
 e	eejejf  eeje	eejejf  f d
	ddZ  ZS )GraniteDecoderLayerrT   c                    sZ   t    |j| _t||d| _t|| _t|j|jd| _	t|j|jd| _
|j| _d S )NrT   r   )rX   rY   r[   rS   	self_attnr   mlpr{   rms_norm_epsinput_layernormpost_attention_layernormresidual_multiplierrb   rd   r&   r'   rY      s    

zGraniteDecoderLayer.__init__rf   rg   rh   ri   NF)	r0   r?   r.   rg   output_attentions	use_cacherm   rl   r2   c	                 K   s   |}
|  |}| jf ||||||||d|	\}}|
|| j  }|}
| |}| |}|
|| j  }|f}|r|||f7 }|S )a/  
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*):
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
                query_sequence_length, key_sequence_length)` if default attention is used.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_values (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence
            position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
                Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
                with `head_dim` being the embedding dimension of each attention head.
            kwargs (`dict`, *optional*):
                Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code
                into the model
        )r0   r?   r.   rg   r   r   rm   rl   )r   r   r   r   r   )rc   r0   r?   r.   rg   r   r   rm   rl   rB   ZresidualZself_attn_weightsoutputsr&   r&   r'   rq      s.    #
	



zGraniteDecoderLayer.forward)NNNFFNN)rr   rs   rt   r   rv   rY   r   r"   rw   r   ry   r   boolrx   FloatTensorrq   rz   r&   r&   rd   r'   r      s(   
@auto_docstring
class GranitePreTrainedModel(PreTrainedModel):
    config: GraniteConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["GraniteDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _can_compile_fullgraph = True
    _supports_attention_backend = True
    _can_record_outputs = {
        "hidden_states": GraniteDecoderLayer,
        "attentions": GraniteAttention,
    }


class GraniteRotaryEmbedding(nn.Module):
    inv_freq: torch.Tensor

    def __init__(self, config: GraniteConfig, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict):
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


@auto_docstring
class GraniteModel(GranitePreTrainedModel):
    def __init__(self, config: GraniteConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [GraniteDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = GraniteRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = GraniteRotaryEmbedding(config=config)
        self.gradient_checkpointing = False
        self.embedding_multiplier = config.embedding_multiplier

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPast:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        inputs_embeds = inputs_embeds * self.embedding_multiplier

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = create_causal_mask(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )


@auto_docstring
class GraniteForCausalLM(GranitePreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]
    _tp_plan = {"lm_head": "colwise_rep"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = GraniteModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> CausalLMOutputWithPast:
        r"""
        Example:

        ```python
        >>> from transformers import AutoTokenizer, GraniteForCausalLM

        >>> model = GraniteForCausalLM.from_pretrained("meta-granite/Granite-2-7b-hf")
        >>> tokenizer = AutoTokenizer.from_pretrained("meta-granite/Granite-2-7b-hf")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])
        logits = logits / self.config.logits_scaling

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = ["GraniteForCausalLM", "GraniteModel", "GranitePreTrainedModel"]