from typing import Callable, Optional, Union

import torch
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...masking_utils import create_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...utils.deprecation import deprecate_kwarg
from ...utils.generic import check_model_inputs
from .configuration_cohere import CohereConfig

zCohereLayerNorm.__init__c                 C   sl   |j }|tj}|jddd}|| djddd}|| t|| j  }| jtj| }||S )NT)Zkeepdim   )	dtypetor!   float32meanpowZrsqrtr#   r"   )r$   hidden_statesZinput_dtyper1   Zvariancer*   r*   r+   forward:   s    zCohereLayerNorm.forward)Nr   F__name__
__module____qualname__r   r4   __classcell__r*   r*   r(   r+   r   3   s   r   c                       sD   e Zd ZU ejed< ded fddZe e	dd Z
  ZS )	CohereRotaryEmbeddinginv_freqNconfigc                    s   t    t|dr:t|jtr:|jd|jd| _nd| _|j| _	|j| _
|| _t| j | _| | j|\}| _| jd|dd | j| _d S )Nrope_scaling	rope_typetypedefaultr;   F)
persistent)r   r   hasattr
isinstancer>   dictgetr?   Zmax_position_embeddingsZmax_seq_len_cachedZoriginal_max_seq_lenr=   r   Zrope_init_fnattention_scalingZregister_bufferr;   Zoriginal_inv_freq)r$   r=   devicer;   r(   r*   r+   r   G   s    
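# Illustrative sketch: unlike an RMSNorm, `CohereLayerNorm` subtracts the mean and carries no
# bias, and `hidden_size` may be a tuple so the same class can normalize per-head query/key
# states (QK-norm). The shapes below are assumptions chosen only for the example:
#
#   >>> norm = CohereLayerNorm(hidden_size=(8, 64), eps=1e-5)   # weight of shape (8, 64)
#   >>> states = torch.randn(2, 16, 8, 64)                      # (batch, seq, heads, head_dim)
#   >>> norm(states).shape                                      # normalization runs over the last dim
#   torch.Size([2, 16, 8, 64])
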
zCohereRotaryEmbedding.__init__c           
      C   s   | j d d d d f  |jd dd}|d d d d d f  }t|jjtrd|jjdkrd|jjnd}tj	|ddT | |  
dd}tj|ddd	}| | j }| | j }	W d    n1 s0    Y  |j|jd
|	j|jd
fS )Nr   r,   r   ZmpscpuF)device_typeZenabledr-   dimr.   )r;   floatexpandshaperD   rH   r@   strr!   Zautocast	transposeZrepeat_interleavecosrG   sinr/   r.   )
r$   xposition_idsZinv_freq_expandedZposition_ids_expandedrJ   ZfreqsZembrS   rT   r*   r*   r+   r4   X   s    (&,zCohereRotaryEmbedding.forward)N)r6   r7   r8   r!   Tensor__annotations__r   r   Zno_gradr   r4   r9   r*   r*   r(   r+   r:   D   s
   

class CohereMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj

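# Illustrative sketch: the MLP is the usual gated form down_proj(act_fn(gate_proj(x)) * up_proj(x)),
# i.e. a SwiGLU-style block when `config.hidden_act` is a SiLU-like activation. Assuming a config
# with hidden_size=64 and intermediate_size=256 (values chosen only for the example):
#
#   >>> mlp = CohereMLP(config)
#   >>> mlp(torch.randn(2, 5, 64)).shape          # (batch, seq, hidden_size) in, same shape out
#   torch.Size([2, 5, 64])
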
zCohereMLP.__init__c                 C   s$   |  | | || | }|S )N)r_   r`   r]   r^   )r$   rU   r_   r*   r*   r+   r4   s   s     zCohereMLP.forwardr5   r*   r*   r(   r+   rY   h   s   
rY   )r3   n_repreturnc                 C   s^   | j \}}}}|dkr| S | dddddddddf |||||} | ||| ||S )z
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    r   N)rP   rO   reshape)r3   rb   batchnum_key_value_headsslenhead_dimr*   r*   r+   	repeat_kvx   s
    0ri           )modulequerykeyvalueattention_maskscalingdropoutkwargsc                 K   s   t || j}t || j}	t||dd| }
|d urf|d d d d d d d |jd f }|
| }
tjj|
dtj	d
|j}
tjj|
|| jd}
t|
|	}|dd }||
fS )Nr-   r   r,   )rL   r.   )ptrainingr   )ri   num_key_value_groupsr!   matmulrR   rP   r   Z
functionalZsoftmaxr0   r/   r.   rq   ru   
contiguous)rk   rl   rm   rn   ro   rp   rq   rr   
key_statesvalue_statesattn_weightscausal_maskattn_outputr*   r*   r+   eager_attention_forward   s    
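# Illustrative sketch: `repeat_kv` is the grouped-query-attention expansion. Each key/value head
# is repeated `n_rep` times so that the key/value tensors line up with the query heads before the
# matmul in `eager_attention_forward`. With assumed sizes (example only):
#
#   >>> kv = torch.randn(1, 2, 7, 64)             # (batch, num_key_value_heads=2, seq, head_dim)
#   >>> repeat_kv(kv, n_rep=4).shape              # -> 2 * 4 = 8 effective attention heads
#   torch.Size([1, 8, 7, 64])
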
def rotate_half(x):
    # Split and rotate: pair up the interleaved (even, odd) channels
    x1 = x[..., ::2]
    x2 = x[..., 1::2]
    rot_x = torch.stack([-x2, x1], dim=-1).flatten(-2)
    return rot_x


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    dtype = q.dtype
    q = q.float()
    k = k.float()
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed.to(dtype=dtype), k_embed.to(dtype=dtype)

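# Illustrative sketch: Cohere's `rotate_half` pairs *interleaved* elements (x[..., ::2] with
# x[..., 1::2]) rather than splitting the vector into two contiguous halves the way the Llama
# variant does; `CohereRotaryEmbedding` matches this by building the angles with
# repeat_interleave instead of concatenation. A tiny example on an assumed 4-dimensional head:
#
#   >>> rotate_half(torch.tensor([1.0, 2.0, 3.0, 4.0]))
#   tensor([-2.,  1., -4.,  3.])
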
ee	j
e	j
f ee	j
 ee ee	j ee ee	j
ee	j
 f d
ddZ  ZS )CohereAttentionz=Multi-headed attention from 'Attention Is All You Need' paperNr=   	layer_idxc                    s  t    || _|| _t|d|j|j | _|j|j | _	| jd | _
|j| _d| _tj|j|j| j |jd| _tj|j|j| j |jd| _tj|j|j| j |jd| _tj|j| j |j|jd| _|j| _| jrt|j| jf|jd| _t|j| jf|jd| _d S )Nrh   g      Tr[   r%   r&   )r   r   r=   r   getattrr%   Znum_attention_headsrh   rf   rv   rp   attention_dropoutZ	is_causalr   r\   Zattention_biasq_projk_projv_projo_projuse_qk_normr   layer_norm_epsq_normk_normr$   r=   r   r(   r*   r+   r      s8    
zCohereAttention.__init__past_key_valuepast_key_values4.58new_nameversion)r3   position_embeddingsro   r   cache_positionrr   rc   c                 K   sL  |j d d }g |d| jR }| ||}	| ||}
| ||}| jrl| |	}	| |
}
|		dd}	|
	dd}
|	dd}|\}}t
|	|
||\}	}
|d ur|||d}||
|| j|\}
}t}| jjdkrt| jj }|| |	|
||f| js
dn| j| jd|\}}|jg |dR   }| |}||fS )Nr,   r   r-   )rT   rS   r   eagerrj   )rq   rp   )rP   rh   r   viewr   r   r   r   r   rR   r   updater   r~   r=   Z_attn_implementationr   ru   r   rp   rd   rx   r   )r$   r3   r   ro   r   r   rr   Zinput_shapeZhidden_shapeZquery_statesry   rz   rS   rT   Zcache_kwargsZattention_interfacer}   r{   r*   r*   r+   r4      sD    




zCohereAttention.forward)N)NN)r6   r7   r8   __doc__r   r   intr   r   r!   rW   tupler   
class CohereDecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: CohereConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = CohereAttention(config=config, layer_idx=layer_idx)
        self.mlp = CohereMLP(config)
        self.input_layernorm = CohereLayerNorm(hidden_size=config.hidden_size, eps=config.layer_norm_eps)
zCohereDecoderLayer.__init__r   r   r   r   NF)	r3   ro   rV   r   	use_cacher   r   rr   rc   c              
   K   sL   |}	|  |}| jf |||||||d|\}
}| |}|	|
 | }|S )a  
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*):
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
                query_sequence_length, key_sequence_length)` if default attention is used.
            past_key_values (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence
            position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
                Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
                with `head_dim` being the embedding dimension of each attention head.
        )r3   ro   rV   r   r   r   r   )r   r   r   )r$   r3   ro   rV   r   r   r   r   rr   ZresidualZhidden_states_attention_Zhidden_states_mlpr*   r*   r+   r4   $  s     


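# Illustrative note: unlike the sequential residual pattern in e.g. Llama
# (x = x + attn(norm1(x)); x = x + mlp(norm2(x))), the Cohere block above applies a single
# input layernorm and adds the attention and MLP branches in parallel,
#
#     out = x + attn(norm(x)) + mlp(norm(x))
#
# which is why the layer defines only `input_layernorm` and no post-attention norm.
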
zCohereDecoderLayer.forward)NNNFNN)r6   r7   r8   r   r   r   r   r!   rW   r   r   r   boolr   r   r   FloatTensorr4   r9   r*   r*   r(   r+   r     s&         r   c                   @   sH   e Zd ZU eed< dZdZdgZdgZdZ	dZ
dZdZdZeedZdS )CoherePreTrainedModelr=   modelTr   r   )r3   
attentionsN)r6   r7   r8   r   rX   Zbase_model_prefixZsupports_gradient_checkpointingZ_no_split_modulesZ_skip_keys_device_placementZ_supports_flash_attnZ_supports_sdpaZ_supports_flex_attnZ_can_compile_fullgraphZ_supports_attention_backendr   r   Z_can_record_outputsr*   r*   r*   r+   r   V  s   
@auto_docstring
class CohereModel(CoherePreTrainedModel):
    def __init__(self, config: CohereConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [CohereDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = CohereLayerNorm(hidden_size=config.hidden_size, eps=config.layer_norm_eps)
        self.rotary_emb = CohereRotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    @check_model_inputs
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPast:
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = create_causal_mask(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        hidden_states = inputs_embeds
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )

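# Illustrative sketch: the bare `CohereModel` maps token ids to per-token hidden states. The
# config and sizes below are assumptions made only for the example:
#
#   >>> model = CohereModel(config)                              # a small CohereConfig
#   >>> input_ids = torch.randint(0, config.vocab_size, (1, 12))
#   >>> out = model(input_ids=input_ids)
#   >>> out.last_hidden_state.shape                              # (batch, seq_len, hidden_size)
#   torch.Size([1, 12, 4096])                                    # e.g. with config.hidden_size == 4096
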
zCohereModel.forward)NNNNNNN)r6   r7   r8   r   r   r   r   r   r!   r   rW   r   r   r   r   r   r   r4   r9   r*   r*   r(   r+   r   i  s*          r   c                       s   e Zd ZdgZddiZddgdgfiZ fddZeede	e
j e	e
j e	e
j e	eeee
j f  e	e
j e	e
j e	e e	e e	e e	e
j eee
jf ee ed
ddZ  ZS )CohereForCausalLMzlm_head.weightlm_headZcolwise_repr3   logitsc                    sP   t  | t|| _|j| _tj|j|jdd| _|j	| _	|j
| _
|   d S rZ   )r   r   r   r   r   r   r\   r%   r   logit_scaleZtie_word_embeddingsr   ra   r(   r*   r+   r     s    
zCohereForCausalLM.__init__Nr   )r   ro   rV   r   r   labelsr   output_attentionsoutput_hidden_statesr   logits_to_keeprr   rc   c                 K   s   |dur|n| j j}|	dur |	n| j j}	| jf ||||||||	|
d	|}|j}t|trht| dn|}| |dd|ddf }|| j	 }d}|dur| j
f ||| j jd|}t|||j|j|jdS )az  
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, CohereForCausalLM

        >>> model = CohereForCausalLM.from_pretrained("CohereForAI/c4ai-command-r-v01")
        >>> tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
zCohereForCausalLM.forward)NNNNNNNNNNr   )r6   r7   r8   Z_tied_weights_keysZ_tp_planZ_pp_planr   r   r   r   r!   r   rW   r   r   listr   r   r   r   r   r   r4   r9   r*   r*   r(   r+   r     s@              r   )r   r   r   )rj   )Nr   )9typingr   r   r   r!   r   Zactivationsr   Zcache_utilsr   r	   Z