from typing import Callable, Optional, Union

import torch
import torch.nn as nn
import torch.utils.checkpoint

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...configuration_utils import PretrainedConfig, layer_type_validation
from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, logging
from ...utils.deprecation import deprecate_kwarg
from ..gemma.modeling_gemma import (
    GemmaAttention,
    GemmaForCausalLM,
    GemmaForSequenceClassification,
    GemmaForTokenClassification,
    GemmaMLP,
    GemmaModel,
    GemmaRMSNorm,
    apply_rotary_pos_emb,
    repeat_kv,
)


logger = logging.get_logger(__name__)


class Gemma2Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Gemma2Model`]. It is used to instantiate a Gemma2
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the Gemma2-7B,
    e.g. [google/gemma2-7b](https://huggingface.co/google/gemma2-7b).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 256000):
            Vocabulary size of the Gemma2 model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`Gemma2Model`].
        hidden_size (`int`, *optional*, defaults to 2304):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 9216):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 26):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*, defaults to 4):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by mean-pooling all the original heads within that group. For more details, check out [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, it will default to
            `num_attention_heads`.
        head_dim (`int`, *optional*, defaults to 256):
            The attention head dimension.
        hidden_activation (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            The non-linear activation function (function or string) in the decoder. Will default to `"gelu_pytorch_tanh"`
            if not specified. `"gelu_pytorch_tanh"` uses an approximation of the `"gelu"` activation function.
        max_position_embeddings (`int`, *optional*, defaults to 8192):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        eos_token_id (`int`, *optional*, defaults to 1):
            End of stream token id.
        bos_token_id (`int`, *optional*, defaults to 2):
            Beginning of stream token id.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to tie the input word embeddings and the output projection weights.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        query_pre_attn_scalar (`float`, *optional*, defaults to 256):
            Scaling factor used on the attention scores; the query is scaled by `query_pre_attn_scalar**-0.5`.
        sliding_window (`int`, *optional*, defaults to 4096):
            In Gemma2, every other layer uses sliding window attention. This is the size of the sliding window.
        layer_types (`list`, *optional*):
            Attention pattern for each layer.
        final_logit_softcapping (`float`, *optional*, defaults to 30.0):
            Scaling factor when applying tanh softcapping on the final logits.
        attn_logit_softcapping (`float`, *optional*, defaults to 50.0):
            Scaling factor when applying tanh softcapping on the attention scores.

    ```python
    >>> from transformers import Gemma2Model, Gemma2Config
    >>> # Initializing a Gemma2 gemma2-7b style configuration
    >>> configuration = Gemma2Config()
    >>> # Initializing a model from the gemma2-7b style configuration
    >>> model = Gemma2Model(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "gemma2"
    keys_to_ignore_at_inference = ["past_key_values"]
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.gate_proj": "colwise",
        "layers.*.mlp.up_proj": "colwise",
        "layers.*.mlp.down_proj": "rowwise",
    }
    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }

    def __init__(
        self,
        vocab_size=256000,
        hidden_size=2304,
        intermediate_size=9216,
        num_hidden_layers=26,
        num_attention_heads=8,
        num_key_value_heads=4,
        head_dim=256,
        hidden_activation="gelu_pytorch_tanh",
        max_position_embeddings=8192,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        bos_token_id=2,
        tie_word_embeddings=True,
        rope_theta=10000.0,
        attention_bias=False,
        attention_dropout=0.0,
        query_pre_attn_scalar=256,
        sliding_window=4096,
        layer_types=None,
        final_logit_softcapping=30.0,
        attn_logit_softcapping=50.0,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.head_dim = head_dim
        self.num_key_value_heads = num_key_value_heads
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.hidden_activation = hidden_activation
        self.query_pre_attn_scalar = query_pre_attn_scalar
        self.sliding_window = sliding_window
        self.final_logit_softcapping = final_logit_softcapping
        self.attn_logit_softcapping = attn_logit_softcapping
        self.layer_types = layer_types

        if self.layer_types is None:
            # Every other layer uses sliding window attention; layer 0 is a sliding layer.
            self.layer_types = [
                "sliding_attention" if bool((i + 1) % 2) else "full_attention" for i in range(self.num_hidden_layers)
            ]
        layer_type_validation(self.layer_types)


class Gemma2RMSNorm(GemmaRMSNorm):
    pass


class Gemma2MLP(GemmaMLP):
    def __init__(self, config):
        super().__init__(config)
        self.act_fn = ACT2FN[config.hidden_activation]


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    dropout: float = 0.0,
    scaling: Optional[float] = None,
    softcap: Optional[float] = None,
    **kwargs,
) -> tuple[torch.Tensor, torch.Tensor]:
    if scaling is None:
        scaling = module.head_dim**-0.5

    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling

    if softcap is not None:
        # Attention logit softcapping: tanh keeps the scores within (-softcap, softcap).
        attn_weights = attn_weights / softcap
        attn_weights = torch.tanh(attn_weights)
        attn_weights = attn_weights * softcap
    if attention_mask is not None:  # no matter the length, we just slice it
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    # upcast attention to fp32
    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()
    return attn_output, attn_weights


class Gemma2Attention(GemmaAttention):
    def __init__(self, config: Gemma2Config, layer_idx: int):
        super().__init__(config, layer_idx)
        self.attn_logit_softcapping = self.config.attn_logit_softcapping
        self.attention_dropout = self.config.attention_dropout
        self.is_causal = True
        self.scaling = config.query_pre_attn_scalar**-0.5
        self.sliding_window = config.sliding_window if config.layer_types[layer_idx] == "sliding_attention" else None

    @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_values: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_values is not None:
            # sin and cos are specific to RoPE models; cache_position is needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=self.attention_dropout if self.training else 0.0,
            scaling=self.scaling,
            sliding_window=self.sliding_window,
            softcap=self.attn_logit_softcapping,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class Gemma2DecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: Gemma2Config, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.config = config
        self.attention_type = config.layer_types[layer_idx]
        self.self_attn = Gemma2Attention(config=config, layer_idx=layer_idx)
        self.mlp = Gemma2MLP(config)
        self.input_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.pre_feedforward_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_feedforward_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs,
    ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            position_embeddings=position_embeddings,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.pre_feedforward_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = self.post_feedforward_layernorm(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs


class Gemma2Model(GemmaModel):
    def __init__(self, config: Gemma2Config):
        super().__init__(config)
        self.layers = nn.ModuleList(
            [Gemma2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> BaseModelOutputWithPast:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None and not self.training:
            past_key_values = DynamicCache(config=self.config)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        # The mask mapping may already have been prepared, e.g. by `generate`
        if not isinstance(causal_mask_mapping := attention_mask, dict):
            mask_kwargs = {
                "config": self.config,
                "input_embeds": inputs_embeds,
                "attention_mask": attention_mask,
                "cache_position": cache_position,
                "past_key_values": past_key_values,
                "position_ids": position_ids,
            }
            # One mask per attention pattern; each layer picks the one matching its `attention_type`.
            causal_mask_mapping = {
                "full_attention": create_causal_mask(**mask_kwargs),
                "sliding_attention": create_sliding_window_causal_mask(**mask_kwargs),
            }

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # normalized
        # Gemma2 downcasts the below to float16, causing sqrt(3072)=55.4256 to become 55.5
        # See https://github.com/huggingface/transformers/pull/29402
        normalizer = torch.tensor(self.config.hidden_size**0.5, dtype=hidden_states.dtype)
        hidden_states = hidden_states * normalizer

        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = decoder_layer(
                hidden_states,
                position_embeddings=position_embeddings,
                attention_mask=causal_mask_mapping[decoder_layer.attention_type],
                position_ids=position_ids,
                past_key_values=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                **kwargs,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )


class Gemma2ForCausalLM(GemmaForCausalLM):
    def __init__(self, config):
        super().__init__(config)
        self.model = Gemma2Model(config)
        self.post_init()

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> CausalLMOutputWithPast:
        r"""
        Example:

        ```python
        >>> from transformers import AutoTokenizer, Gemma2ForCausalLM

        >>> model = Gemma2ForCausalLM.from_pretrained("google/gemma-2-9b")
        >>> tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-9b")

        >>> prompt = "What is your favorite condiment?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "What is your favorite condiment?"
        ```"""
        if self.training and self.config._attn_implementation != "eager":
            logger.warning_once(
                "It is strongly recommended to train Gemma2 models with the `eager` attention implementation "
                f"instead of `{self.config._attn_implementation}`. Use `eager` with `AutoModelForCausalLM.from_pretrained('<path-to-checkpoint>', attn_implementation='eager')`."
            )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])
        if self.config.final_logit_softcapping is not None:
            # Final logit softcapping: tanh keeps the logits within (-final_logit_softcapping, final_logit_softcapping).
            logits = logits / self.config.final_logit_softcapping
            logits = torch.tanh(logits)
            logits = logits * self.config.final_logit_softcapping

        loss = None
        if labels is not None:
            loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


class Gemma2ForSequenceClassification(GemmaForSequenceClassification):
    pass


class Gemma2ForTokenClassification(GemmaForTokenClassification):
    pass


__all__ = [
    "Gemma2Config",
    "Gemma2ForCausalLM",
    "Gemma2Model",
    "Gemma2PreTrainedModel",  # noqa: F822
    "Gemma2ForSequenceClassification",
    "Gemma2ForTokenClassification",
]