from typing import Callable, Optional, Union

import torch
import torch.nn.functional as F
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPast, MoeCausalLMOutputWithPast, MoeModelOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...utils import auto_docstring, is_torch_flex_attn_available, logging
from ...utils.deprecation import deprecate_kwarg
from .configuration_granitemoe import GraniteMoeConfig


if is_torch_flex_attn_available():
    from torch.nn.attention.flex_attention import BlockMask

    from ...integrations.flex_attention import make_flex_block_causal_mask


logger = logging.get_logger(__name__)


def load_balancing_loss_func(
    gate_logits: Union[torch.Tensor, tuple[torch.Tensor], None],
    num_experts: Optional[int] = None,
    top_k=2,
    attention_mask: Optional[torch.Tensor] = None,
) -> Union[torch.Tensor, int]:
    r"""
    Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.

    See Switch Transformer (https://huggingface.co/papers/2101.03961) for more details. This function implements the loss
    function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
    experts is too unbalanced.

    Args:
        gate_logits:
            Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
            shape [batch_size X sequence_length, num_experts].
        num_experts:
            Number of experts
        top_k:
            The number of experts to route per-token, can be also interpreted as the `top-k` routing
            parameter.
        attention_mask (`torch.Tensor`, *optional*):
            The attention_mask used in forward function
            shape [batch_size X sequence_length] if not None.

    Returns:
        The auxiliary loss.
    """
    if gate_logits is None or not isinstance(gate_logits, tuple):
        return 0

    if isinstance(gate_logits, tuple):
        compute_device = gate_logits[0].device
        concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)

    routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)

    _, selected_experts = torch.topk(routing_weights, top_k, dim=-1)

    expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)

    if attention_mask is None:
        # Compute the percentage of tokens routed to each expert
        tokens_per_expert = torch.mean(expert_mask.float(), dim=0)

        # Compute the average probability of routing to those experts
        router_prob_per_expert = torch.mean(routing_weights, dim=0)
    else:
        batch_size, sequence_length = attention_mask.shape
        num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)

        # Compute the mask that masks all padding tokens as 0 with the same shape as expert_mask
        expert_attention_mask = (
            attention_mask[None, :, :, None, None]
            .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
            .reshape(-1, top_k, num_experts)
            .to(compute_device)
        )

        # Compute the percentage of tokens routed to each expert, ignoring padding tokens
        tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
            expert_attention_mask, dim=0
        )

        # Compute the mask that masks all padding tokens as 0 with the same shape as routing_weights
        router_per_expert_attention_mask = (
            attention_mask[None, :, :, None]
            .expand((num_hidden_layers, batch_size, sequence_length, num_experts))
            .reshape(-1, num_experts)
            .to(compute_device)
        )

        # Compute the average probability of routing to those experts, ignoring padding tokens
        router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
            router_per_expert_attention_mask, dim=0
        )

    overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
    return overall_loss * num_experts

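
# Illustrative sketch (not part of the upstream module): exercising the auxiliary loss above on random router
# logits. The layer/token/expert sizes below are assumptions chosen only for this demo.
def _demo_load_balancing_loss():
    num_layers, num_tokens, num_experts, top_k = 2, 8, 4, 2
    # One logits tensor of shape [num_tokens, num_experts] per layer, mimicking `router_logits` from the model.
    gate_logits = tuple(torch.randn(num_tokens, num_experts) for _ in range(num_layers))
    aux_loss = load_balancing_loss_func(gate_logits, num_experts=num_experts, top_k=top_k)
    # For a perfectly balanced router the returned value approaches `top_k`; imbalanced routing pushes it higher.
    return aux_loss
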

class GraniteMoeRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        GraniteMoeRMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"


class GraniteMoeRotaryEmbedding(nn.Module):
    inv_freq: torch.Tensor  # type annotation for the registered buffer

    def __init__(self, config: GraniteMoeConfig, device=None):
        super().__init__()
        # BC: the rope type used to live under the "type" key of `rope_scaling`
        if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict):
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update  # advanced RoPE types (e.g. dynamic RoPE) update `inv_freq` on the fly
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)

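
# Illustrative sketch (not part of the upstream module): querying the rotary embedding for its cos/sin tables.
# The tiny config values below are assumptions made purely to keep the demo small.
def _demo_rotary_embedding():
    config = GraniteMoeConfig(hidden_size=64, num_attention_heads=4, max_position_embeddings=128)
    rotary_emb = GraniteMoeRotaryEmbedding(config)
    hidden_states = torch.randn(1, 10, config.hidden_size)
    position_ids = torch.arange(10).unsqueeze(0)
    cos, sin = rotary_emb(hidden_states, position_ids)
    # Both tables have shape (batch_size, seq_len, head_dim); they are consumed by `apply_rotary_pos_emb` below.
    return cos.shape, sin.shape

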
def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed

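
# Illustrative sketch (not part of the upstream module): applying RoPE to query/key states shaped the way the
# attention module below shapes them, i.e. (batch, num_heads, seq_len, head_dim). All sizes are demo assumptions.
def _demo_apply_rotary_pos_emb():
    batch, num_heads, seq_len, head_dim = 1, 4, 10, 16
    q = torch.randn(batch, num_heads, seq_len, head_dim)
    k = torch.randn(batch, num_heads, seq_len, head_dim)
    # Build cos/sin the same way the default RoPE type does: outer product of positions and inverse frequencies.
    inv_freq = 1.0 / (10000.0 ** (torch.arange(0, head_dim, 2).float() / head_dim))
    freqs = torch.outer(torch.arange(seq_len).float(), inv_freq)
    emb = torch.cat((freqs, freqs), dim=-1)
    cos, sin = emb.cos()[None, :, :], emb.sin()[None, :, :]  # (batch, seq_len, head_dim)
    q_embed, k_embed = apply_rotary_pos_emb(q, k, cos, sin)
    # The rotation is norm-preserving: per-token L2 norms of q and q_embed match up to float error.
    return torch.allclose(q.norm(dim=-1), q_embed.norm(dim=-1), atol=1e-5)

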
class GraniteMoeParallelExperts(nn.Module):
    def __init__(self, num_experts: int, input_size: int, output_size: int) -> None:
        """
        Initialize the GraniteMoeParallelExperts module.
        The experts weights are stored in [num_experts, output_size, input_size] format. Such that it's compatible with
        many MoE libraries, such as [Megablock](https://github.com/databricks/megablocks) and
        [ScatterMoE](https://github.com/shawntan/scattermoe), as well as the
        [MoE kernel](https://github.com/vllm-project/vllm/blob/main/vllm/model_executor/layers/fused_moe/fused_moe.py)
        used in vllm.

        Args:
            num_experts (int):
                Number of experts.
            input_size (int):
                Size of the input.
            output_size (int):
                Size of the output.
        """
        super().__init__()
        self.weight = nn.Parameter(torch.empty(num_experts, output_size, input_size))
        self.num_experts = num_experts
        self.input_size = input_size
        self.output_size = output_size

    def forward(self, inputs, expert_size):
        """
        Forward pass of the GraniteMoeParallelExperts module.

        Args:
            inputs (Tensor):
                Input tensor.
            expert_size:
                Expert size information.

        Returns:
            Tensor: Output tensor.
        """
        input_list = inputs.split(expert_size, dim=0)
        output_list = []
        for i in range(self.num_experts):
            output_list.append(F.linear(input_list[i], self.weight[i]))
        results = torch.cat(output_list, dim=0)
        return results


class GraniteMoeTopKGating(nn.Module):
    def __init__(self, input_size: int, num_experts: int, top_k: int):
        """
        Initialize the top-k gating mechanism.
        Args:
            input_size (`int`):
                Size of the input.
            num_experts (`int`):
                Number of experts.
            top_k (`int`):
                Number of top experts to select.
        """
        super().__init__()

        self.num_experts = num_experts
        self.input_size = input_size
        self.top_k = top_k

        self.layer = nn.Linear(input_size, num_experts, bias=False)

    def forward(self, hidden_states):
        # compute the top-k routing decision
        logits = self.layer(hidden_states).float()  # [num_tokens, num_experts]
        top_k_logits, top_k_indices = logits.topk(self.top_k, dim=1)  # [num_tokens, top_k]
        top_k_gates = torch.softmax(top_k_logits, dim=1).type_as(hidden_states)  # [num_tokens, top_k]

        # compute the number of inputs given to each expert
        zeros = torch.zeros(
            [top_k_gates.size(0), self.num_experts], dtype=top_k_gates.dtype, device=top_k_gates.device
        )  # [num_tokens, num_experts]
        gates = zeros.scatter(1, top_k_indices, 1)  # [num_tokens, num_experts]
        expert_size = gates.long().sum(0)  # [num_experts,]
        expert_size = expert_size.tolist()

        # sort and group input tokens according to expert assignment
        top_k_experts = top_k_indices.flatten()  # [num_tokens * top_k]
        _, index_sorted_experts = top_k_experts.sort(0)  # [num_tokens * top_k]
        batch_index = index_sorted_experts.div(self.top_k, rounding_mode="trunc")  # [num_tokens * top_k]

        # gather the gate values for grouped input tokens
        top_k_gates = top_k_gates.flatten()  # [num_tokens * top_k]
        batch_gates = top_k_gates[index_sorted_experts]  # [num_tokens * top_k]

        return index_sorted_experts, batch_index, batch_gates, expert_size, logits


class GraniteMoeMoE(nn.Module):
    """
    A Sparsely gated mixture of experts layer with 1-layer Feed-Forward networks as experts.

    Args:
        config:
            Configuration object with model hyperparameters.
    """

    def __init__(self, config: GraniteMoeConfig):
        super().__init__()

        self.input_size = config.hidden_size
        self.hidden_size = config.intermediate_size
        self.activation = ACT2FN[config.hidden_act]
        self.input_linear = GraniteMoeParallelExperts(config.num_local_experts, self.input_size, self.hidden_size * 2)
        self.output_linear = GraniteMoeParallelExperts(config.num_local_experts, self.hidden_size, self.input_size)

        self.router = GraniteMoeTopKGating(
            input_size=self.input_size,
            num_experts=config.num_local_experts,
            top_k=config.num_experts_per_tok,
        )

    def forward(self, layer_input):
        """
        Forward pass of the mixture of experts layer.

        Args:
            layer_input (Tensor):
                Input tensor.

        Returns:
            Tensor:
                Output tensor.
            Tensor:
                Router logits.
        """
        bsz, length, emb_size = layer_input.size()
        layer_input = layer_input.reshape(-1, emb_size)
        _, batch_index, batch_gates, expert_size, router_logits = self.router(layer_input)

        expert_inputs = layer_input[batch_index]
        hidden_states = self.input_linear(expert_inputs, expert_size)
        chunked_hidden_states = hidden_states.chunk(2, dim=-1)
        hidden_states = self.activation(chunked_hidden_states[0]) * chunked_hidden_states[1]
        expert_outputs = self.output_linear(hidden_states, expert_size)

        expert_outputs = expert_outputs * batch_gates[:, None]

        zeros = torch.zeros((bsz * length, self.input_size), dtype=expert_outputs.dtype, device=expert_outputs.device)
        layer_output = zeros.index_add(0, batch_index, expert_outputs)
        layer_output = layer_output.view(bsz, length, self.input_size)
        return layer_output, router_logits


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


class GraniteMoeAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: GraniteMoeConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        if layer_idx is None:
            logger.warning_once(
                f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
                "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
                "when creating this class."
            )

        self.attention_dropout = config.attention_dropout
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.num_key_value_heads = config.num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.is_causal = True
        self.scaling = config.attention_multiplier

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
                f" and `num_heads`: {self.num_heads})."
            )

        self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
        self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
        self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
        self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=config.attention_bias)

    @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs,
    ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        cos, sin = position_embeddings if position_embeddings is not None else (None, None)
        if position_embeddings is not None:
            query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_values is not None:
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(bsz, q_len, -1)
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


class GraniteMoeDecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: GraniteMoeConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size

        self.self_attn = GraniteMoeAttention(config=config, layer_idx=layer_idx)
        if config.num_local_experts > 0:
            self.block_sparse_moe = GraniteMoeMoE(config)
        self.input_layernorm = GraniteMoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = GraniteMoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

        self.residual_multiplier = config.residual_multiplier

    @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        output_router_logits: Optional[bool] = False,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs,
    ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*):
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
                query_sequence_length, key_sequence_length)` if default attention is used.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_values (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence
            output_router_logits (`bool`, *optional*):
                Whether or not to return the logits of all the routers. They are useful for computing the router loss, and
                should not be returned during inference.
            position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
                Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
                with `head_dim` being the embedding dimension of each attention head.
            kwargs (`dict`, *optional*):
                Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code
                into the model
        """
        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )

        hidden_states = residual + hidden_states * self.residual_multiplier

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states, router_logits = self.block_sparse_moe(hidden_states)
        hidden_states = residual + hidden_states * self.residual_multiplier

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        if output_router_logits:
            outputs += (router_logits,)

        return outputs


@auto_docstring
class GraniteMoePreTrainedModel(PreTrainedModel):
    config: GraniteMoeConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["GraniteMoeDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn = True
    _supports_sdpa = True
    _can_compile_fullgraph = False

    def _init_weights(self, module):
        super()._init_weights(module)
        if isinstance(module, GraniteMoeParallelExperts):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)


@auto_docstring
class GraniteMoeModel(GraniteMoePreTrainedModel):
    def __init__(self, config: GraniteMoeConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [GraniteMoeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = GraniteMoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.gradient_checkpointing = False

        self.embedding_multiplier = config.embedding_multiplier
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.max_position_embeddings = config.max_position_embeddings
        self.rope_theta = config.rope_theta

        # rope
        self.position_embedding_type = config.position_embedding_type
        self.rotary_emb = GraniteMoeRotaryEmbedding(config) if self.position_embedding_type == "rope" else None

        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        output_router_logits: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Union[tuple, MoeModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_router_logits = (
            output_router_logits if output_router_logits is not None else self.config.output_router_logits
        )
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        inputs_embeds = inputs_embeds * self.embedding_multiplier

        if not isinstance(past_key_values, (type(None), Cache)):
            raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.")

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = self._update_causal_mask(
            attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
        )

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = None
        if self.rotary_emb is not None:
            position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_router_logits = () if output_router_logits else None

        for decoder_layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                output_router_logits=output_router_logits,
                position_embeddings=position_embeddings,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

            if output_router_logits:
                all_router_logits += (layer_outputs[-1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_router_logits]
                if v is not None
            )
        return MoeModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            router_logits=all_router_logits,
        )

    def _update_causal_mask(
        self,
        attention_mask: Union[torch.Tensor, "BlockMask"],
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
        output_attentions: bool = False,
    ):
        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and (attention_mask == 0.0).any():
                return attention_mask
            return None
        if self.config._attn_implementation == "flex_attention":
            if isinstance(attention_mask, torch.Tensor):
                attention_mask = make_flex_block_causal_mask(attention_mask)
            return attention_mask

        # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
        # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
        # to infer the attention mask.
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False

        # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
        if self.config._attn_implementation == "sdpa" and not using_compilable_cache and not output_attentions:
            if AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                is_training=self.training,
            ):
                return None

        dtype = input_tensor.dtype
        sequence_length = input_tensor.shape[1]
        if using_compilable_cache:
            target_length = past_key_values.get_max_cache_shape()
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
        )

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type in ["cuda", "xpu", "npu"]
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
            # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
            # Details: https://github.com/pytorch/pytorch/issues/110213
            min_dtype = torch.finfo(dtype).min
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask

    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        cache_position: torch.Tensor,
        batch_size: int,
        **kwargs,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
            )
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
                    causal_mask.device
                )
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )

        return causal_mask


class GraniteMoeForCausalLM(GraniteMoePreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config: GraniteMoeConfig):
        super().__init__(config)
        self.model = GraniteMoeModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.router_aux_loss_coef = config.router_aux_loss_coef
        self.num_experts = config.num_local_experts
        self.num_experts_per_tok = config.num_experts_per_tok
        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        output_router_logits: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs,
    ) -> Union[tuple, MoeCausalLMOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, GraniteMoeForCausalLM

        >>> model = GraniteMoeForCausalLM.from_pretrained("ibm/PowerMoE-3b")
        >>> tokenizer = AutoTokenizer.from_pretrained("ibm/PowerMoE-3b")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_router_logits = (
            output_router_logits if output_router_logits is not None else self.config.output_router_logits
        )
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            output_router_logits=output_router_logits,
            return_dict=return_dict,
            cache_position=cache_position,
        )

        hidden_states = outputs[0]
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])
        logits = logits / self.config.logits_scaling

        loss = None
        if labels is not None:
            # Upcast to float to avoid potential precision issues when computing the loss
            logits = logits.float()
            loss = self.loss_function(logits, labels, vocab_size=self.config.vocab_size, **kwargs)

        aux_loss = None
        if output_router_logits:
            aux_loss = load_balancing_loss_func(
                outputs.router_logits if return_dict else outputs[-1],
                self.num_experts,
                self.num_experts_per_tok,
                attention_mask,
            )
            if labels is not None:
                loss += self.router_aux_loss_coef * aux_loss.to(loss.device)  # make sure to reside in the same device

        if not return_dict:
            output = (logits,) + outputs[1:]
            if output_router_logits:
                output = (aux_loss,) + output
            return (loss,) + output if loss is not None else output

        return MoeCausalLMOutputWithPast(
            loss=loss,
            aux_loss=aux_loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            router_logits=outputs.router_logits,
        )


__all__ = ["GraniteMoeForCausalLM", "GraniteMoeModel", "GraniteMoePreTrainedModel"]