a
    hZ                  
   @   s<  d dl mZmZmZ d dlZd dlmZ ddlmZ ddlm	Z	m
Z
 ddlmZ ddlmZ dd	lmZmZ dd
lmZ ddlmZmZmZmZ ddlmZmZ ddlmZmZ ddlm Z m!Z! ddl"m#Z# ddl$m%Z%m&Z&m'Z' ddl(m)Z) ddl*m+Z+ ddl,m-Z- edG dd dej.Z/G dd dej.Z0dd Z1d8ddZ2ej3e4ej3ddd Z5d9ej.ej3ej3ej3eej3 e6e6e#e% d"d#d$Z7G d%d& d&ej.Z8G d'd( d(eZ9e&G d)d* d*e!Z:G d+d, d,ej.Z;e&G d-d. d.e:Z<e&G d/d0 d0e:eZ=G d1d2 d2ee:Z>G d3d4 d4ee:Z?G d5d6 d6ee:Z@g d7ZAdS ):    )CallableOptionalUnionN)nn   )ACT2FN)CacheDynamicCache)GenerationMixin)use_kernel_forward_from_hub)create_causal_mask!create_sliding_window_causal_mask)FlashAttentionKwargs)GenericForQuestionAnswering GenericForSequenceClassificationGenericForTokenClassificationGradientCheckpointingLayer)BaseModelOutputWithPastCausalLMOutputWithPast)ROPE_INIT_FUNCTIONSdynamic_rope_update)ALL_ATTENTION_FUNCTIONSPreTrainedModel)Unpack)TransformersKwargsauto_docstringcan_return_tuple)deprecate_kwarg)check_model_inputs   )Qwen3ConfigZRMSNormc                       sB   e Zd Zdedd fddZejejdddZd	d
 Z  Z	S )Qwen3RMSNormư>N)epsreturnc                    s&   t    tt|| _|| _dS )z;
        Qwen3RMSNorm is equivalent to T5LayerNorm
        N)super__init__r   	ParametertorchZonesweightvariance_epsilon)selfhidden_sizer#   	__class__ d/var/www/html/assistant/venv/lib/python3.9/site-packages/transformers/models/qwen3/modeling_qwen3.pyr&   3   s    
zQwen3RMSNorm.__init__)hidden_statesr$   c                 C   sJ   |j }|tj}|djddd}|t|| j  }| j|| S )N   T)Zkeepdim)	dtypetor(   float32powmeanZrsqrtr*   r)   )r+   r1   Zinput_dtypeZvariancer/   r/   r0   forward;   s
    zQwen3RMSNorm.forwardc                 C   s   t | jj d| j S )Nz, eps=)tupler)   shaper*   )r+   r/   r/   r0   
extra_reprB   s    zQwen3RMSNorm.extra_repr)r"   )
__name__
__module____qualname__floatr&   r(   Tensorr9   r<   __classcell__r/   r/   r-   r0   r!   1   s   r!   c                       s$   e Zd Z fddZdd Z  ZS )Qwen3MLPc                    sr   t    || _|j| _|j| _tj| j| jdd| _tj| j| jdd| _tj| j| jdd| _	t
|j | _d S NFZbias)r%   r&   configr,   Zintermediate_sizer   Linear	gate_projup_proj	down_projr   Z
hidden_actact_fnr+   rF   r-   r/   r0   r&   G   s    
zQwen3MLP.__init__c                 C   s$   |  | | || | }|S )N)rJ   rK   rH   rI   )r+   xrJ   r/   r/   r0   r9   Q   s     zQwen3MLP.forward)r=   r>   r?   r&   r9   rB   r/   r/   r-   r0   rC   F   s   
rC   c                 C   sH   | dd| j d d f }| d| j d d df }tj| |fddS )z*Rotates half the hidden dims of the input..Nr3   r2   dim)r;   r(   cat)rM   x1Zx2r/   r/   r0   rotate_halfV   s    rR   c                 C   sD   | |}| |}| | t| |  }|| t||  }||fS )a  Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    )	unsqueezerR   )qkcossinposition_idsZunsqueeze_dimZq_embedZk_embedr/   r/   r0   apply_rotary_pos_emb]   s
    

rY   )r1   n_repr$   c                 C   s^   | j \}}}}|dkr| S | dddddddddf |||||} | ||| ||S )z
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    r   N)r;   expandreshape)r1   rZ   batchnum_key_value_headsslenhead_dimr/   r/   r0   	repeat_kvx   s
    0ra           )modulequerykeyvalueattention_maskscalingdropoutkwargsc                 K   s   t || j}t || j}	t||dd| }
|d urf|d d d d d d d |jd f }|
| }
tjj|
dtj	d
|j}
tjj|
|| jd}
t|
|	}|dd }||
fS )Nr2   r   r3   )rO   r4   )ptrainingr   )ra   num_key_value_groupsr(   matmul	transposer;   r   Z
functionalZsoftmaxr6   r5   r4   ri   rm   
contiguous)rc   rd   re   rf   rg   rh   ri   rj   
key_statesvalue_statesattn_weightsZcausal_maskattn_outputr/   r/   r0   eager_attention_forward   s    
&rv   c                       s   e Zd ZdZeed fddZedddddej	e
ej	ej	f eej	 ee eej ee e
ej	eej	 f d
ddZ  ZS )Qwen3Attentionz=Multi-headed attention from 'Attention Is All You Need' paperrF   	layer_idxc                    s  t    || _|| _t|d|j|j | _|j|j | _	| jd | _
|j| _d| _tj|j|j| j |jd| _tj|j|j| j |jd| _tj|j|j| j |jd| _tj|j| j |j|jd| _t| j|jd| _t| j|jd| _|j| dkr|jnd | _d S )Nr`   g      TrE   r#   sliding_attention)r%   r&   rF   ry   getattrr,   Znum_attention_headsr`   r^   rn   rh   attention_dropoutZ	is_causalr   rG   Zattention_biasq_projk_projv_projo_projr!   rms_norm_epsq_normk_normlayer_typessliding_windowr+   rF   ry   r-   r/   r0   r&      s.    
zQwen3Attention.__init__past_key_valuepast_key_values4.58new_nameversionN)r1   position_embeddingsrg   r   cache_positionrj   r$   c                 K   s4  |j d d }g |d| jR }| | ||dd}	| | ||dd}
| ||dd}|\}}t	|	|
||\}	}
|d ur|||d}|
|
|| j|\}
}t}| jjdkrt| jj }|| |	|
||f| jsdn| j| j| jd|\}}|jg |dR   }| |}||fS )Nr3   r   r2   )rW   rV   r   eagerrb   )ri   rh   r   )r;   r`   r   r~   viewrp   r   r   r   rY   updatery   rv   rF   Z_attn_implementationr   rm   r}   rh   r   r\   rq   r   )r+   r1   r   rg   r   r   rj   Zinput_shapeZhidden_shapeZquery_statesrr   rs   rV   rW   Zcache_kwargsZattention_interfaceru   rt   r/   r/   r0   r9      s:    
	

zQwen3Attention.forward)NN)r=   r>   r?   __doc__r    intr&   r   r(   rA   r:   r   r   
LongTensorr   r   r9   rB   r/   r/   r-   r0   rw      s     rw   c                       s   e Zd Zeed fddZedddddeje	ej e	ej
 e	e e	e e	ej
 e	eejejf  ee ejd
	ddZ  ZS )Qwen3DecoderLayerrx   c                    s^   t    |j| _t||d| _t|| _t|j|jd| _	t|j|jd| _
|j| | _d S )Nrx   rz   )r%   r&   r,   rw   	self_attnrC   mlpr!   r   input_layernormpost_attention_layernormr   attention_typer   r-   r/   r0   r&      s    

zQwen3DecoderLayer.__init__r   r   r   r   NF)	r1   rg   rX   r   	use_cacher   r   rj   r$   c              
   K   s^   |}	|  |}| jf |||||||d|\}}
|	| }|}	| |}| |}|	| }|S )N)r1   rg   rX   r   r   r   r   )r   r   r   r   )r+   r1   rg   rX   r   r   r   r   rj   Zresidual_r/   r/   r0   r9      s&    




zQwen3DecoderLayer.forward)NNNFNN)r=   r>   r?   r    r   r&   r   r(   rA   r   r   r   boolr:   r   r   r9   rB   r/   r/   r-   r0   r      s&         r   c                   @   sH   e Zd ZU eed< dZdZdgZdgZdZ	dZ
dZdZdZeedZdS )Qwen3PreTrainedModelrF   modelTr   r   )r1   
attentionsN)r=   r>   r?   r    __annotations__base_model_prefixZsupports_gradient_checkpointingZ_no_split_modulesZ_skip_keys_device_placementZ_supports_flash_attnZ_supports_sdpaZ_supports_flex_attnZ_can_compile_fullgraphZ_supports_attention_backendr   rw   Z_can_record_outputsr/   r/   r/   r0   r     s   
r   c                       sD   e Zd ZU ejed< ded fddZe e	dd Z
  ZS )	Qwen3RotaryEmbeddinginv_freqNrF   c                    s   t    t|dr:t|jtr:|jd|jd| _nd| _|j| _	|j| _
|| _t| j | _| | j|\}| _| jd|dd | j| _d S )Nrope_scaling	rope_typetypedefaultr   F)
persistent)r%   r&   hasattr
isinstancer   dictgetr   Zmax_position_embeddingsZmax_seq_len_cachedZoriginal_max_seq_lenrF   r   Zrope_init_fnattention_scalingZregister_bufferr   Zoriginal_inv_freq)r+   rF   devicer   r-   r/   r0   r&   .  s    
zQwen3RotaryEmbedding.__init__c           
      C   s   | j d d d d f  |jd dd|j}|d d d d d f  }t|jjtrl|jjdkrl|jjnd}t	j
|ddV | |  dd}t	j||fdd	}| | j }| | j }	W d    n1 s0    Y  |j|jd
|	j|jd
fS )Nr   r3   r   ZmpscpuF)device_typeZenabledr2   rN   )r4   )r   r@   r[   r;   r5   r   r   r   strr(   Zautocastrp   rP   rV   r   rW   r4   )
r+   rM   rX   Zinv_freq_expandedZposition_ids_expandedr   ZfreqsZembrV   rW   r/   r/   r0   r9   ?  s    0&,zQwen3RotaryEmbedding.forward)N)r=   r>   r?   r(   rA   r   r    r&   Zno_gradr   r9   rB   r/   r/   r-   r0   r   +  s
   

r   c                       st   e Zd Zed fddZeedeej	 eej
 eej	 ee eej ee eej	 ee ed	ddZ  ZS )	
Qwen3Modelr   c                    s   t     j| _ j| _t j j| j| _t	 fddt
 jD | _t j jd| _t d| _d| _d| jjv | _|   d S )Nc                    s   g | ]}t  |qS r/   )r   ).0ry   r   r/   r0   
<listcomp>X      z'Qwen3Model.__init__.<locals>.<listcomp>rz   r   Fr{   )r%   r&   Zpad_token_idZpadding_idx
vocab_sizer   Z	Embeddingr,   embed_tokensZ
ModuleListrangenum_hidden_layerslayersr!   r   normr   
rotary_embZgradient_checkpointingrF   r   has_sliding_layers	post_initrL   r-   r   r0   r&   Q  s    zQwen3Model.__init__N)		input_idsrg   rX   r   inputs_embedsr   r   rj   r$   c              
   K   sD  |d u |d uA rt d|d u r*| |}|rB|d u rBt| jd}|d u rz|d urZ| nd}	tj|	|	|jd  |jd}|d u r|	d}t
| }
ts| j|||||d}dtf i |i}
| jrtf i ||
d< |}| ||}| jd | jj D ](}||f|
|j |||||d	|}q| |}t||r<|nd d
S )Nz:You must specify exactly one of input_ids or inputs_embedsr   r   r   )r   )rF   Zinput_embedsrg   r   r   rX   Zfull_attentionr{   )rg   rX   r   r   r   r   )last_hidden_stater   )
ValueErrorr   r	   rF   Zget_seq_lengthr(   Zaranger;   r   rS   r   r   r   r   r   r   r   r   r   r   r   )r+   r   rg   rX   r   r   r   r   rj   Zpast_seen_tokensZcausal_mask_mappingZmask_kwargsr1   r   Zdecoder_layerr/   r/   r0   r9   b  sZ    



zQwen3Model.forward)NNNNNNN)r=   r>   r?   r    r&   r   r   r   r(   r   rA   r   FloatTensorr   r   r   r   r9   rB   r/   r/   r-   r0   r   O  s*          r   c                       s   e Zd ZdgZddiZddgdgfiZ fddZeede	e
j e	e
j e	e
j e	e e	e
j e	e
j e	e e	e
j eee
jf ee ed
ddZ  ZS )Qwen3ForCausalLMzlm_head.weightlm_headZcolwise_repr1   logitsc                    s@   t  | t|| _|j| _tj|j|jdd| _| 	  d S rD   )
r%   r&   r   r   r   r   rG   r,   r   r   rL   r-   r/   r0   r&     s
    
zQwen3ForCausalLM.__init__Nr   )r   rg   rX   r   r   labelsr   r   logits_to_keeprj   r$   c
              
   K   s   | j f |||||||d|
}|j}t|	tr<t|	 dn|	}| |dd|ddf }d}|dur| jf ||| jjd|
}t	|||j
|j|jdS )a^  
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, Qwen3ForCausalLM

        >>> model = Qwen3ForCausalLM.from_pretrained("Qwen/Qwen3-8B")
        >>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-8B")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```)r   rg   rX   r   r   r   r   N)r   r   r   )lossr   r   r1   r   )r   r   r   r   slicer   Zloss_functionrF   r   r   r   r1   r   )r+   r   rg   rX   r   r   r   r   r   r   rj   outputsr1   Zslice_indicesr   r   r/   r/   r0   r9     s0    %zQwen3ForCausalLM.forward)	NNNNNNNNr   )r=   r>   r?   Z_tied_weights_keysZ_tp_planZ_pp_planr&   r   r   r   r(   r   rA   r   r   r   r   r   r   r   r   r9   rB   r/   r/   r-   r0   r     s8   	         r   c                   @   s   e Zd ZdS )Qwen3ForSequenceClassificationNr=   r>   r?   r/   r/   r/   r0   r     s   r   c                   @   s   e Zd ZdS )Qwen3ForTokenClassificationNr   r/   r/   r/   r0   r     s   r   c                   @   s   e Zd ZdZdS )Qwen3ForQuestionAnsweringZtransformerN)r=   r>   r?   r   r/   r/   r/   r0   r     s   r   )r   r   r   r   r   r   )Nr   )rb   )Btypingr   r   r   r(   r   Zactivationsr   Zcache_utilsr   r	   Z
generationr
   Zintegrationsr   Zmasking_utilsr   r   Zmodeling_flash_attention_utilsr   Zmodeling_layersr   r   r   r   Zmodeling_outputsr   r   Zmodeling_rope_utilsr   r   Zmodeling_utilsr   r   Zprocessing_utilsr   utilsr   r   r   Zutils.deprecationr   Zutils.genericr   Zconfiguration_qwen3r    Moduler!   rC   rR   rY   rA   r   ra   r@   rv   rw   r   r   r   r   r   r   r   r   __all__r/   r/   r/   r0   <module>   s^   
 K/$\P