a
    h5                 
   @   sb  d Z ddlZddlZddlmZmZmZ ddlZddl	Z	ddl
Z	ddl	mZ ddlmZ ddlmZ ddlmZmZmZ dd	lmZ dd
lmZmZmZ ddlmZ ddlmZ ddlmZm Z m!Z!m"Z"m#Z# ddl$m%Z%m&Z& ddl'm(Z( ddl)m*Z*m+Z+m,Z,m-Z- ddl.m/Z/ ddl0m1Z1 e+ r8ddl2m3Z3 ddl4m5Z5 e-6e7Z8e	j9e:e:dddZ;G dd dej<Z=d6ej>e	j9e	j9e	j9ee	j9 ee? e?ee	j9 dddZ@G dd  d ej>ZAG d!d" d"eZBG d#d$ d$eZCe*G d%d& d&e&ZDG d'd( d(eDZEG d)d* d*eDZFe*G d+d, d,eDZGe*d-d.G d/d0 d0eDeZHG d1d2 d2eDZIG d3d4 d4eDeZJg d5ZKdS )7z=PyTorch MarianMTModel model, ported from the Marian C++ repo.    N)CallableOptionalUnion)nn)CrossEntropyLoss   )ACT2FN)CacheDynamicCacheEncoderDecoderCache)GenerationMixin)AttentionMaskConverter_prepare_4d_attention_mask#_prepare_4d_attention_mask_for_sdpa)FlashAttentionKwargs)GradientCheckpointingLayer)BaseModelOutput)BaseModelOutputWithPastAndCrossAttentions!CausalLMOutputWithCrossAttentionsSeq2SeqLMOutputSeq2SeqModelOutput)ALL_ATTENTION_FUNCTIONSPreTrainedModel)Unpack)auto_docstringis_torch_flex_attn_availableis_torchdynamo_compilinglogging)deprecate_kwarg   )MarianConfig)	BlockMask)make_flex_block_causal_mask)	input_idspad_token_iddecoder_start_token_idc                 C   sh   |  | j}| ddddf  |ddddf< ||dddf< |du rTtd||dk| |S )z1
    Shift input ids one token to the right.
    Nr   r   z1self.model.config.pad_token_id has to be defined.i)Z	new_zerosshapeclone
ValueErrorZmasked_fill_)r#   r$   r%   Zshifted_input_ids r*   f/var/www/html/assistant/venv/lib/python3.9/site-packages/transformers/models/marian/modeling_marian.pyshift_tokens_rightB   s    (r,   c                       sf   e Zd ZdZdeeee dd fddZdd Ze	 dej
eeej ejd	 fd
dZ  ZS )#MarianSinusoidalPositionalEmbeddingzDThis module produces sinusoidal positional embeddings of any length.N)num_positionsembedding_dimpadding_idxreturnc                    s   t  || d S N)super__init__)selfr.   r/   r0   	__class__r*   r+   r4   U   s    z,MarianSinusoidalPositionalEmbedding.__init__c              	      s   | j j\} t fddt|D }tj| | j jdd} d dkrR d n
 d d }tt	|dddddf |ddd|f< tt
|dddddf |dd|df< tj|dd	| _ dS )
z
        Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
        the 2nd half of the vector. [dim // 2:]
        c                    s$   g | ]  fd dt D qS )c              	      s(   g | ] }t d d|d     qS )i'     )nppower).0j)dimposr*   r+   
<listcomp>_       zOMarianSinusoidalPositionalEmbedding._init_weight.<locals>.<listcomp>.<listcomp>)range)r;   r=   )r>   r+   r?   _   r@   zDMarianSinusoidalPositionalEmbedding._init_weight.<locals>.<listcomp>F)dtyperequires_gradr8   r   r   N)rD   )weightr'   r9   arrayrA   torchemptyrC   FloatTensorsincosr   	Parameter)r5   Zn_posZposition_encoutsentinelr*   rB   r+   _init_weightX   s     22z0MarianSinusoidalPositionalEmbedding._init_weightr   )input_ids_shapepast_key_values_lengthposition_idsr1   c                    s@   |du r4|dd \}}t j||| t j| jjd}t |S )z3`input_ids_shape` is expected to be [bsz x seqlen].Nr8   )rC   device)rG   arangelongrE   rS   r3   forward)r5   rP   rQ   rR   bszZseq_lenr6   r*   r+   rV   g   s    z+MarianSinusoidalPositionalEmbedding.forward)N)r   N)__name__
__module____qualname____doc__intr   r4   rO   rG   Zno_gradSizeTensorrV   __classcell__r*   r*   r6   r+   r-   R   s    r-           )modulequerykeyvalueattention_maskscalingdropout	head_maskc                 K   s   |d u r| dd }t||dd| }	|d ur>|	| }	tjj|	dd}	|d urj|	|dddd }	tjj|	|| j	d}	t|	|}
|
dd
 }
|
|	fS )Nr&         r8   r   rB   r   ptraining)sizerG   matmul	transposer   
functionalZsoftmaxviewrg   rl   
contiguous)ra   rb   rc   rd   re   rf   rg   rh   kwargsattn_weightsattn_outputr*   r*   r+   eager_attention_forwardu   s    rv   c                       s   e Zd ZdZdeeeeeeee ee d fddZ	e
d	d
dddejeej ee eej eej eeej ee eejeej eeej  f d	ddZ  ZS )MarianAttentionz=Multi-headed attention from 'Attention Is All You Need' paperr`   FTN)	embed_dim	num_headsrg   
is_decoderbias	is_causalconfig	layer_idxc	           	         s   t    || _|| _|| _|| | _|| _| j| | jkrTtd| j d| d| jd | _|| _	|| _
|| _|d u r| j	rtd| jj d tj|||d| _tj|||d| _tj|||d| _tj|||d| _d S )Nz;embed_dim must be divisible by num_heads (got `embed_dim`: z and `num_heads`: z).ri   zInstantiating a decoder z without passing `layer_idx` is not recommended and will lead to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` when creating this class.r{   )r3   r4   rx   ry   rg   head_dimr}   r)   rf   rz   r|   r~   loggerwarning_oncer7   rX   r   Lineark_projv_projq_projout_proj)	r5   rx   ry   rg   rz   r{   r|   r}   r~   r6   r*   r+   r4      s0    


zMarianAttention.__init__past_key_valuepast_key_values4.58new_nameversion)	hidden_stateskey_value_statesr   re   layer_head_maskoutput_attentionscache_positionrs   r1   c                 K   s  |du}	|j dd \}
}|	r(|j d n|}|
|d| jf}|
|d| jf}| |j| dd}|durt|tr|j| j	}|	r|j
}q|j}n|}|	r|n|}|	r|dur|r|j| j	 j}|j| j	 j}n|| |}| |}|j| dd}|j| dd}|durN|	s|nd}|||| j	d|i\}}|	rNd|j| j	< t}| jjdkrlt| jj }|| ||||f| jsdn| j| j||d	|\}}||
|d }| |}||fS )
z#Input shape: Batch x Time x ChannelNr&   r   r8   r   Teagerr`   )rg   rf   r   rh   )r'   r   r   rq   ro   
isinstancer   
is_updatedgetr~   Zcross_attention_cacheself_attention_cachelayerskeysvaluesr   r   updaterv   r}   _attn_implementationr   rl   rg   rf   reshaperr   r   )r5   r   r   r   re   r   r   r   rs   Zis_cross_attentionrW   tgt_lenZsrc_lenZq_input_shapeZkv_input_shapeZquery_statesr   Zcurr_past_key_valueZcurrent_statesZ
key_statesZvalue_statesZattention_interfaceru   rt   r*   r*   r+   rV      s`    






zMarianAttention.forward)r`   FTFNN)NNNNFN)rX   rY   rZ   r[   r\   floatboolr   r    r4   r   rG   r^   r	   r   r   tuplerV   r_   r*   r*   r6   r+   rw      sF         '      rw   c                	       s\   e Zd Zd	eee d fddZd
ejejejee	 e
ejeej f dddZ  ZS )MarianEncoderLayerNr}   r~   c                    s   t    |j| _t| j|j|j||d| _t	| j| _
|j| _t|j | _|j| _t| j|j| _t|j| j| _t	| j| _d S )N)rx   ry   rg   r}   r~   )r3   r4   d_modelrx   rw   Zencoder_attention_headsattention_dropout	self_attnr   	LayerNormself_attn_layer_normrg   r   activation_functionactivation_fnactivation_dropoutr   Zencoder_ffn_dimfc1fc2final_layer_normr5   r}   r~   r6   r*   r+   r4     s     
zMarianEncoderLayer.__init__F)r   re   r   r   r1   c           	      C   s  |}| j ||||d\}}tjj|| j| jd}|| }| |}|}| | |}tjj|| j| jd}| 	|}tjj|| j| jd}|| }| 
|}|jtjkrt| st| rt|jjd }tj|| |d}|f}|r||f7 }|S )a  
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        )r   re   r   r   rj   i  )minmax)r   r   rp   rg   rl   r   r   r   r   r   r   rC   rG   Zfloat16isinfanyisnanfinfor   clamp)	r5   r   re   r   r   residualrt   Zclamp_valueoutputsr*   r*   r+   rV   (  s8    




zMarianEncoderLayer.forward)N)F)rX   rY   rZ   r    r   r\   r4   rG   rI   r   r   rV   r_   r*   r*   r6   r+   r     s    r   c                       s   e Zd Zdeee d fddZedddddej	eej	 eej	 eej	 eej	 eej	 ee
 ee ee eej	 eejeeejejf  f dddZ  ZS )MarianDecoderLayerNr   c              	      s   t    |j| _t| j|j|jdd||d| _|j| _t	|j
 | _|j| _t| j| _t| j|j|jd||d| _t| j| _t| j|j| _t|j| j| _t| j| _d S )NT)rx   ry   rg   rz   r|   r}   r~   )rg   rz   r}   r~   )r3   r4   r   rx   rw   Zdecoder_attention_headsr   r   rg   r   r   r   r   r   r   r   encoder_attnencoder_attn_layer_normr   Zdecoder_ffn_dimr   r   r   r   r6   r*   r+   r4   ]  s6    
	zMarianDecoderLayer.__init__r   r   r   r   FT)r   re   encoder_hidden_statesencoder_attention_maskr   cross_attn_layer_head_maskr   r   	use_cacher   r1   c              	   C   s  |}| j ||||||
d\}}tjj|| j| jd}|| }| |}d}|dur|}| j|||||||
d\}}tjj|| j| jd}|| }| |}|}| | 	|}tjj|| j
| jd}| |}tjj|| j| jd}|| }| |}|f}|r|||f7 }|S )a9  
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            encoder_hidden_states (`torch.FloatTensor`):
                cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
            encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`.
            cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
                size `(decoder_attention_heads,)`.
            past_key_values (`Tuple(torch.FloatTensor)`): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
                cache in the correct position and to infer the complete sequence length.
        )r   r   re   r   r   r   rj   N)r   r   re   r   r   r   r   )r   r   rp   rg   rl   r   r   r   r   r   r   r   r   )r5   r   re   r   r   r   r   r   r   r   r   r   Zself_attn_weightsZcross_attn_weightsr   r*   r*   r+   rV   |  sL    #


	


zMarianDecoderLayer.forward)N)	NNNNNNFTN)rX   rY   rZ   r    r   r\   r4   r   rG   r^   r	   r   r   rI   rV   r_   r*   r*   r6   r+   r   \  s0            r   c                   @   s   e Zd ZU eed< dZdZdZdZdZ	dZ
eejejef dddZedd Zeejd	f ejd
ddZeeejdf  ejejedddZeejeeejejedddZeejd	f eejd	f ejejdddZd	S )MarianPreTrainedModelr}   modelT)ra   c                 C   s   | j j}t|tjr>|jjjd|d |jd ur|jj	  nvt|t
rR|  nbt|tjr|jjjd|d |jd ur|jj|j 	  n&t|tjr|jjd |jj	  d S )Nr`   )meanstd      ?)r}   Zinit_stdr   r   r   rE   dataZnormal_r{   Zzero_r-   rO   	Embeddingr0   r   Zfill_)r5   ra   r   r*   r*   r+   _init_weights  s    



z#MarianPreTrainedModel._init_weightsc                 C   s@   | j j}tjg ddddd|gg| jd}||||d}|S )N)r      
      r8   r         r8   rS   )re   r#   decoder_input_ids)r}   r$   rG   ZtensorrS   ne)r5   Z	pad_tokenr#   dummy_inputsr*   r*   r+   r     s    "z"MarianPreTrainedModel.dummy_inputsN)re   inputs_embedsc                 C   sv   |d urr| j jdkr&d|v r |nd }nL| j jdkr@t||j}n2| j jdkrft|tjrrt|dd}nt||j}|S )Nflash_attention_2r   sdpaflex_attentionF)r|   	r}   r   r   rC   r   rG   r^   r"   r   )r5   re   r   r*   r*   r+   _update_full_mask  s    z'MarianPreTrainedModel._update_full_maskr!   )re   input_tensorr   r   c                 C   sf  | j jdkrRt|tjr"t|}n,|d u rNttj|jd |jd f|jd}|S | j jdkrz|d urv|dk	 rv|S d S |d ur|
 nd}|d ur|jnd}| j jdkr|stj|||| jd	rd S |j}|jd }|r| }	n"t|tjr|jd
 n
|| d }	| j|||	|||jd d}
| j jdkrb|d urb|jjdv rbt|j}t|
|}
|
S )Nr   r   r   )rm   rS   r   r`   Fr   )r   rQ   Zis_trainingr&   )sequence_lengthtarget_lengthrC   r   
batch_size)cudaZxpuZnpu)r}   r   r   rG   r^   r"   onesr'   rS   r   get_seq_lengthZis_compileabler   Z_ignore_causal_mask_sdparl   rC   Zget_max_cache_shape5_prepare_4d_causal_attention_mask_with_cache_positiontyper   r   Z_unmask_unattended)r5   re   r   r   r   Zpast_seen_tokensZusing_compilable_cacherC   r   r   causal_mask	min_dtyper*   r*   r+   _update_causal_mask  sd    







z)MarianPreTrainedModel._update_causal_mask)re   r   r   rC   r   r   c                 K   sF  | dur|   dkr| }n&t|j}tj||f|||jd}|dkrVtj|dd}|tj||jd|ddk9 }|ddddddf 	|ddd}| durB|
 }| jd }	|ddddddd|	f | ddddddf |j }
|
dk}
|ddddddd|	f |
||ddddddd|	f< |S )	aM  
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
        Nr   )Z
fill_valuerC   rS   r   )Zdiagonalr   r&   r   )r=   rG   r   r   fullrS   ZtriurT   r   expandr(   r'   toZmasked_fill)re   r   r   rC   r   r   rs   r   r   Zmask_lengthZpadding_maskr*   r*   r+   r   [  s*     $

6  zKMarianPreTrainedModel._prepare_4d_causal_attention_mask_with_cache_position)r   r   input_shaper   c                 C   s   |d ur|d ur| j jdkr.d|v r(|nd }nb| j jdkrPt||j|d d}n@| j jdkr|t|tjrt||d dd}nt||j|d d}|S )	Nr   r   r   r&   )r   r   F)Zquery_lengthr|   r   )r5   r   r   r   r   r*   r*   r+   _update_cross_attn_mask  s(    z-MarianPreTrainedModel._update_cross_attn_mask)rX   rY   rZ   r    __annotations__base_model_prefixZsupports_gradient_checkpointingZ_supports_flash_attnZ_supports_sdpaZ_supports_flex_attnZ_can_compile_fullgraphr   r   r   r   r-   r   propertyr   rG   r^   r   r   r	   r   staticmethodr\   rC   r   r]   r   r*   r*   r*   r+   r     s>   

L9r   c                       s   e Zd ZdZd	eeej d fddZd
ee	j
 ee	j
 ee	j ee	j ee ee ee eee	j ef dddZ  ZS )MarianEncoderz
    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
    [`MarianEncoderLayer`].

    Args:
        config: MarianConfig
        embed_tokens (nn.Embedding): output embedding
    Nr}   embed_tokensc                    s   t     j| _ j| _ j} j| _ j| _	 j
rBt|nd| _|d urX|| _nt j|| j| _t j|| j| _t fddt jD | _d| _|   d S )Nr   c                    s   g | ]}t  qS r*   )r   )r;   _r}   r*   r+   r?     r@   z*MarianEncoder.__init__.<locals>.<listcomp>F)r3   r4   rg   Zencoder_layerdrop	layerdropr   r$   r0   max_position_embeddingsZmax_source_positionsscale_embeddingmathsqrtembed_scaler   r   r   
vocab_sizer-   embed_positions
ModuleListrA   Zencoder_layersr   gradient_checkpointing	post_init)r5   r}   r   rx   r6   r   r+   r4     s     
 zMarianEncoder.__init__)r#   re   rh   r   r   output_hidden_statesreturn_dictr1   c                 C   s$  |dur|n| j j}|dur |n| j j}|dur4|n| j j}|durV|durVtdnP|dur| || | }|d|d }n"|dur| dd }ntd|du r| || j	 }| 
|}	||	 }
tjj|
| j| jd}
| ||}|rdnd}|rdnd}|durR| d t| jksRJ dt| j d	| d  d
t| jD ]\}}|rt||
f }d}| jrtg }|| jk rd}|rd}n*||
||dur|| nd|d}|d }
|r\||d f }q\|r||
f }|stdd |
||fD S t|
||dS )a~  
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.

                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        NzDYou cannot specify both input_ids and inputs_embeds at the same timer&   z5You have to specify either input_ids or inputs_embedsrj   r*   r   z&The head_mask should be specified for  layers, but it is for .FT)NN)r   r   r   c                 s   s   | ]}|d ur|V  qd S r2   r*   r;   vr*   r*   r+   	<genexpr>M  r@   z(MarianEncoder.forward.<locals>.<genexpr>last_hidden_stater   
attentions)r}   r   r   use_return_dictr)   Z%warn_if_padding_and_no_attention_maskrm   rq   r   r   r   r   rp   rg   rl   r   lenr   	enumeraterG   randr   r   r   )r5   r#   re   rh   r   r   r   r   r   Z	embed_posr   Zencoder_statesZall_attentionsidxZencoder_layerZto_dropdropout_probabilitylayer_outputsr*   r*   r+   rV     sl    .





zMarianEncoder.forward)N)NNNNNNN)rX   rY   rZ   r[   r    r   r   r   r4   rG   
LongTensorr^   rI   r   r   r   r   rV   r_   r*   r*   r6   r+   r     s&   	       r   c                       s   e Zd ZdZd	eeej d fddZd
ee	j
 ee	j ee	j ee	j
 ee	j ee	j ee ee	j ee ee ee ee ee	j eee	j ef dddZ  ZS )MarianDecoderz
    Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`MarianDecoderLayer`]

    Args:
        config: MarianConfig
        embed_tokens (nn.Embedding): output embedding
    Nr   c                    s   t     j| _ j| _ j| _ j| _ j	r>t
 jnd| _|d urT|| _nt j j| j| _t j j| j| _t fddt jD | _d| _|   d S )Nr   c                    s   g | ]}t  |d qS ))r~   )r   )r;   ir   r*   r+   r?   l  r@   z*MarianDecoder.__init__.<locals>.<listcomp>F)r3   r4   rg   Zdecoder_layerdropr   r$   r0   r   Zmax_target_positionsr   r   r   r   r   r   r   r   decoder_vocab_sizer-   r   r   rA   Zdecoder_layersr   r   r   )r5   r}   r   r6   r   r+   r4   \  s     zMarianDecoder.__init__)r#   re   r   r   rh   cross_attn_head_maskr   r   r   r   r   r   r   r1   c           !      C   s  |
dur|
n| j j}
|dur |n| j j}|	dur4|	n| j j}	|durH|n| j j}| jrn| jrn|	rntd d}	|du |duA rt	dnR|dur|}|j
}|d|d }n.|dur| dd }|dddddf }|du r| |}|| j }|	r6|du r6|dur*tt| j dt| j dn
t| j d}|	r\t|tr\td t|}| dd \}}|dur| nd}|du rtj||| |jd	}|du rt s|| }tj|||jd	}t|tr|jn|}| ||||}| ||||}| j||f||d
}|| }tjj || j | jd}|rDdnd}|
rRdnd}|
rj|durjdnd}t!||gddgD ]V\}}|dur| d t"| j#ksJ d| dt"| j# d| d  dqt$| j#D ]\}}|r||f7 }| jrt%g }|| j&k rq||||||dur8|| nd|durL|| nd||
|	|d
} | d }|
r|| d f7 }|dur|| d f7 }q|r||f7 }|stdd |||||fD S t'|||||dS )a  
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.

                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
                of the decoder.
            encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
                Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
                selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
                cross-attention on hidden heads. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
                Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
                shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
                shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

                Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
                cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
                that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
                all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
                cache in the correct position and to infer the complete sequence length.
        NzZ`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...Fz:You must specify exactly one of input_ids or inputs_embedsr&   r   zPassing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `EncoderDecoderCache` instead, e.g. `past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`.r   r   )rR   rj   r*   rh   r  zThe `z` should be specified for r   r   )r   r   r   r   r   r   r   r   r8   c                 s   s   | ]}|d ur|V  qd S r2   r*   r   r*   r*   r+   r  H  s   z(MarianDecoder.forward.<locals>.<genexpr>)r  r   r   r  cross_attentions)(r}   r   r   r   r  r   rl   r   r   r)   r'   rq   rm   r   r   r   r
   r   r   Zfrom_legacy_cacher   rG   rT   rS   r   r   r   r   r   r   r   rp   rg   zipr  r   r  r	  r   r   )!r5   r#   re   r   r   rh   r  r   r   r   r   r   r   r   inputr   r   Z
seq_lengthrQ   Zmask_seq_lengthZself_attn_cacher   rR   r   Zall_hidden_statesZall_self_attnsZall_cross_attentionsZ	attn_maskZ	mask_namer
  Zdecoder_layerr  r  r*   r*   r+   rV   r  s    T













zMarianDecoder.forward)N)NNNNNNNNNNNNN)rX   rY   rZ   r[   r    r   r   r   r4   rG   r  r^   rI   r	   r   r   r   r   rV   r_   r*   r*   r6   r+   r  S  s>                r  c                       s   e Zd ZddgZed fddZdd Zdd	 Zd
d Zdd Z	dd Z
eejdddZedeej eej eej eej eej eej eej eeeej ef  ee eej eej ee ee ee ee eej edddZ  ZS )MarianModelzencoder.embed_tokens.weightzdecoder.embed_tokens.weightr   c                    s   t  | |j|j }}t||j|| _| jj	r@| j }}nt
| j}t
| j}d | _t||| _t||| _|   d S r2   )r3   r4   r$   r   r   r   r   sharedr}    share_encoder_decoder_embeddingscopydeepcopyr   encoderr  decoderr   )r5   r}   r0   r   Zencoder_embed_tokensZdecoder_embed_tokensr6   r*   r+   r4   Z  s    zMarianModel.__init__c                 C   s   |    S r2   )get_encoderget_input_embeddingsr5   r*   r*   r+   r  p  s    z MarianModel.get_input_embeddingsc                 C   s0   | j jr$|| _| j| j_| j| j_n|| j_d S r2   )r}   r  r  r  r   r  r5   rd   r*   r*   r+   set_input_embeddingst  s
    
z MarianModel.set_input_embeddingsc                 C   s   | j jrtd|   S )Nz`get_decoder_input_embeddings` should not be called if `config.share_encoder_decoder_embeddings` is `True`. Please use `get_input_embeddings` instead.)r}   r  r)   get_decoderr  r  r*   r*   r+   get_decoder_input_embeddings|  s
    z(MarianModel.get_decoder_input_embeddingsc                 C   s   | j jrtd|| j_d S )Na   `config.share_encoder_decoder_embeddings` is set to `True` meaning the decoder input embeddings are shared with the encoder. In order to set the decoder input embeddings, you should simply set the encoder input embeddings by calling `set_input_embeddings` with the appropriate embeddings.)r}   r  r)   r  r   r  r*   r*   r+   set_decoder_input_embeddings  s
    z(MarianModel.set_decoder_input_embeddingsc                 C   s   | j S r2   )r  r  r*   r*   r+   r    s    zMarianModel.get_encodernew_num_tokensr1   c                 C   sV   | j jrtd|  }| ||}| | |  }|d u rB|S || j _|   |S Nz`resize_decoder_token_embeddings` should not be called if `config.share_encoder_decoder_embeddings` is `True`. Please use `resize_token_embeddings` instead.)r}   r  r)   r"  _get_resized_embeddingsr#  r  tie_weights)r5   r%  old_embeddingsnew_embeddingsmodel_embedsr*   r*   r+   resize_decoder_token_embeddings  s    
z+MarianModel.resize_decoder_token_embeddingsN)r#   re   r   decoder_attention_maskrh   decoder_head_maskr  encoder_outputsr   r   decoder_inputs_embedsr   r   r   r   r   r1   c                 C   s  |dur|n| j j}|dur |n| j j}|dur4|n| j j}|durH|n| j j}|du rr| j||||
|||d}nH|rt|tst|d t|dkr|d ndt|dkr|d ndd}| j	|||d ||||	||||||d}|s|| S t
|j|j|j|j|j|j|j|jdS )	a?  
        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are decoder input IDs?](../glossary#decoder-input-ids)

            Marian uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
            `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).
        decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
            be used by default.
        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
            1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, MarianModel

        >>> tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
        >>> model = MarianModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")

        >>> inputs = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt")
        >>> decoder_inputs = tokenizer(
        ...     "<pad> Studien haben gezeigt dass es hilfreich ist einen Hund zu besitzen",
        ...     return_tensors="pt",
        ...     add_special_tokens=False,
        ... )
        >>> outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_inputs.input_ids)

        >>> last_hidden_states = outputs.last_hidden_state
        >>> list(last_hidden_states.shape)
        [1, 26, 512]
        ```N)r#   re   rh   r   r   r   r   r   r   r8   r  r#   re   r   r   rh   r  r   r   r   r   r   r   r   )r  r   decoder_hidden_statesdecoder_attentionsr  encoder_last_hidden_stater   encoder_attentions)r}   r   r   r   r  r  r   r   r  r  r   r  r   r   r  r  )r5   r#   re   r   r-  rh   r.  r  r/  r   r   r0  r   r   r   r   r   Zdecoder_outputsr*   r*   r+   rV     s`    >
zMarianModel.forward)NNNNNNNNNNNNNNNN)rX   rY   rZ   _tied_weights_keysr    r4   r  r   r"  r#  r  r\   r   r   r,  r   r   rG   r  r^   r   r   r   r	   rI   r   r   rV   r_   r*   r*   r6   r+   r  V  sX   	                r  zX
    The Marian Model with a language modeling head. Can be used for summarization.
    )Zcustom_introc                       s`  e Zd ZdZg dZddgZg dZed fddZd	d
 Z	dd Z
d$eee eejd fddZd%eejdddZdd ZeddddZejdddZdd Zed&eej eej eej eej eej eej eej eeeej ef  ee eej eej eej ee ee ee ee eej eddd Z ejd!d"d#Z!  Z"S )'MarianMTModelr   )final_logits_biaszencoder.embed_positions.weightzdecoder.embed_positions.weightz$model.encoder.embed_positions.weightz$model.decoder.embed_positions.weight)z!model.encoder.embed_tokens.weightz!model.decoder.embed_tokens.weightlm_head.weightr   c                    s^   t  | t|| _|jr"|jn|j}| dt	d|f t
j|j|dd| _|   d S )Nr8  r   Fr   )r3   r4   r  r   r  r   r  register_bufferrG   zerosr   r   r   lm_headr   )r5   r}   Ztarget_vocab_sizer6   r*   r+   r4   .  s    
zMarianMTModel.__init__c                 C   s
   | j  S r2   )r   r  r  r*   r*   r+   r  9  s    zMarianMTModel.get_encoderc                 C   s
   | j  S r2   )r   r!  r  r*   r*   r+   r!  <  s    zMarianMTModel.get_decoderNT)r%  pad_to_multiple_ofmean_resizingr1   c                    s&   t  |||}| jjr"| | |S r2   )r3   resize_token_embeddingsr}   r  _resize_final_logits_bias)r5   r%  r=  r>  r*  r6   r*   r+   r?  ?  s    
z%MarianMTModel.resize_token_embeddingsr$  c                 G   s~   |   }| |||}| | |jjd }| jjr<|| j_| jjrv|  d urv| jj	sv|  }| 
||}| | |   S )Nr   )r  r'  r   rE   r'   r}   r  r  get_output_embeddingstie_word_embeddings_get_resized_lm_headset_output_embeddings)r5   r%  r=  argsr)  r*  old_lm_headnew_lm_headr*   r*   r+   _resize_token_embeddingsH  s     


z&MarianMTModel._resize_token_embeddingsc                 C   s   | j jrtd| j }| ||}| j| |  d urd| j jsd|  }| 	||}| 
| | j }|d u rz|S || j _|   | | |S r&  )r}   r  r)   r   r"  r'  r#  rA  rB  rC  rD  r  r(  r@  )r5   r%  r)  r*  rF  rG  r+  r*   r*   r+   r,  ^  s$    



z-MarianMTModel.resize_decoder_token_embeddingsc                 C   sj   | j jd }||kr,| j d d d |f }n.tjd|| f| j jd}tj| j |gdd}| d| d S )Nr&   r   r   rB   r8  )r8  r'   rG   r;  rS   catr:  )r5   r%  Zold_num_tokensZnew_biasZ
extra_biasr*   r*   r+   r@  ~  s    z'MarianMTModel._resize_final_logits_bias)r*  c                 C   s
   || _ d S r2   )r<  )r5   r*  r*   r*   r+   rD    s    z#MarianMTModel.set_output_embeddingsc                 C   s   |   }|dur6t| jddr6|   }| || t| jddrt| jddrt| | jrjt| | j} | | j	| j
| jd}|| _|  D ]}t|dr|  qdS )	z
        Tie the weights between the input embeddings and the output embeddings.

        If the `torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning the
        weights instead.
        NrB  Tis_encoder_decoderFZtie_encoder_decoderr  _tie_weights)rA  getattrr}   r!  r  Z_tie_or_clone_weightshasattrr   Z_tie_encoder_decoder_weightsr  r  Z_dynamic_tied_weights_keysmodulesrK  )r5   Zoutput_embeddingsZword_embeddingsZtied_weightsra   r*   r*   r+   r(    s    
zMarianMTModel.tie_weights)r#   re   r   r-  rh   r.  r  r/  r   r   r0  labelsr   r   r   r   r   r1   c                 C   s  |dur|n| j j}|durR|r*td d}|du rR|du rRt|| j j| j j}| j|||||||||	|
||||||d}| |d | j	 }d}|durt
 }||d| j j|d}|s|f|dd  }|dur|f| S |S t|||j|j|j|j|j|j|jd	S )	uH	  
        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are decoder input IDs?](../glossary#decoder-input-ids)

            Marian uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
            `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).
        decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
            be used by default.
        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
            1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, MarianMTModel

        >>> src = "fr"  # source language
        >>> trg = "en"  # target language

        >>> model_name = f"Helsinki-NLP/opus-mt-{src}-{trg}"
        >>> model = MarianMTModel.from_pretrained(model_name)
        >>> tokenizer = AutoTokenizer.from_pretrained(model_name)

        >>> sample_text = "où est l'arrêt de bus ?"
        >>> batch = tokenizer([sample_text], return_tensors="pt")

        >>> generated_ids = model.generate(**batch)
        >>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
        "Where's the bus stop?"
        ```
        NzJThe `use_cache` argument is changed to `False` since `labels` is provided.F)re   r   r/  r-  rh   r.  r  r   r   r0  r   r   r   r   r   r   r&   r   )	losslogitsr   r2  r3  r  r4  r   r5  )r}   r  r   warningr,   r$   r%   r   r<  r8  r   rq   r  r   r   r2  r3  r  r4  r   r5  )r5   r#   re   r   r-  rh   r.  r  r/  r   r   r0  rO  r   r   r   r   r   r   Z	lm_logitsZmasked_lm_lossloss_fctoutputr*   r*   r+   rV     s\    C
zMarianMTModel.forward)rO  c                 C   s   t || jj| jjS r2   )r,   r}   r$   r%   )r5   rO  r*   r*   r+   %prepare_decoder_input_ids_from_labels  s    z3MarianMTModel.prepare_decoder_input_ids_from_labels)NT)N)NNNNNNNNNNNNNNNNN)#rX   rY   rZ   r   Z_keys_to_ignore_on_load_missingZ_keys_to_ignore_on_saver6  r    r4   r  r!  r\   r   r   r   r   r?  rH  r,  r@  rD  r(  r   rG   r  r^   r   r   r   r	   rI   r   rV   rU  r_   r*   r*   r6   r+   r7    sp    
	 	                 vr7  c                       s(   e Zd ZdZ fddZdd Z  ZS )MarianDecoderWrapperz
    This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
    used in combination with the [`EncoderDecoderModel`] framework.
    c                    s   t  | t|| _d S r2   )r3   r4   r  r  r5   r}   r6   r*   r+   r4   (  s    zMarianDecoderWrapper.__init__c                 O   s   | j |i |S r2   )r  )r5   rE  rs   r*   r*   r+   rV   ,  s    zMarianDecoderWrapper.forward)rX   rY   rZ   r[   r4   rV   r_   r*   r*   r6   r+   rV  "  s   rV  c                       s   e Zd ZdgZ fddZdd Zdd Zdd	 Zd
d Ze	de
ej e
ej e
ej e
ej e
ej e
ej e
e e
ej e
ej e
e e
e e
e e
e e
ej eeef dddZ  ZS )MarianForCausalLMr9  c                    sD   d|_ d|_t | t|| _tj|j|j	dd| _
|   d S )NTFr   )rz   rJ  r3   r4   rV  r   r   r   Zhidden_sizer   r<  r   rW  r6   r*   r+   r4   4  s    
zMarianForCausalLM.__init__c                 C   s
   | j jjS r2   r   r  r   r  r*   r*   r+   r  ?  s    z&MarianForCausalLM.get_input_embeddingsc                 C   s   || j j_d S r2   rY  r  r*   r*   r+   r   B  s    z&MarianForCausalLM.set_input_embeddingsc                 C   s   || j _d S r2   r   r  )r5   r  r*   r*   r+   set_decoderE  s    zMarianForCausalLM.set_decoderc                 C   s   | j jS r2   rZ  r  r*   r*   r+   r!  H  s    zMarianForCausalLM.get_decoderN)r#   re   r   r   rh   r  r   r   rO  r   r   r   r   r   r1   c                 C   s   |dur|n| j j}|dur |n| j j}|dur4|n| j j}| jj|||||||||
||||d}| |d }d}|	dur|	|j}	t	 }||
d| j j|	
d}|s|f|dd  }|dur|f| S |S t|||j|j|j|jdS )a  
        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, MarianForCausalLM

        >>> tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-fr-en")
        >>> model = MarianForCausalLM.from_pretrained("Helsinki-NLP/opus-mt-fr-en", add_cross_attention=False)
        >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> logits = outputs.logits
        >>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size]
        >>> list(logits.shape) == expected_shape
        True
        ```Nr1  r   r&   r   )rP  rQ  r   r   r  r  )r}   r   r   r  r   r  r<  r   rS   r   rq   r   r   r   r   r  r  )r5   r#   re   r   r   rh   r  r   r   rO  r   r   r   r   r   r   rQ  rP  rS  rT  r*   r*   r+   rV   K  sH    .zMarianForCausalLM.forward)NNNNNNNNNNNNNN)rX   rY   rZ   r6  r4   r  r   r[  r!  r   r   rG   r  r^   rI   r	   r   r   r   r   rV   r_   r*   r*   r6   r+   rX  1  sL                 
rX  )rX  r  r7  r   )Nr`   N)Lr[   r  r   typingr   r   r   numpyr9   rG   Ztorch.utils.checkpointr   Ztorch.nnr   Zactivationsr   Zcache_utilsr	   r
   r   Z
generationr   Zmodeling_attn_mask_utilsr   r   r   Zmodeling_flash_attention_utilsr   Zmodeling_layersr   Zmodeling_outputsr   r   r   r   r   Zmodeling_utilsr   r   Zprocessing_utilsr   utilsr   r   r   r   Zutils.deprecationr   Zconfiguration_marianr    Z!torch.nn.attention.flex_attentionr!   Zintegrations.flex_attentionr"   Z
get_loggerrX   r   r^   r\   r,   r   r-   Moduler   rv   rw   r   r   r   r   r  r  r7  rV  rX  __all__r*   r*   r*   r+   <module>   s~   
)    Hu g    I   u