"""PyTorch Wav2Vec2 model."""

import math
import warnings
from dataclasses import dataclass
from typing import Callable, Optional, Union

import numpy as np
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss

from ...activations import ACT2FN
from ...integrations.deepspeed import is_deepspeed_zero3_enabled
from ...integrations.fsdp import is_fsdp_managed_module
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
    BaseModelOutput,
    CausalLMOutput,
    MaskedLMOutput,
    SequenceClassifierOutput,
    TokenClassifierOutput,
    Wav2Vec2BaseModelOutput,
    XVectorOutput,
)
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import (
    ModelOutput,
    auto_docstring,
    cached_file,
    check_torch_load_is_safe,
    is_peft_available,
    is_safetensors_available,
    is_torch_flex_attn_available,
    logging,
)
from .configuration_wav2vec2 import Wav2Vec2Config


WAV2VEC2_ADAPTER_PT_FILE = "adapter.{}.bin"
WAV2VEC2_ADAPTER_SAFE_FILE = "adapter.{}.safetensors"

if is_safetensors_available():
    from safetensors.torch import load_file as safe_load_file

if is_torch_flex_attn_available():
    from ...integrations.flex_attention import make_flex_block_causal_mask

logger = logging.get_logger(__name__)

_HIDDEN_STATES_START_POSITION = 2


@dataclass
@auto_docstring(
    custom_intro="""
    Output type of [`Wav2Vec2ForPreTraining`], with potential hidden states and attentions.
    """
)
class Wav2Vec2ForPreTrainingOutput(ModelOutput):
    r"""
    loss (*optional*, returned when `sample_negative_indices` are passed, `torch.FloatTensor` of shape `(1,)`):
        Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the [official
        paper](https://huggingface.co/papers/2006.11477).
    projected_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):
        Hidden-states of the model projected to *config.proj_codevector_dim* that can be used to predict the masked
        projected quantized states.
    projected_quantized_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):
        Quantized extracted feature vectors projected to *config.proj_codevector_dim* representing the positive
        target vectors for contrastive loss.
    codevector_perplexity (`torch.FloatTensor` of shape `(1,)`):
        The perplexity of the codevector distribution, used to measure the diversity of the codebook.
    contrastive_loss (*optional*, returned when `sample_negative_indices` are passed, `torch.FloatTensor` of shape `(1,)`):
        The contrastive loss (L_m) as stated in the [official paper](https://huggingface.co/papers/2006.11477).
    diversity_loss (*optional*, returned when `sample_negative_indices` are passed, `torch.FloatTensor` of shape `(1,)`):
        The diversity loss (L_d) as stated in the [official paper](https://huggingface.co/papers/2006.11477).
    """

    loss: Optional[torch.FloatTensor] = None
    projected_states: Optional[torch.FloatTensor] = None
    projected_quantized_states: Optional[torch.FloatTensor] = None
    codevector_perplexity: Optional[torch.FloatTensor] = None
    hidden_states: Optional[tuple[torch.FloatTensor]] = None
    attentions: Optional[tuple[torch.FloatTensor]] = None
    contrastive_loss: Optional[torch.FloatTensor] = None
    diversity_loss: Optional[torch.FloatTensor] = None


def _compute_mask_indices(
    shape: tuple[int, int],
    mask_prob: float,
    mask_length: int,
    attention_mask: Optional[torch.LongTensor] = None,
    min_masks: int = 0,
) -> np.ndarray:
    """
    Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
    ASR](https://huggingface.co/papers/1904.08779). Note that this method is not optimized to run on TPU and should be run on
    CPU as part of the preprocessing during training.

    Args:
        shape: The shape for which to compute masks. This should be of a tuple of size 2 where
               the first element is the batch size and the second element is the length of the axis to span.
        mask_prob:  The percentage of the whole axis (between 0 and 1) which will be masked. The number of
                    independently generated mask spans of length `mask_length` is computed by
                    `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the
                    actual percentage will be smaller.
        mask_length: size of the mask
        min_masks: minimum number of masked spans
        attention_mask: A (right-padded) attention mask which independently shortens the feature axis of
                        each batch dimension.
    """
    batch_size, sequence_length = shape

    if mask_length < 1:
        raise ValueError("`mask_length` has to be bigger than 0.")

    if mask_length > sequence_length:
        raise ValueError(
            f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}"
            f" and `sequence_length`: {sequence_length}`"
        )

    # epsilon is used for probabilistic rounding
    epsilon = np.random.rand(1).item()

    def compute_num_masked_span(input_length):
        """Given input length, compute how many spans should be masked"""
        num_masked_span = int(mask_prob * input_length / mask_length + epsilon)
        num_masked_span = max(num_masked_span, min_masks)

        # make sure num masked span <= sequence_length
        if num_masked_span * mask_length > sequence_length:
            num_masked_span = sequence_length // mask_length

        # make sure num_masked span is also <= input_length - (mask_length - 1)
        if input_length - (mask_length - 1) < num_masked_span:
            num_masked_span = max(input_length - (mask_length - 1), 0)

        return num_masked_span

    # compute number of masked spans in batch
    input_lengths = (
        attention_mask.detach().sum(-1).tolist()
        if attention_mask is not None
        else [sequence_length for _ in range(batch_size)]
    )

    # SpecAugment mask to fill
    spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool)
    spec_aug_mask_idxs = []

    max_num_masked_span = compute_num_masked_span(sequence_length)

    if max_num_masked_span == 0:
        return spec_aug_mask

    for input_length in input_lengths:
        # compute num of masked spans for this input
        num_masked_span = compute_num_masked_span(input_length)

        # get random indices to mask
        spec_aug_mask_idx = np.random.choice(
            np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False
        )

        # pick first sampled index that will serve as a dummy index to pad vector
        # to ensure same dimension for all batches due to probabilistic rounding
        if len(spec_aug_mask_idx) == 0:
            # this case can only happen if `input_length` is strictly smaller than
            # `sequence_length`, in which case the last token has to be a padding
            # token which we can use as a dummy mask id
            dummy_mask_idx = sequence_length - 1
        else:
            dummy_mask_idx = spec_aug_mask_idx[0]

        spec_aug_mask_idx = np.concatenate(
            [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]
        )
        spec_aug_mask_idxs.append(spec_aug_mask_idx)

    spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)

    # expand masked indices to masked spans
    spec_aug_mask_idxs = np.broadcast_to(
        spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)
    )
    spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)

    # add offset to the starting indexes so that indexes now create a span
    offsets = np.arange(mask_length)[None, None, :]
    offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(
        batch_size, max_num_masked_span * mask_length
    )
    spec_aug_mask_idxs = spec_aug_mask_idxs + offsets

    # ensure that we cannot have indices larger than sequence_length
    if spec_aug_mask_idxs.max() > sequence_length - 1:
        spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1

    # scatter indices to mask
    np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)

    return spec_aug_mask


def _sample_negative_indices(
    features_shape: tuple, num_negatives: int, mask_time_indices: Optional[np.ndarray] = None
):
    """
    Sample `num_negatives` vectors from feature vectors.
    """
    batch_size, sequence_length = features_shape

    # generate indices of the positive vectors themselves, repeat them `num_negatives` times
    sequence_length_range = np.arange(sequence_length)

    # get `num_negatives` random vector indices from the same utterance
    sampled_negative_indices = np.zeros(shape=(batch_size, sequence_length, num_negatives), dtype=np.int32)

    mask_time_indices = (
        mask_time_indices.astype(bool) if mask_time_indices is not None else np.ones(features_shape, dtype=bool)
    )

    for batch_idx in range(batch_size):
        high = mask_time_indices[batch_idx].sum() - 1
        mapped_masked_indices = sequence_length_range[mask_time_indices[batch_idx]]

        feature_indices = np.broadcast_to(np.arange(high + 1)[:, None], (high + 1, num_negatives))
        sampled_indices = np.random.randint(0, high, size=(high + 1, num_negatives))
        # avoid sampling the same positive vector, but keep the distribution uniform
        sampled_indices[sampled_indices >= feature_indices] += 1

        # remap to actual indices
        sampled_negative_indices[batch_idx][mask_time_indices[batch_idx]] = mapped_masked_indices[sampled_indices]

        # correct for batch size
        sampled_negative_indices[batch_idx] += batch_idx * sequence_length

    return sampled_negative_indices
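

# --- Illustrative usage sketch (not part of the original module) --------------------------------
# The two helpers above are typically combined during pre-training: `_compute_mask_indices`
# builds the SpecAugment-style time mask and `_sample_negative_indices` draws distractor
# positions for the contrastive loss. The batch size, sequence length, and hyper-parameters
# below are assumptions chosen only for illustration.
def _example_pretraining_masks(batch_size: int = 4, sequence_length: int = 100, num_negatives: int = 100):
    mask_time_indices = _compute_mask_indices(
        shape=(batch_size, sequence_length), mask_prob=0.2, mask_length=10, min_masks=2
    )
    sampled_negative_indices = _sample_negative_indices(
        features_shape=(batch_size, sequence_length),
        num_negatives=num_negatives,
        mask_time_indices=mask_time_indices,
    )
    # Both outputs are NumPy arrays; convert them with `torch.tensor(...)` before passing them
    # to `Wav2Vec2ForPreTraining` (see the example in its `forward` docstring).
    return mask_time_indices, sampled_negative_indices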
*rq   c                       s&   e Zd Zd fdd	Zdd Z  ZS )Wav2Vec2NoLayerNormConvLayerr   c                    sj   t    |dkr |j|d  nd| _|j| | _tj| j| j|j| |j| |j	d| _
t|j | _d S )Nr   r"   kernel_sizestridebias)super__init__conv_dimin_conv_dimout_conv_dimr   Conv1dconv_kernelconv_stride	conv_biasconvr	   feat_extract_activation
activationselfconfiglayer_id	__class__r8   r9   rx     s    
z%Wav2Vec2NoLayerNormConvLayer.__init__c                 C   s   |  |}| |}|S N)r   r   r   r,   r8   r8   r9   forward  s    

z$Wav2Vec2NoLayerNormConvLayer.forward)r   r0   r1   r2   rx   r   __classcell__r8   r8   r   r9   rr     s   rr   c                       s&   e Zd Zd fdd	Zdd Z  ZS )Wav2Vec2LayerNormConvLayerr   c                    s|   t    |dkr |j|d  nd| _|j| | _tj| j| j|j| |j| |j	d| _
tj| jdd| _t|j | _d S )Nr   r"   rs   T)Zelementwise_affine)rw   rx   ry   rz   r{   r   r|   r}   r~   r   r   	LayerNorm
layer_normr	   r   r   r   r   r8   r9   rx   "  s    
z#Wav2Vec2LayerNormConvLayer.__init__c                 C   s:   |  |}|dd}| |}|dd}| |}|S )NrI   )r   	transposer   r   r   r8   r8   r9   r   1  s    


z"Wav2Vec2LayerNormConvLayer.forward)r   r   r8   r8   r   r9   r   !  s   r   c                       s&   e Zd Zd fdd	Zdd Z  ZS )Wav2Vec2GroupNormConvLayerr   c                    s   t    |dkr |j|d  nd| _|j| | _tj| j| j|j| |j| |j	d| _
t|j | _tj| j| jdd| _d S )Nr   r"   rs   T)
num_groupsZnum_channelsZaffine)rw   rx   ry   rz   r{   r   r|   r}   r~   r   r   r	   r   r   	GroupNormr   r   r   r8   r9   rx   =  s    
z#Wav2Vec2GroupNormConvLayer.__init__c                 C   s"   |  |}| |}| |}|S r   )r   r   r   r   r8   r8   r9   r   M  s    


z"Wav2Vec2GroupNormConvLayer.forward)r   r   r8   r8   r   r9   r   <  s   r   c                       s$   e Zd Z fddZdd Z  ZS )Wav2Vec2PositionalConvEmbeddingc                    s$  t    tj|j|j|j|jd |jd| _tjj	}t
tjjdrNtjjj	}t rdd l}|jj| jjdd" || jddd| _W d    n1 s0    Y  t
| jdr| jjjj}| jjjj}n| jj}| jj}|j| | |j| | n|| jddd| _t|j| _t|j | _d S )	Nr&   )rt   paddinggroupsweight_normr   )Zmodifier_rankweight)namedimparametrizations)rw   rx   r   r|   hidden_sizenum_conv_pos_embeddingsZnum_conv_pos_embedding_groupsr   utilsr   hasattrr   r
   	deepspeedzeroZGatheredParametersr   Z	original0Z	original1weight_gweight_vZregister_external_parameterWav2Vec2SamePadLayerr   r	   r   r   )r   r   r   r   r   r   r   r8   r9   rx   U  s2    

0z(Wav2Vec2PositionalConvEmbedding.__init__c                 C   s:   | dd}| |}| |}| |}| dd}|S Nr"   r&   )r   r   r   r   r   r8   r8   r9   r   v  s    


z'Wav2Vec2PositionalConvEmbedding.forwardr   r8   r8   r   r9   r   T  s   !r   c                       s$   e Zd Z fddZdd Z  ZS )r   c                    s$   t    |d dkrdnd| _d S )Nr&   r   r"   )rw   rx   num_pad_remove)r   r   r   r8   r9   rx     s    
zWav2Vec2SamePadLayer.__init__c                 C   s,   | j dkr(|d d d d d | j  f }|S )Nr   )r   r   r8   r8   r9   r     s    
zWav2Vec2SamePadLayer.forwardr   r8   r8   r   r9   r     s   r   c                       s0   e Zd ZdZ fddZdd Zdd Z  ZS )Wav2Vec2FeatureEncoderz.Construct the features from raw audio waveformc                    s   t     jdkr@t ddg fddt jd D  }n6 jdkrd fddt jD }ntd	 j d
t|| _	d| _
d| _d S )Ngroupr   r   c                    s   g | ]}t  |d  dqS )r"   r   )rr   rK   ir   r8   r9   rM     s   z3Wav2Vec2FeatureEncoder.__init__.<locals>.<listcomp>r"   layerc                    s   g | ]}t  |d qS )r   )r   r   r   r8   r9   rM     s   z`config.feat_extract_norm` is z), but has to be one of ['group', 'layer']FT)rw   rx   Zfeat_extract_normr   rZ   Znum_feat_extract_layersrR   r   
ModuleListconv_layersgradient_checkpointing_requires_grad)r   r   r   r   r   r9   rx     s    




zWav2Vec2FeatureEncoder.__init__c                 C   s   |   D ]
}d|_qd| _d S )NF)
parametersrequires_gradr   r   paramr8   r8   r9   _freeze_parameters  s    z)Wav2Vec2FeatureEncoder._freeze_parametersc                 C   s:   |d d d f }| j r"| jr"d|_| jD ]}||}q(|S )NT)r   trainingr   r   )r   input_valuesr,   Z
conv_layerr8   r8   r9   r     s    

zWav2Vec2FeatureEncoder.forward)r0   r1   r2   r3   rx   r   r   r   r8   r8   r   r9   r     s   r   c                       s   e Zd Z fddZ  ZS )Wav2Vec2FeatureExtractorc                    s8   t  | td| jj d| jjd j dt d S )NzThe class `zD` has been depreciated and will be removed in Transformers v5. Use `r   z
` instead.)rw   rx   warningswarnr   r0   	__bases__FutureWarningr   r   r   r8   r9   rx     s    z!Wav2Vec2FeatureExtractor.__init__)r0   r1   r2   rx   r   r8   r8   r   r9   r     s   r   c                       s$   e Zd Z fddZdd Z  ZS )Wav2Vec2FeatureProjectionc                    sJ   t    tj|jd |jd| _t|jd |j| _	t
|j| _d S )NrI   eps)rw   rx   r   r   ry   layer_norm_epsr   Linearr   
projectionDropoutZfeat_proj_dropoutdropoutr   r   r8   r9   rx     s    
z"Wav2Vec2FeatureProjection.__init__c                 C   s&   |  |}| |}| |}||fS r   )r   r   r   )r   r,   Znorm_hidden_statesr8   r8   r9   r     s    


z!Wav2Vec2FeatureProjection.forwardr   r8   r8   r   r9   r     s   r           )modulequerykeyvaluer=   scalingr   	head_maskc                 K   s   |d u r| dd }t||dd| }	|d ur>|	| }	tjj|	dd}	|d urj|	|dddd }	tjj|	|| j	d}	t|	|}
|
dd
 }
|
|	fS )NrI         r&   r   r   r"   )pr   )rm   r4   matmulr   r   
functionalsoftmaxviewr   r   
contiguous)r   r   r   r   r=   r   r   r   kwargsattn_weightsattn_outputr8   r8   r9   eager_attention_forward  s    r   c                       s   e Zd ZdZdeeeeeeee d fddZ	de
jee
j ee
j ee
j ee ee ee
jee
j eee
j  f d	d
dZ  ZS )Wav2Vec2Attentionz=Multi-headed attention from 'Attention Is All You Need' paperr   FTN)	embed_dim	num_headsr   
is_decoderrv   	is_causalr   c                    s   t    || _|| _|| _|| | _|| _| j| | jkrTtd| j d| d| jd | _|| _	|| _
tj|||d| _tj|||d| _tj|||d| _tj|||d| _d S )Nz;embed_dim must be divisible by num_heads (got `embed_dim`: z and `num_heads`: z).r   )rv   )rw   rx   r   r   r   head_dimr   rR   r   r   r   r   r   k_projv_projq_projout_proj)r   r   r   r   r   rv   r   r   r   r8   r9   rx     s&    



zWav2Vec2Attention.__init__)r,   key_value_statesr=   layer_head_maskoutput_attentionsr   r?   c                 K   s  |du}|j dd \}}	|r(|j d n|	}
||	d| jf}||
d| jf}| |j| dd}|rh|n|}| |j| dd}| |j| dd}t}| jj	dkrt
| jj	 }|| ||||f| jsdn| j| j||d|\}}|||	d }| |}||dfS )z#Input shape: Batch x Time x ChannelNrI   r"   r&   eagerr   )r   r   r   r   )r:   r   r   r   r   r   r   r   r   _attn_implementationr   r   r   r   re   r   r   )r   r,   r   r=   r   r   r   Zis_cross_attentionZbszZtgt_lenZsrc_lenZq_input_shapeZkv_input_shapeZquery_statesZcurrent_statesZ
key_statesZvalue_statesZattention_interfacer   r   r8   r8   r9   r     s:    


zWav2Vec2Attention.forward)r   FTFN)NNNF)r0   r1   r2   r3   rA   floatr\   r   r#   rx   r4   Tensorr   r   r7   r   r   r8   r8   r   r9   r     s8        "    r   c                       s$   e Zd Z fddZdd Z  ZS )Wav2Vec2FeedForwardc                    sp   t    t|j| _t|j|j| _	t
|jtrDt|j | _n|j| _t|j|j| _t|j| _d S r   )rw   rx   r   r   Zactivation_dropoutintermediate_dropoutr   r   Zintermediate_sizeintermediate_dense
isinstanceZ
hidden_actstrr	   intermediate_act_fnoutput_densehidden_dropoutoutput_dropoutr   r   r8   r9   rx   F  s    
zWav2Vec2FeedForward.__init__c                 C   s6   |  |}| |}| |}| |}| |}|S r   )r   r   r   r   r   r   r8   r8   r9   r   S  s    




zWav2Vec2FeedForward.forwardr   r8   r8   r   r9   r   E  s   r   c                       s&   e Zd Z fddZdddZ  ZS )Wav2Vec2EncoderLayerc                    sh   t    t|j|j|jd|d| _t|j	| _
tj|j|jd| _t|| _tj|j|jd| _d S )NFr   r   r   r   r   r   )rw   rx   r   r   num_attention_headsattention_dropout	attentionr   r   r   r   r   r   r   r   feed_forwardfinal_layer_normr   r   r8   r9   rx   ^  s    

zWav2Vec2EncoderLayer.__init__NFc                 C   sf   |}| j |||d\}}}| |}|| }| |}|| | }| |}|f}|rb||f7 }|S Nr=   r   )r   r   r   r   r   r   r,   r=   r   Zattn_residualr   rL   outputsr8   r8   r9   r   m  s    



zWav2Vec2EncoderLayer.forward)NFr   r8   r8   r   r9   r   ]  s   r   c                       s8   e Zd Z fddZdejeej edddZ  Z	S )	#Wav2Vec2EncoderLayerStableLayerNormc                    s   t    t|j|j|jd|d| _t|j	| _
tj|j|jd| _t|| _tj|j|jd| _t|dd d urt|| _nd | _d S )NFr   r   adapter_attn_dim)rw   rx   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   getattrWav2Vec2AttnAdapterLayeradapter_layerr   r   r8   r9   rx     s    

z,Wav2Vec2EncoderLayerStableLayerNorm.__init__NF)r,   r=   r   c                 C   sz   |}|  |}| j|||d\}}}| |}|| }|| | | }| jd urb|| | }|f}|rv||f7 }|S r   )r   r   r   r   r   r  r   r8   r8   r9   r     s    



z+Wav2Vec2EncoderLayerStableLayerNorm.forward)NF)
r0   r1   r2   rx   r4   r   r   r\   r   r   r8   r8   r   r9   r    s     r  c                       sX   e Zd Z fddZdejeej eeedddZ	e
ejdf ejd	d
dZ  ZS )Wav2Vec2Encoderc                    sf   t     | _t | _tj j jd| _	t
 j| _t fddt jD | _d| _d S )Nr   c                    s   g | ]}t  qS r8   )r   rJ   r   r8   r9   rM     rN   z,Wav2Vec2Encoder.__init__.<locals>.<listcomp>Frw   rx   r   r   pos_conv_embedr   r   r   r   r   r   r   r   r   rZ   num_hidden_layerslayersr   r   r   r   r9   rx     s    

 zWav2Vec2Encoder.__init__NFT)r,   r=   r   output_hidden_statesreturn_dictc                 C   s.  |rdnd }|rdnd }|d urD| ddd|jd }d|| < | ||}| |}	||	 }| |}| |}t pt| }
| j	D ]f}|r||f }t
g }| jo|| jjk }|r|
r||||d}|d }|rd}|r||d f }q|r||f }|s tdd	 |||fD S t|||d
S )Nr8   rI   r"   r&   r   r   NNc                 s   s   | ]}|d ur|V  qd S r   r8   rK   vr8   r8   r9   	<genexpr>  rN   z*Wav2Vec2Encoder.forward.<locals>.<genexpr>last_hidden_stater,   r-   )	unsqueezerepeatr:   _update_full_maskr  r   r   r
   r   r
  r4   rU   r   r   	layerdropr7   r   r   r,   r=   r   r  r  Zall_hidden_statesZall_self_attentionsZexpand_attention_maskZposition_embeddingsZsynced_gpusr   Zdropout_probabilityZskip_the_layerZlayer_outputsr8   r8   r9   r     sJ    







zWav2Vec2Encoder.forwardr=   inputs_embedsc                 C   sv   |d urr| j jdkr&d|v r |nd }nL| j jdkr@t||j}n2| j jdkrft|tjrrt|dd}nt||j}|S NZflash_attention_2r   ZsdpaZflex_attentionF)r   	r   r   r   rP   r   r4   r   r%   r   r   r=   r  r8   r8   r9   r    s    z!Wav2Vec2Encoder._update_full_mask)NFFT)r0   r1   r2   rx   r4   tensorr   r   r\   r   r   r  r   r8   r8   r   r9   r    s       ?r  c                       sB   e Zd Z fddZdddZeejdf ejdd	d
Z  Z	S )Wav2Vec2EncoderStableLayerNormc                    sf   t     | _t | _tj j jd| _	t
 j| _t fddt jD | _d| _d S )Nr   c                    s   g | ]}t  qS r8   )r  rJ   r   r8   r9   rM     rN   z;Wav2Vec2EncoderStableLayerNorm.__init__.<locals>.<listcomp>Fr  r   r   r   r9   rx     s    

z'Wav2Vec2EncoderStableLayerNorm.__init__NFTc                 C   s.  |rdnd }|rdnd }|d urD| ddd|jd }d|| < | ||}| |}	||	 }| |}t pxt| }
| jD ]f}|r||f }t	
g }| jo|| jjk }|r|
r||||d}|d }|rd}|r||d f }q| |}|r||f }|s tdd	 |||fD S t|||d
S )Nr8   rI   r"   r&   r   r   r  c                 s   s   | ]}|d ur|V  qd S r   r8   r  r8   r8   r9   r  P  rN   z9Wav2Vec2EncoderStableLayerNorm.forward.<locals>.<genexpr>r  )r  r  r:   r  r  r   r
   r   r
  r4   rU   r   r   r  r   r7   r   r  r8   r8   r9   r     sJ    







z&Wav2Vec2EncoderStableLayerNorm.forwardr  c                 C   sv   |d urr| j jdkr&d|v r |nd }nL| j jdkr@t||j}n2| j jdkrft|tjrrt|dd}nt||j}|S r  r  r  r8   r8   r9   r  X  s    z0Wav2Vec2EncoderStableLayerNorm._update_full_mask)NFFT)
r0   r1   r2   rx   r   r   r4   r   r  r   r8   r8   r   r9   r    s       
Ar  c                       s8   e Zd ZdZ fddZed	ddZd
ddZ  ZS )Wav2Vec2GumbelVectorQuantizerz
class Wav2Vec2GumbelVectorQuantizer(nn.Module):
    """
    Vector quantization using gumbel softmax. See `[CATEGORICAL REPARAMETERIZATION WITH
    GUMBEL-SOFTMAX](https://huggingface.co/papers/1611.01144) for more information.
    """

    def __init__(self, config):
        super().__init__()
        self.num_groups = config.num_codevector_groups
        self.num_vars = config.num_codevectors_per_group

        if config.codevector_dim % self.num_groups != 0:
            raise ValueError(
                f"`config.codevector_dim {config.codevector_dim} must be divisible "
                f"by `config.num_codevector_groups` {self.num_groups} for concatenation"
            )

        # storage for codebook variables (codewords)
        self.codevectors = nn.Parameter(
            torch.FloatTensor(1, self.num_groups * self.num_vars, config.codevector_dim // self.num_groups)
        )
        self.weight_proj = nn.Linear(config.conv_dim[-1], self.num_groups * self.num_vars)

        # can be decayed for training
        self.temperature = 2

    @staticmethod
    def _compute_perplexity(probs, mask=None):
        if mask is not None:
            mask_extended = mask.flatten()[:, None, None].expand(probs.shape)
            probs = torch.where(mask_extended, probs, torch.zeros_like(probs))
            marginal_probs = probs.sum(dim=0) / mask.sum()
        else:
            marginal_probs = probs.mean(dim=0)

        perplexity = torch.exp(-torch.sum(marginal_probs * torch.log(marginal_probs + 1e-7), dim=-1)).sum()
        return perplexity

    def forward(self, hidden_states, mask_time_indices=None):
        batch_size, sequence_length, hidden_size = hidden_states.shape

        # project to codevector dim
        hidden_states = self.weight_proj(hidden_states)
        hidden_states = hidden_states.view(batch_size * sequence_length * self.num_groups, -1)

        if self.training:
            # sample code vector probs via gumbel in differentiable way
            codevector_probs = nn.functional.gumbel_softmax(
                hidden_states.float(), tau=self.temperature, hard=True
            ).type_as(hidden_states)

            # compute perplexity
            codevector_soft_dist = torch.softmax(
                hidden_states.view(batch_size * sequence_length, self.num_groups, -1).float(), dim=-1
            )
            perplexity = self._compute_perplexity(codevector_soft_dist, mask_time_indices)
        else:
            # take argmax in non-differentiable way and compute hard codevector distribution (one hot)
            codevector_idx = hidden_states.argmax(dim=-1)
            codevector_probs = hidden_states.new_zeros(hidden_states.shape).scatter_(
                -1, codevector_idx.view(-1, 1), 1.0
            )
            codevector_probs = codevector_probs.view(batch_size * sequence_length, self.num_groups, -1)

            perplexity = self._compute_perplexity(codevector_probs, mask_time_indices)

        codevector_probs = codevector_probs.view(batch_size * sequence_length, -1)
        # use probs to retrieve codevectors
        codevectors_per_group = codevector_probs.unsqueeze(-1) * self.codevectors
        codevectors = codevectors_per_group.view(batch_size * sequence_length, self.num_groups, self.num_vars, -1)
        codevectors = codevectors.sum(-2).view(batch_size, sequence_length, -1)

        return codevectors, perplexity
   r  c                       s$   e Zd Z fddZdd Z  ZS )Wav2Vec2Adapterc                    sp   t     j jkr8t j j| _t j| _nd  | _| _t	 fddt
 jD | _ j| _d S )Nc                 3   s   | ]}t  V  qd S r   )Wav2Vec2AdapterLayerrJ   r   r8   r9   r    rN   z+Wav2Vec2Adapter.__init__.<locals>.<genexpr>)rw   rx   output_hidden_sizer   r   r   projr   proj_layer_normr   rZ   num_adapter_layersr
  r  r   r   r   r9   rx     s    
 zWav2Vec2Adapter.__init__c                 C   sr   | j d ur(| jd ur(|  |}| |}|dd}| jD ]&}tj }| jrX|| jkr:||}q:|dd}|S r   )r:  r;  r   r
  rS   rT   r   r  )r   r,   r   Zlayerdrop_probr8   r8   r9   r     s    




zWav2Vec2Adapter.forwardr   r8   r8   r   r9   r7    s   r7  c                       s$   e Zd Z fddZdd Z  ZS )r8  c                    s0   t    tj|jd|j |j|jdd| _d S )Nr&   r"   )ru   r   )rw   rx   r   r|   r9  Zadapter_kernel_sizeadapter_strider   r   r   r8   r9   rx     s    
zWav2Vec2AdapterLayer.__init__c                 C   s   |  |}tjj|dd}|S )Nr"   r   )r   r   r   Zglur   r8   r8   r9   r     s    
zWav2Vec2AdapterLayer.forwardr   r8   r8   r   r9   r8    s   
r8  c                       s,   e Zd Z fddZejdddZ  ZS )r  c                    sZ   t    |j| _|j| _t| j| _t	| j| j| _
t | _t	| j| j| _dS )z
        Implements adapter modules directly with 3D tensor weight as parameters and without using ModuleList to speed
        up training throughput.
        N)rw   rx   r  	input_dimr   Z
hidden_dimr   r   normr   linear_1ReLUact_fnlinear_2r   r   r8   r9   rx     s    

z!Wav2Vec2AttnAdapterLayer.__init__)r,   c                 C   s,   |  |}| |}| |}| |}|S r   )r?  r@  rB  rC  r   r8   r8   r9   r     s
    



z Wav2Vec2AttnAdapterLayer.forward)r0   r1   r2   rx   r4   r5   r   r   r8   r8   r   r9   r    s   r  c                   @   s   e Zd ZU eed< dZdZdZdZdZ	dZ
dd Zdeejef ee dd	d
ZdeejdddZdd Zdd ZdedddZdS )Wav2Vec2PreTrainedModelr   wav2vec2r   Tc              	   C   s  t |tr2|j  |j  d|j_d|j_nt |trp|jjj	j
ddd |jjj	  tj|j nlt |trtjj
|jjddtd|jjd |jj   d tj|jjd nt |trtd|jj }tjj|jj| |d tjj|jj| |d nt |tjrR|jj	j
d| jjd |jdur|jj	  nt |tjtjfr|jj	  |jj	 d	 nZt |tj!rtj"|j |jdurt|j#|j|jd   }tjj|j| |d dS )
zInitialize the weightsTr   r"   )r+  stdr   r&   )abNr3  )$r   Wav2Vec2ForPreTrainingproject_hidZreset_parameters	project_qZ_is_hf_initializedr  r&  r   dataZnormal_rv   Zzero_r   inituniform_r%  r   r   mathsqrtrt   Zin_channelsZ	constant_r   r   Zin_featuresr   r   Zinitializer_ranger   r   Zfill_r|   Zkaiming_normal_r   )r   r   kr8   r8   r9   _init_weights  s@    




 z%Wav2Vec2PreTrainedModel._init_weightsN)rg   add_adapterc                 C   sn   |du r| j jn|}dd }t| j j| j jD ]\}}||||}q.|rjt| j jD ]}||d| j j}qT|S )zH
        Computes the output length of the convolutional layers
        Nc                 S   s   t j| | |ddd S )Nfloor)Zrounding_moder"   )r4   divrC   rt   ru   r8   r8   r9   _conv_out_length?  s    zRWav2Vec2PreTrainedModel._get_feat_extract_output_lengths.<locals>._conv_out_lengthr"   )r   rS  zipr}   r~   rZ   r<  r=  )r   rg   rS  rW  rt   ru   rL   r8   r8   r9    _get_feat_extract_output_lengths6  s    z8Wav2Vec2PreTrainedModel._get_feat_extract_output_lengths)feature_vector_lengthr=   c                 C   s   |j ddd d df }| j||d}|tj}|jd }tj||f|j|jd}d|tj	|jd |jd|d f< |
dg d
dg }|S )NrI   r   rS  r   )rP   devicer"   )r\  )ZcumsumrY  tor4   longr:   r[   rP   r\  r^   flipr\   )r   rZ  r=   rS  Znon_padded_lengthsZoutput_lengthsrf   r8   r8   r9   "_get_feature_vector_attention_maskM  s    
"z:Wav2Vec2PreTrainedModel._get_feature_vector_attention_maskc                 C   s   | j jd u rt| j di }|  D ]6\}}t|tr(| D ]\}}||d||g< qBq(t| t	r| j
 D ]\}}||dd|g< qt|S )NzF has no adapter layers. Make sure to define `config.adapter_attn_dim`..lm_head)r   r  rR   r   Znamed_modulesr   r  Znamed_parametersjoinWav2Vec2ForCTCrb  )r   adapter_weightsr   r   
param_namer   r8   r8   r9   _get_adaptersa  s    

z%Wav2Vec2PreTrainedModel._get_adaptersc                 C   s<   |   D ]}t|tr| | qt| tr8| | j dS )zc
        (Re-)initialize attention adapter layers and lm head for adapter-only fine-tuning
        N)modulesr   r  rR  rd  rb  )r   r   r8   r8   r9   init_adapter_layersq  s
    

z+Wav2Vec2PreTrainedModel.init_adapter_layerstarget_langc                    s  | j jdu rtd| d|| jkr@|s@td| d dS |dd}|dd}|d	d}|d
d}|dd}|dd}	|dd}
|dd}|dt rdnd}|
durt	dt
 |	durtd|
}	| j j}d}|durvt|}z&t|||||||	||d	}t|}W nP ty@   |r< Y n6 tyt   |rptd| d| d| dY n0 |du rt|}z4t|||||||	||d	}t  tj|ddd}W nX ty    Y nD ty    Y n0 ty   td| d| d| dY n0 |   t| t   }t  t|  }t|dkr|td| dd| dn*t|dkrtd| dd| d|d jd }|| j jkrtj| j j|| j | j!d| _"|| j _ fd d!|# D }| j$|dd" || _dS )#a  
        Load a language adapter model from a pre-trained adapter model.

        Parameters:
            target_lang (`str`):
                Has to be a language id of an existing adapter weight. Adapter weights are stored in the format
                adapter.<lang>.safetensors or adapter.<lang>.bin
            force_load (`bool`, defaults to `True`):
                Whether the weights shall be loaded even if `target_lang` matches `self.target_lang`.
            cache_dir (`Union[str, os.PathLike]`, *optional*):
                Path to a directory in which a downloaded pretrained model configuration should be cached if the
                standard cache should not be used.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            resume_download:
                Deprecated and ignored. All downloads are now resumed by default when possible.
                Will be removed in v5 of Transformers.
            proxies (`dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            local_files_only(`bool`, *optional*, defaults to `False`):
                Whether or not to only look at local files (i.e., do not try to download the model).
            token (`str` or `bool`, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
                the token generated when running `hf auth login` (stored in `~/.huggingface`).
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
                git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
                identifier allowed by git.

                <Tip>

                To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.

                </Tip>

            mirror (`str`, *optional*):
                Mirror source to accelerate downloads in China. If you are from China and have an accessibility
                problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety.
                Please refer to the mirror site for more information.

        <Tip>

        Activate the special ["offline-mode"](https://huggingface.co/transformers/installation.html#offline-mode) to
        use this method in a firewalled environment.

        </Tip>

        Examples:

        ```python
        >>> from transformers import Wav2Vec2ForCTC, AutoProcessor

        >>> ckpt = "facebook/mms-1b-all"
        >>> processor = AutoProcessor.from_pretrained(ckpt)
        >>> model = Wav2Vec2ForCTC.from_pretrained(ckpt, target_lang="eng")
        >>> # set specific language
        >>> processor.tokenizer.set_target_lang("spa")
        >>> model.load_adapter("spa")
        ```
        NzCannot load_adapter for - if `config.adapter_attn_dim` is not defined.z#Adapter weights are already set to ra  	cache_dirforce_downloadFresume_downloadproxieslocal_files_onlytokenuse_auth_tokenrevisionuse_safetensorszrThe `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.zV`token` and `use_auth_token` are both specified. Please set only the argument `token`.)filenamern  ro  rp  rq  rr  rt  rm  zCan't load the model for 'z'. If you were trying to load it from 'https://huggingface.co/models', make sure you don't have a local directory with the same name. Otherwise, make sure 'z=' is the correct path to a directory containing a file named cpuT)Zmap_locationZweights_onlyr   zThe adapter weights z has unexpected keys: z, z has missing keys: zlm_head.weightr\  rP   c                    s    i | ]\}}||  | qS r8   )r]  )rK   rQ  r  re  r8   r9   
<dictcomp>:  rN   z8Wav2Vec2PreTrainedModel.load_adapter.<locals>.<dictcomp>)strict)%r   r  rR   rk  loggerwarningpopr   r   r   r   Z_name_or_pathWAV2VEC2_ADAPTER_SAFE_FILEformatr   safe_load_fileOSError	ExceptionWAV2VEC2_ADAPTER_PT_FILEr   r4   loadrg  setkeysr_   rc  r:   
vocab_sizer   r   r9  r\  rP   rb  itemsZload_state_dict)r   rk  
force_loadr   rm  rn  ro  rp  rq  rr  rs  rt  ru  Zmodel_path_or_idZ
state_dictfilepathZweight_pathZunexpected_keysZmissing_keysZtarget_vocab_sizer8   ry  r9   load_adapter~  s    ?






z$Wav2Vec2PreTrainedModel.load_adapter)N)N)T)r0   r1   r2   r#   r6   Zbase_model_prefixZmain_input_nameZsupports_gradient_checkpointingZ_supports_flash_attnZ_supports_sdpaZ_supports_flex_attnrR  r   r4   
LongTensorrA   r   r\   rY  r`  rg  ri  r   r  r8   r8   r8   r9   rD    s&   
(  rD  c                       s   e Zd Zed fddZdd Zdd Zdeje	ej e	ej
 d	d
dZede	ej e	ej e	ej e	e e	e e	e eeef dddZ  ZS )Wav2Vec2Modelr   c                    s   t  | || _t|| _t|| _|jdks:|jdkrRt	
t|j | _|jrdt|| _n
t|| _|jr|t|nd | _|   d S )Nr   )rw   rx   r   r   feature_extractorr   feature_projectionmask_time_probmask_feature_probr   r$  r4   r   r   rN  masked_spec_embedZdo_stable_layer_normr  encoderr  rS  r7  adapter	post_initr   r   r8   r9   rx   C  s    


zWav2Vec2Model.__init__c                 C   s   t dt |   dS z
        Calling this function will disable the gradient computation for the feature encoder so that its parameters will
        not be updated during training.
        The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. Please use the equivalent `freeze_feature_encoder` method instead.Nr   r   r   freeze_feature_encoderr   r8   r8   r9   freeze_feature_extractorW  s
    z&Wav2Vec2Model.freeze_feature_extractorc                 C   s   | j   dS 
        Calling this function will disable the gradient computation for the feature encoder so that its parameter will
        not be updated during training.
        N)r  r   r  r8   r8   r9   r  c  s    z$Wav2Vec2Model.freeze_feature_encoderN)r,   rl   r=   c                 C   s  t | jdds|S | \}}}|dur<| j|j||< nZ| jjdkr| jrt||f| jj| jj	|| jj
d}tj||jtjd}| j|j||< | jjdkr| jrt||f| jj| jj| jjd}tj||jtjd}|dddf d|d}d||< |S )	z
        Masks extracted features along time axis and/or along feature axis according to
        [SpecAugment](https://huggingface.co/papers/1904.08779).
        Zapply_spec_augmentTNr   )r;   r<   r=   r>   rx  )r;   r<   r>   rI   )r  r   rm   r  r]  rP   r  r   ri   Zmask_time_lengthZmask_time_min_masksr4   r  r\  r\   r  Zmask_feature_lengthZmask_feature_min_masksr)  )r   r,   rl   r=   rf   rG   r   Zmask_feature_indicesr8   r8   r9   _mask_hidden_statesj  s4    z!Wav2Vec2Model._mask_hidden_states)r   r=   rl   r   r  r  r?   c           
      C   s   |dur|n| j j}|dur |n| j j}|dur4|n| j j}| |}|dd}|durp| j|jd |dd}| |\}}| j	|||d}| j
|||||d}	|	d }| jdur| |}|s||f|	dd  S t|||	j|	jd	S )
a/  
        mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict
            masked extracted features in *config.proj_codevector_dim* space.
        Nr"   r&   Fr[  )rl   r=   r=   r   r  r  r   )r  extract_featuresr,   r-   )r   r   r  use_return_dictr  r   r`  r:   r  r  r  r  r   r,   r-   )
r   r   r=   rl   r   r  r  r  r,   Zencoder_outputsr8   r8   r9   r     s@    


zWav2Vec2Model.forward)NN)NNNNN)r0   r1   r2   r#   rx   r  r  r4   r5   r   r  r  r   r   r\   r   r7   r   r   r   r8   r8   r   r9   r  A  s2   
  .     
r  z?
    Wav2Vec2 Model with a quantizer and `VQ` head on top.
    c                       s   e Zd Zed fddZedddZdd Zd	d
 Ze	de
je
je
jedddZedee
j ee
j ee
j ee
j ee ee ee eeef dddZ  ZS )rI  r   c                    s^   t  | t|| _t|j| _t|| _	t
|j|j| _t
|j|j| _|   d S r   )rw   rx   r  rE  r   r   Zfeat_quantizer_dropoutdropout_featuresr  	quantizerr   r   Zproj_codevector_dimrJ  r#  rK  r  r   r   r8   r9   rx     s    

zWav2Vec2ForPreTraining.__init__)r'  c                 C   s   || j _dS )zb
        Set the Gumbel softmax temperature to a given value. Only necessary for training
        N)r  r'  )r   r'  r8   r8   r9   set_gumbel_temperature  s    z-Wav2Vec2ForPreTraining.set_gumbel_temperaturec                 C   s   t dt |   dS r  r  r  r8   r8   r9   r    s
    z/Wav2Vec2ForPreTraining.freeze_feature_extractorc                 C   s   | j j  dS r  rE  r  r   r  r8   r8   r9   r    s    z-Wav2Vec2ForPreTraining.freeze_feature_encoder皙?)target_featuresnegative_featurespredicted_featuresr'  c                 C   s<   t j| |gdd} t j| |  dd| }|| }|S )z
        Compute logits for contrastive loss based using cosine similarity as the distance measure between
        `[positive_feature, negative_features]` and `[predicted_features]`. Additionally, temperature can be applied.
        r   r   rI   )r4   catZcosine_similarityr   r4  )r  r  r  r'  logitsr8   r8   r9   compute_contrastive_logits  s    z1Wav2Vec2ForPreTraining.compute_contrastive_logitsN)r   r=   rl   ro   r   r  r  r?   c              
   C   s"  |dur|n| j j}|dur(|tj}| j||||||d}| |d }	| |d }
|durx| j|
j	d |dd}| j
|
|d\}}|| jjj}| |}d } }}|dur|j	\}}}|d|| d }|||d|d	ddd
}| |dddf ||	| j j}||kd}| rHtd|dd |< |dd	d|d}d|  d dd }tjj| |dd}| j j| j j }|| | |  }|| j j |  }|s|dur||	||f|d	d  S |	||f|d	d  S t!||	|||j"|j#||dS )a  
        mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict
            masked extracted features in *config.proj_codevector_dim* space.
        sampled_negative_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_negatives)`, *optional*):
            Indices indicating which quantized target vectors are used as negative sampled vectors in contrastive loss.
            Required input for pre-training.

        Example:

        ```python
        >>> import torch
        >>> from transformers import AutoFeatureExtractor, Wav2Vec2ForPreTraining
        >>> from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices, _sample_negative_indices
        >>> from datasets import load_dataset

        >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base")
        >>> model = Wav2Vec2ForPreTraining.from_pretrained("facebook/wav2vec2-base")

        >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        >>> input_values = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt").input_values  # Batch size 1

        >>> # compute masked indices
        >>> batch_size, raw_sequence_length = input_values.shape
        >>> sequence_length = model._get_feat_extract_output_lengths(raw_sequence_length).item()
        >>> mask_time_indices = _compute_mask_indices(
        ...     shape=(batch_size, sequence_length), mask_prob=0.2, mask_length=2
        ... )
        >>> sampled_negative_indices = _sample_negative_indices(
        ...     features_shape=(batch_size, sequence_length),
        ...     num_negatives=model.config.num_negatives,
        ...     mask_time_indices=mask_time_indices,
        ... )
        >>> mask_time_indices = torch.tensor(data=mask_time_indices, device=input_values.device, dtype=torch.long)
        >>> sampled_negative_indices = torch.tensor(
        ...     data=sampled_negative_indices, device=input_values.device, dtype=torch.long
        ... )

        >>> with torch.no_grad():
        ...     outputs = model(input_values, mask_time_indices=mask_time_indices)

        >>> # compute cosine similarity between predicted (=projected_states) and target (=projected_quantized_states)
        >>> cosine_sim = torch.cosine_similarity(outputs.projected_states, outputs.projected_quantized_states, dim=-1)

        >>> # show that cosine similarity is much higher than random
        >>> cosine_sim[mask_time_indices.to(torch.bool)].mean() > 0.5
        tensor(True)

        >>> # for contrastive loss training model should be put into train mode
        >>> model = model.train()
        >>> loss = model(
        ...     input_values, mask_time_indices=mask_time_indices, sampled_negative_indices=sampled_negative_indices
        ... ).loss
        ```N)r=   r   r  rl   r  r   r"   Fr[  )rl   rI   r&   r   z-infirX   )	reduction)r(   r)   r*   r+   r,   r-   r.   r/   )$r   r  r]  r4   r\   rE  rJ  r  r`  r:   r  rK  r   rP   r   r^  Zpermuter  Zcontrastive_logits_temperatureallanyr   r   re   rm   r(  r   r   Zcross_entropyr!  r   rX   Zdiversity_loss_weightr'   r,   r-   )r   r   r=   rl   ro   r   r  r  r   Ztransformer_featuresr  Zquantized_featuresr+   r(   r.   r/   rf   rG   r   Znegative_quantized_featuresr  Z
neg_is_postargetZnum_codevectorsr8   r8   r9   r     s|    B




	

zWav2Vec2ForPreTraining.forward)r  )NNNNNN)r0   r1   r2   r#   rx   rA   r  r  r  r6  r4   r5   r  r   r   r   Z
BoolTensorr\   r   r7   r'   r   r   r8   r8   r   r9   rI    s:          
rI  c                       s^   e Zd Z fddZedejeej ee	 ee	 ee	 eej
 eeef dddZ  ZS )Wav2Vec2ForMaskedLMc                    sN   t  | tdt t|| _t|j	| _
t|j|j| _|   d S )NzSThe class `Wav2Vec2ForMaskedLM` is deprecated. Please use `Wav2Vec2ForCTC` instead.)rw   rx   r   r   r   r  rE  r   r   final_dropoutr   r   r   r  rb  r  r   r   r8   r9   rx     s    
zWav2Vec2ForMaskedLM.__init__Nr   r=   r   r  r  labelsr?   c                 C   sn   |d ur|n| j j}| j||||d}|d }| |}| |}	|s\|	f|dd   }
|
S t|	|j|jdS )N)r   r  r  r   r&   )r  r,   r-   )r   r  rE  r   rb  r   r,   r-   )r   r   r=   r   r  r  r  r   r,   r  outputr8   r8   r9   r     s    


zWav2Vec2ForMaskedLM.forward)NNNNN)r0   r1   r2   rx   r   r4   r5   r   r  r\   r   r   r7   r   r   r   r8   r8   r   r9   r    s         
r  zp
    Wav2Vec2 Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).
    c                       s   e Zd Zdee d fddZdd Zdd Zd	d
 Zdd Z	e
deej eej ee ee ee eej eeef dddZ  ZS )rd  Nrj  c                    s~   t  | t|| _t|j| _|| _|j	du rFt
d| j dt|dr\|jr\|jn|j}t||j	| _|   dS )a2  
        target_lang (`str`, *optional*):
            Language id of adapter weights. Adapter weights are stored in the format adapter.<lang>.safetensors or
            adapter.<lang>.bin. Only relevant when using an instance of [`Wav2Vec2ForCTC`] with adapters. Uses 'eng' by
            default.
        NzYou are trying to instantiate z with a configuration that does not define the vocabulary size of the language model head. Please instantiate the model as follows: `Wav2Vec2ForCTC.from_pretrained(..., vocab_size=vocab_size)`. or define `vocab_size` of your model's configuration.rS  )rw   rx   r  rE  r   r   r  r   rk  r  rR   r   r   rS  r9  r   r   rb  r  )r   r   rk  r9  r   r8   r9   rx     s    

zWav2Vec2ForCTC.__init__c                 C   sr   | j }|dur2t| jdddu r2td| dn<|du rXt| jdddurXtd n|durn| j|dd dS )a'  
        This method overwrites [`~PreTrainedModel.tie_weights`] so that adapter weights can be correctly loaded when
        passing `target_lang=...` to `from_pretrained(...)`.

        This method is **not** supposed to be called by the user and is prone to be changed in the future.
        Nr  zCannot pass `target_lang`: rl  z)By default `target_lang` is set to 'eng'.T)r  )rk  r  r   rR   r|  infor  )r   rk  r8   r8   r9   tie_weights  s    zWav2Vec2ForCTC.tie_weightsc                 C   s   t dt |   dS r  r  Nr  r  r8   r8   r9   r    s
    z'Wav2Vec2ForCTC.freeze_feature_extractorc                 C   s   | j j  dS r  r  r  r8   r8   r9   r  &  s    z%Wav2Vec2ForCTC.freeze_feature_encoderc                 C   s   | j  D ]
}d|_q
dS z
        Calling this function will disable the gradient computation for the base model so that its parameters will not
        be updated during training. Only the classification head will be updated.
        FNrE  r   r   r   r8   r8   r9   freeze_base_model-  s    z Wav2Vec2ForCTC.freeze_base_modelr  c              
   C   s  |dur|n| j j}|dur>| | j jkr>td| j j | j|||||d}|d }| |}| |}	d}
|dur@|dur|ntj	|tj
d}| |dtj
}|dk}|d}||}tjj|	dtjddd}tjjjd	d
6 tjj||||| j j| j j| j jd}
W d   n1 s60    Y  |sp|	f|td  }|
durl|
f| S |S t|
|	|j|jdS )a  
        labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):
            Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to
            the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.
            All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
            config.vocab_size - 1]`.
        Nz$Label values must be <= vocab_size: r  r   rO   rI   )r   rP   r"   F)Zenabled)blankr  Zzero_infinityr(   r  r,   r-   )r   r  rB   r  rR   rE  r   rb  r4   Z	ones_liker^  rY  rX   r]  Zmasked_selectr   r   Zlog_softmaxZfloat32r   backendsZcudnnflagsZctc_lossZpad_token_idZctc_loss_reductionZctc_zero_infinity_HIDDEN_STATES_START_POSITIONr   r,   r-   )r   r   r=   r   r  r  r  r   r,   r  r(   rg   Zlabels_maskZtarget_lengthsZflattened_targetsZ	log_probsr  r8   r8   r9   r   5  sL    




&
zWav2Vec2ForCTC.forward)N)NNNNN)r0   r1   r2   r   r   rx   r  r  r  r  r   r4   r   r\   r   r7   r   r   r   r8   r8   r   r9   rd    s(        
rd  z
    Wav2Vec2 Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like
    SUPERB Keyword Spotting.
    c                       sz   e Zd Z fddZdd Zdd Zdd Zedee	j
 ee	j
 ee ee ee ee	j
 eeef d
ddZ  ZS )!Wav2Vec2ForSequenceClassificationc                    s   t  | t|dr$|jr$tdt|| _|jd }|jrTt	
t|| | _t	|j|j| _t	|j|j| _|   d S )NrS  z_Sequence classification does not support the use of Wav2Vec2 adapters (config.add_adapter=True)r"   )rw   rx   r   rS  rR   r  rE  r	  use_weighted_layer_sumr   r$  r4   r`   layer_weightsr   r   Zclassifier_proj_size	projector
num_labels
classifierr  r   r   
num_layersr   r8   r9   rx     s    

z*Wav2Vec2ForSequenceClassification.__init__c                 C   s   t dt |   dS r  r  r  r8   r8   r9   r    s
    z:Wav2Vec2ForSequenceClassification.freeze_feature_extractorc                 C   s   | j j  dS r  r  r  r8   r8   r9   r    s    z8Wav2Vec2ForSequenceClassification.freeze_feature_encoderc                 C   s   | j  D ]
}d|_q
dS r  r  r   r8   r8   r9   r    s    z3Wav2Vec2ForSequenceClassification.freeze_base_modelNr  c                 C   s  |dur|n| j j}| j jr dn|}| j|||||d}| j jr|t }tj|dd}tjj	| j
dd}	||	ddd jdd}n|d }| |}|du r|jdd}
nV| |jd |}|ddd|jd }d	|| < |jdd|jdddd }
| |
}d}|dur<t }||d| j j|d}|sl|f|td  }|durh|f| S |S t|||j|jd
S )  
        input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
            Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file
            into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library
            (`pip install torchcodec`) or the soundfile library (`pip install soundfile`).
            To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion
            into a tensor of type `torch.FloatTensor`. See [`Wav2Vec2Processor.__call__`] for details.
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        NTr  r"   r   rI   r   r&   r   r  )r   r  r  rE  r  r4   stackr   r   r   r  r   rX   r  r+  r`  r:   r  r  r  r   r  r   r,   r-   )r   r   r=   r   r  r  r  r   r,   norm_weightsZpooled_outputZpadding_maskZexpand_padding_maskr  r(   loss_fctr  r8   r8   r9   r     sH    

 

z)Wav2Vec2ForSequenceClassification.forward)NNNNN)r0   r1   r2   rx   r  r  r  r   r   r4   r   r\   r   r7   r   r   r   r8   r8   r   r9   r  }  s&        
r  c                       sz   e Zd Z fddZdd Zdd Zdd Zedee	j
 ee	j
 ee	j
 ee ee ee eeef d
ddZ  ZS )#Wav2Vec2ForAudioFrameClassificationc                    sz   t  | t|dr$|jr$tdt|| _|jd }|jrTt	
t|| | _t	|j|j| _|j| _|   d S )NrS  zbAudio frame classification does not support the use of Wav2Vec2 adapters (config.add_adapter=True)r"   )rw   rx   r   rS  rR   r  rE  r	  r  r   r$  r4   r`   r  r   r   r  r  init_weightsr  r   r8   r9   rx     s    

z,Wav2Vec2ForAudioFrameClassification.__init__c                 C   s   t dt |   dS r  r  r  r8   r8   r9   r    s
    z<Wav2Vec2ForAudioFrameClassification.freeze_feature_extractorc                 C   s   | j j  dS r  r  r  r8   r8   r9   r    s    z:Wav2Vec2ForAudioFrameClassification.freeze_feature_encoderc                 C   s   | j  D ]
}d|_q
dS r  r  r   r8   r8   r9   r    s    z5Wav2Vec2ForAudioFrameClassification.freeze_base_modelN)r   r=   r  r   r  r  r?   c                 C   s   |dur|n| j j}| j jr dn|}| j|||||d}| j jr|t }tj|dd}tjj	| j
dd}	||	ddd jdd}n|d }| |}
d}|durt }||
d| jtj|d| jdd}|s|
f|td  }|S t||
|j|jd	S )
r  NTr  r"   r   rI   r   )Zaxisr  )r   r  r  rE  r  r4   r  r   r   r   r  r   rX   r  r   r  r5  r   r,   r-   )r   r   r=   r  r   r  r  r   r,   r  r  r(   r  r  r8   r8   r9   r   #  s:    
(z+Wav2Vec2ForAudioFrameClassification.forward)NNNNN)r0   r1   r2   rx   r  r  r  r   r   r4   r   r\   r   r7   r   r   r   r8   r8   r   r9   r    s&        
r  c                       s&   e Zd Zd fdd	Zdd Z  ZS )AMSoftmaxLoss      >@皙?c                    sB   t    || _|| _|| _tjt||dd| _	t
 | _d S )NT)r   )rw   rx   scalemarginr  r   r$  r4   Zrandnr   r   r(   )r   r>  r  r  r  r   r8   r9   rx   a  s    
zAMSoftmaxLoss.__init__c           	      C   sx   |  }tjj| jdd}tjj|dd}t||}|| j }tj|| j	}| j
t| || }| ||}|S )Nr   r   r"   )r(  r   r   	normalizer   r4   mmr  Zone_hotr  r  r*  r\   r(   )	r   r,   r  r   Z	cos_thetapsiZonehotr  r(   r8   r8   r9   r   i  s    
zAMSoftmaxLoss.forward)r  r  r   r8   r8   r   r9   r  `  s   r  c                       s2   e Zd Zd fdd	ZejejdddZ  ZS )	TDNNLayerr   c                    sv   t    |dkr |j|d  n|j| | _|j| | _|j| | _|j| | _t	
| j| j | j| _t	 | _d S )Nr   r"   )rw   rx   tdnn_dimrz   r{   tdnn_kernelrt   Ztdnn_dilationdilationr   r   kernelrA  r   r   r   r8   r9   rx   x  s    
"zTDNNLayer.__init__)r,   r?   c                 C   s   t  rddlm} t  r.t| j|r.td |dd}| jj	| j
| j| jdd}tjj||| jj| jd}|dd}| |}|S )Nr   )	LoraLayerzDetected LoRA on TDNNLayer. LoRA weights won't be applied due to optimization. You should exclude TDNNLayer from LoRA's target modules.r"   r&   )r  )r   Zpeft.tuners.lorar  r   r  r   r   r   r   r   r{   rt   rz   r   r   Zconv1drv   r  r   )r   r,   r  r   r8   r8   r9   r     s     
zTDNNLayer.forward)r   )r0   r1   r2   rx   r4   r   r   r   r8   r8   r   r9   r  w  s   
r  zl
    Wav2Vec2 Model with an XVector feature extraction head on top for tasks like Speaker Verification.
    c                       s   e Zd Z fddZdd Zdd Zdd Zeej	e
f d	d
dZedeej eej ee ee ee eej eeef dddZ  ZS )Wav2Vec2ForXVectorc                    s   t    t | _ jd } jr<tt	|| | _
t j jd | _ fddtt jD }t|| _t jd d  j| _t j j| _t j j| _|   d S )Nr"   r   c                    s   g | ]}t  |qS r8   )r  r   r   r8   r9   rM     rN   z/Wav2Vec2ForXVector.__init__.<locals>.<listcomp>rI   r&   )rw   rx   r  rE  r	  r  r   r$  r4   r`   r  r   r   r  r  rZ   r_   r   tdnnZxvector_output_dimr  r  r  r  	objectiver  )r   r   r  Ztdnn_layersr   r   r9   rx     s    

zWav2Vec2ForXVector.__init__c                 C   s   t dt |   dS r  r  r  r8   r8   r9   r    s
    z+Wav2Vec2ForXVector.freeze_feature_extractorc                 C   s   | j j  dS r  r  r  r8   r8   r9   r    s    z)Wav2Vec2ForXVector.freeze_feature_encoderc                 C   s   | j  D ]
}d|_q
dS r  r  r   r8   r8   r9   r    s    z$Wav2Vec2ForXVector.freeze_base_model)rg   c                 C   s&   dd }| j jD ]}|||d}q|S )z?
        Computes the output length of the TDNN layers
        c                 S   s   | | | d S )Nr"   r8   rV  r8   r8   r9   rW    s    zEWav2Vec2ForXVector._get_tdnn_output_lengths.<locals>._conv_out_lengthr"   )r   r  )r   rg   rW  rt   r8   r8   r9   _get_tdnn_output_lengths  s    z+Wav2Vec2ForXVector._get_tdnn_output_lengthsNr  c                 C   s  |dur|n| j j}| j jr dn|}| j|||||d}| j jr|t }tj|dd}tjj	| j
dd}	||	ddd jdd}n|d }| |}| jD ]}
|
|}q|du r|jdd}|jdd}n| |jdd}| |}g }g }t|D ]D\}}|||d|f jdd |||d|f jdd qt|}t|}tj||gdd}| |}| |}d}|dur| ||}|s||f|td  }|dur|f| S |S t||||j|jdS )	r  NTr  r"   r   rI   r   )r(   r  Z
embeddingsr,   r-   )r   r  r  rE  r  r4   r  r   r   r   r  r   rX   r  r  r+  rF  rY  r  	enumeraterb   r  r  r  r  r   r,   r-   )r   r   r=   r   r  r  r  r   r,   r  Z
tdnn_layerZmean_featuresZstd_featuresZfeat_extract_output_lengthsZtdnn_output_lengthsr   lengthZstatistic_poolingZoutput_embeddingsr  r(   r  r8   r8   r9   r     s\    



 




zWav2Vec2ForXVector.forward)NNNNN)r0   r1   r2   rx   r  r  r  r   r4   r  rA   r  r   r   r   r\   r7   r   r   r   r8   r8   r   r9   r    s(        
r  )r  rd  r  rI  r  r  r  rD  )Nr   )N)Nr   N)hr3   rO  r   dataclassesr   typingr   r   r   numpyrS   r4   Ztorch.utils.checkpointr   Ztorch.nnr   Zactivationsr	   Zintegrations.deepspeedr
   Zintegrations.fsdpr   Zmodeling_attn_mask_utilsr   r   Zmodeling_flash_attention_utilsr   Zmodeling_layersr   Zmodeling_outputsr   r   r   r   r   r   r   Zmodeling_utilsr   r   Zprocessing_utilsr   r   r   r   r   r   r   r   r    r!   Zconfiguration_wav2vec2r#   r  r  Zsafetensors.torchr$   r  Zintegrations.flex_attentionr%   Z
get_loggerr0   r|  r  r'   r7   rA   r   r  Zndarrayri   rq   rr   r   r   Moduler   r   r   r   r   r   r   r   r   r   r  r  r  r  r7  r8  r  rD  r  rI  r  rd  r  r  r  r  r  __all__r8   r8   r8   r9   <module>   s   $	(

!  
x $-(   X$.^bL  =  ]- si  