from ...configuration_utils import PretrainedConfig
from ..auto import CONFIG_MAPPING, AutoConfig


class VoxtralEncoderConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`VoxtralEncoder`]. It is used to instantiate a
    Voxtral audio encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the audio encoder of the Voxtral
    architecture.

    e.g. [mistralai/Voxtral-Mini-3B-2507](https://huggingface.co/mistralai/Voxtral-Mini-3B-2507)

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 51866):
            Vocabulary size of the model.
        hidden_size (`int`, *optional*, defaults to 1280):
            Dimensionality of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 5120):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 20):
            Number of attention heads for each attention layer in the Transformer encoder.
        scale_embedding (`bool`, *optional*, defaults to `False`):
            Whether to scale the embeddings by multiplying them by the square root of `hidden_size`.
        activation_function (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, "gelu",
        num_mel_bins (`int`, *optional*, defaults to 128):
            Number of mel features used per input features. Should correspond to the value used in the
            `VoxtralProcessor` class.
        max_source_positions (`int`, *optional*, defaults to 1500):
            The maximum sequence length of log-mel filter-bank features that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.

    ```python
    >>> from transformers import VoxtralEncoderConfig, VoxtralEncoder

    >>> # Initializing a VoxtralEncoderConfig
    >>> configuration = VoxtralEncoderConfig()

    >>> # Initializing a VoxtralEncoder (with random weights)
    >>> model = VoxtralEncoder(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
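
    >>> # A minimal sketch of persisting and reloading the config via the standard
    >>> # `PretrainedConfig` API (assumes a writable local directory "./voxtral_encoder_config")
    >>> configuration.save_pretrained("./voxtral_encoder_config")  # doctest: +SKIP
    >>> configuration = VoxtralEncoderConfig.from_pretrained("./voxtral_encoder_config")  # doctest: +SKIP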
    ```Úvoxtral_encoderÚhidden_sizeÚnum_hidden_layersÚnum_attention_headsÚintermediate_sizeÚ	layerdrop)Zd_modelZencoder_layersZencoder_attention_headsZencoder_ffn_dimZencoder_layerdropéšÊ  é   é   é    é   FÚgelué€   éÜ  ç{®Gáz”?ç        c                    sj   t ƒ jf i |¤Ž || _|| _|| _|| _|| _|| _|| _|| _	|	| _
|
| _d| _d| _d| _|| _d S )Nr   )ÚsuperÚ__init__Ú
vocab_sizer   r   r	   r
   Úscale_embeddingÚactivation_functionÚnum_mel_binsÚmax_source_positionsÚinitializer_rangeZdropoutr   Zactivation_dropoutÚattention_dropout)Úselfr   r   r   r	   r
   r   r   r   r   r   r   Úkwargs©Ú	__class__© úm/var/www/html/assistant/venv/lib/python3.9/site-packages/transformers/models/voxtral/configuration_voxtral.pyr   P   s    zVoxtralEncoderConfig.__init__)r   r   r   r   r   Fr   r   r   r   r   )Ú__name__Ú
__module__Ú__qualname__Ú__doc__Ú
model_typeZattribute_mapr   Ú__classcell__r$   r$   r"   r%   r      s(   1û
           ôr   c                       sJ   e Zd ZdZdZeedœZddddddd	d
dddœ
Zd‡ fdd„	Z‡  Z	S )ÚVoxtralConfiga3  
    This is the configuration class to store the configuration of a [`VoxtralForConditionalGeneration`]. It is used to instantiate a
    Voxtral model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the Voxtral-Mini-3B.

    e.g. [mistralai/Voxtral-Mini-3B-2507](https://huggingface.co/mistralai/Voxtral-Mini-3B-2507)

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        audio_config (`Union[AutoConfig, dict]`, *optional*):
            The config object or dictionary of the audio encoder.
        text_config (`Union[AutoConfig, dict]`, *optional*):
            The config object or dictionary of the text model.
        audio_token_id (`int`, *optional*):
            The audio token index used to encode the audio prompt.
        projector_hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The activation function (function or string) in the multi-modal projector.

    ```python
    >>> from transformers import VoxtralForConditionalGeneration, VoxtralConfig

    >>> # Initializing a Voxtral configuration
    >>> configuration = VoxtralConfig(audio_token_id=24, projector_hidden_act="gelu")

    >>> # Initializing a 3B model with random weights
    >>> model = VoxtralForConditionalGeneration(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
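
    >>> # A minimal sketch of building the config from explicit sub-configs; both a config
    >>> # object and a plain dict are accepted (the dict's "model_type" selects the text backbone)
    >>> from transformers import VoxtralEncoderConfig
    >>> audio_config = VoxtralEncoderConfig(num_hidden_layers=4)
    >>> text_config = {"model_type": "llama", "num_hidden_layers": 2}
    >>> configuration = VoxtralConfig(audio_config=audio_config, text_config=text_config)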
    ```Zvoxtral)Útext_configÚaudio_configi   i   i    é   é   gñhãˆµøä>Tg    „×—Ar   )
r   r   r   r	   Znum_key_value_headsZmax_position_embeddingsZrms_norm_epsZ	use_cacheZ
rope_thetaZhead_dimNr   c                    sÚ   t |tƒr2| dd¡|d< t|d  f i |¤Ž}n|d u rDtd ƒ }|| _t |tƒr†| dd¡|d< t|d  f i i | j¥|¥¤Ž}n|d u r¢td f i | j¤Ž}|| _|j| _|j| _|| _	|| _
tƒ jf i |¤Ž d S )Nr*   r   Úllama)Ú
isinstanceÚdictÚgetr   r.   Ú_default_text_config_kwargsr-   r   r   Úaudio_token_idÚprojector_hidden_actr   r   )r    r.   r-   r6   r7   r!   r"   r$   r%   r   ©   s&    


ÿzVoxtralConfig.__init__)NNNr   )
r&   r'   r(   r)   r*   r   Zsub_configsr5   r   r+   r$   r$   r"   r%   r,   w   s&   !
ö    ûr,   N)Zconfiguration_utilsr   Úautor   r   r   r,   Ú__all__r$   r$   r$   r%   Ú<module>   s   cR
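

# A minimal usage sketch: running this module as a script (e.g. with
# `python -m transformers.models.voxtral.configuration_voxtral`) builds the default nested
# configs and checks that they survive a round trip through their dict form. Illustrative
# only; the attributes compared below were chosen arbitrarily.
if __name__ == "__main__":
    config = VoxtralConfig()
    print(type(config.audio_config).__name__, type(config.text_config).__name__)

    # `to_dict()` serializes the nested sub-configs to plain dicts, which `__init__` accepts back.
    as_dict = config.to_dict()
    rebuilt = VoxtralConfig(audio_config=as_dict["audio_config"], text_config=as_dict["text_config"])
    print(rebuilt.audio_config.hidden_size == config.audio_config.hidden_size)
    print(rebuilt.text_config.hidden_size == config.text_config.hidden_size)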