"""TimesFM model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class TimesFmConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`TimesFmModelForPrediction`]. It is used to
    instantiate a TimesFM model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the TimesFM
    [google/timesfm-2.0-500m-pytorch](https://huggingface.co/google/timesfm-2.0-500m-pytorch) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Arguments:
        patch_length (`int`, *optional*, defaults to 32):
            The length of one patch in the input sequence.
        context_length (`int`, *optional*, defaults to 512):
            The length of the input context.
        horizon_length (`int`, *optional*, defaults to 128):
            The length of the prediction horizon.
        freq_size (`int`, *optional*, defaults to 3):
            The number of frequency embeddings.
        num_hidden_layers (`int`, *optional*, defaults to 50):
            Number of Transformer layers.
        hidden_size (`int`, *optional*, defaults to 1280):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 1280):
            Dimension of the MLP representations.
        head_dim (`int`, *optional*, defaults to 80):
            Size of the key, query, value projections per attention head. The `inner_dim` of the projection layer will
            be defined as `num_attention_heads * head_dim`.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        tolerance (`float`, *optional*, defaults to 1e-06):
            The tolerance for the quantile loss.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the RMS normalization layers.
        quantiles (`list[float]`, *optional*, defaults to `[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]`):
            The quantiles to predict.
        pad_val (`float`, *optional*, defaults to 1123581321.0):
            The value used to pad the predictions.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for the attention scores.
        use_positional_embedding (`bool`, *optional*, defaults to `False`):
            Whether to add positional embeddings.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        min_timescale (`int`, *optional*, defaults to 1):
            The start of the geometric positional index. Determines the periodicity of
            the added signal.
        max_timescale (`int`, *optional*, defaults to 10000):
            The end of the geometric positional index. Determines the frequency of the
            added signal.
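
    Example (a minimal usage sketch in the style of other Transformers configuration
    docstrings, pairing this config with the [`TimesFmModelForPrediction`] class named
    above):

    ```python
    >>> from transformers import TimesFmConfig, TimesFmModelForPrediction

    >>> # Initializing a TimesFM configuration with default values
    >>> # (a close match to the google/timesfm-2.0-500m-pytorch checkpoint)
    >>> configuration = TimesFmConfig()

    >>> # Initializing a model (with random weights) from that configuration
    >>> model = TimesFmModelForPrediction(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```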
    ZtimesfmFé    i   é€   r   é2   i   éP   é   gíµ ÷Æ°>)	gš™™™™™¹?gš™™™™™É?g333333Ó?gš™™™™™Ù?g      à?g333333ã?gffffffæ?gš™™™™™é?gÍÌÌÌÌÌì?g  @b¾ÐAg        g{®Gáz”?é   i'  )Úpatch_lengthÚcontext_lengthÚhorizon_lengthÚ	freq_sizeÚnum_hidden_layersÚhidden_sizeÚintermediate_sizeÚhead_dimÚnum_attention_headsÚ	toleranceÚrms_norm_epsÚ	quantilesÚpad_valÚattention_dropoutÚuse_positional_embeddingÚinitializer_rangeÚmin_timescaleÚmax_timescalec                    sˆ   || _ || _|| _|| _|| _|| _|| _|| _|| _|| _	|	| _
|
| _|| _|| _|| _|| _|| _|| _tƒ jf d| ji|¤Ž d S )NÚis_encoder_decoder)r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   ÚsuperÚ__init__r   )Úselfr   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   Úkwargs©Ú	__class__© úm/var/www/html/assistant/venv/lib/python3.9/site-packages/transformers/models/timesfm/configuration_timesfm.pyr   P   s.    
ÿþzTimesFmConfig.__init__)Ú__name__Ú
__module__Ú__qualname__Ú__doc__Z
model_typeZkeys_to_ignore_at_inferencer   ÚintÚfloatÚlistÚboolr   Ú__classcell__r$   r$   r"   r%   r      sT   3íír   N)
r)   Zconfiguration_utilsr   Úutilsr   Z
get_loggerr&   Úloggerr   Ú__all__r$   r$   r$   r%   Ú<module>   s
   
g