a
    h"                     @   sN  d dl Z d dlmZ d dlmZ d dlmZmZmZm	Z	m
Z
mZ d dlmZmZ eeZeG dd dZeG dd	 d	Ze	eeee f  e	e ee d
ddZG dd deZG dd deZG dd deZeeeef  e
eee f dddZdd Zeee eeeef  dddZe	ee  e	ee  dddZdS )    N)defaultdict)	dataclass)AnyDictListOptionalTupleUnion)logging	yaml_dumpc                   @   s:  e Zd ZU dZeed< eed< eed< eed< eed< dZee ed< dZ	ee ed	< dZ
ee ed
< dZee ed< dZeeeef  ed< dZee ed< dZee ed< dZeeeef  ed< dZee ed< dZee ed< dZee ed< dZee ed< eedddZd edddZddddZdS )
EvalResultu  
    Flattened representation of individual evaluation results found in model-index of Model Cards.

    For more information on the model-index spec, see https://github.com/huggingface/hub-docs/blob/main/modelcard.md?plain=1.

    Args:
        task_type (`str`):
            The task identifier. Example: "image-classification".
        dataset_type (`str`):
            The dataset identifier. Example: "common_voice". Use dataset id from https://hf.co/datasets.
        dataset_name (`str`):
            A pretty name for the dataset. Example: "Common Voice (French)".
        metric_type (`str`):
            The metric identifier. Example: "wer". Use metric id from https://hf.co/metrics.
        metric_value (`Any`):
            The metric value. Example: 0.9 or "20.0 ± 1.2".
        task_name (`str`, *optional*):
            A pretty name for the task. Example: "Speech Recognition".
        dataset_config (`str`, *optional*):
            The name of the dataset configuration used in `load_dataset()`.
            Example: fr in `load_dataset("common_voice", "fr")`. See the `datasets` docs for more info:
            https://hf.co/docs/datasets/package_reference/loading_methods#datasets.load_dataset.name
        dataset_split (`str`, *optional*):
            The split used in `load_dataset()`. Example: "test".
        dataset_revision (`str`, *optional*):
            The revision (AKA Git Sha) of the dataset used in `load_dataset()`.
            Example: 5503434ddd753f426f4b38109466949a1217c2bb
        dataset_args (`Dict[str, Any]`, *optional*):
            The arguments passed during `Metric.compute()`. Example for `bleu`: `{"max_order": 4}`
        metric_name (`str`, *optional*):
            A pretty name for the metric. Example: "Test WER".
        metric_config (`str`, *optional*):
            The name of the metric configuration used in `load_metric()`.
            Example: bleurt-large-512 in `load_metric("bleurt", "bleurt-large-512")`.
            See the `datasets` docs for more info: https://huggingface.co/docs/datasets/v2.1.0/en/loading#load-configurations
        metric_args (`Dict[str, Any]`, *optional*):
            The arguments passed during `Metric.compute()`. Example for `bleu`: max_order: 4
        verified (`bool`, *optional*):
            Indicates whether the metrics originate from Hugging Face's [evaluation service](https://huggingface.co/spaces/autoevaluate/model-evaluator) or not. Automatically computed by Hugging Face, do not set.
        verify_token (`str`, *optional*):
            A JSON Web Token that is used to verify whether the metrics originate from Hugging Face's [evaluation service](https://huggingface.co/spaces/autoevaluate/model-evaluator) or not.
        source_name (`str`, *optional*):
            The name of the source of the evaluation result. Example: "Open LLM Leaderboard".
        source_url (`str`, *optional*):
            The URL of the source of the evaluation result. Example: "https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard".
    	task_typedataset_typedataset_namemetric_typemetric_valueN	task_namedataset_configdataset_splitdataset_revisiondataset_argsmetric_namemetric_configmetric_argsverifiedverify_tokensource_name
source_urlreturnc                 C   s   | j | j| j| j| jfS )z9Returns a tuple that uniquely identifies this evaluation.)r   r   r   r   r   self r"   Y/var/www/html/assistant/venv/lib/python3.9/site-packages/huggingface_hub/repocard_data.pyunique_identifier   s    zEvalResult.unique_identifier)otherr   c                 C   sD   | j  D ]4\}}|dkrq
|dkr
t| |t||kr
 dS q
dS )zx
        Return True if `self` and `other` describe exactly the same metric but with a
        different value.
        r   r   FT)__dict__itemsgetattr)r!   r%   key_r"   r"   r#   is_equal_except_value   s    z EvalResult.is_equal_except_valuec                 C   s    | j d ur| jd u rtdd S )NzAIf `source_name` is provided, `source_url` must also be provided.)r   r   
ValueErrorr    r"   r"   r#   __post_init__   s    zEvalResult.__post_init__)__name__
__module____qualname____doc__str__annotations__r   r   r   r   r   r   r   r   r   r   r   r   boolr   r   r   propertytupler$   r+   r-   r"   r"   r"   r#   r      s,   
3
r   c                   @   s   e Zd ZdZd"edddZdd Zdd	 Zd#ee	e
  e
dddZdd Zdd Zd$e
eedddZd%e
eedddZe
edddZe
ed
dddZe
edddZedd d!Zd
S )&CardDataa  Structure containing metadata from a RepoCard.

    [`CardData`] is the parent class of [`ModelCardData`] and [`DatasetCardData`].

    Metadata can be exported as a dictionary or YAML. Export can be customized to alter the representation of the data
    (example: flatten evaluation results). `CardData` behaves as a dictionary (can get, pop, set values) but do not
    inherit from `dict` to allow this export step.
    F)ignore_metadata_errorsc                 K   s   | j | d S N)r&   update)r!   r8   kwargsr"   r"   r#   __init__   s    zCardData.__init__c                 C   s(   t | j}| | dd | D S )zConverts CardData to a dict.

        Returns:
            `dict`: CardData represented as a dictionary ready to be dumped to a YAML
            block for inclusion in a README.md file.
        c                 S   s   i | ]\}}|d ur||qS r9   r"   ).0r)   valuer"   r"   r#   
<dictcomp>       z$CardData.to_dict.<locals>.<dictcomp>)copydeepcopyr&   _to_dictr'   r!   Z	data_dictr"   r"   r#   to_dict   s    
zCardData.to_dictc                 C   s   dS )zUse this method in child classes to alter the dict representation of the data. Alter the dict in-place.

        Args:
            data_dict (`dict`): The raw dict representation of the card data.
        Nr"   rD   r"   r"   r#   rC      s    zCardData._to_dictN)original_orderr   c                    sH   |r2 fdd|t t j t|  D  _t  d|d S )a
  Dumps CardData to a YAML block for inclusion in a README.md file.

        Args:
            line_break (str, *optional*):
                The line break to use when dumping to yaml.

        Returns:
            `str`: CardData represented as a YAML block.
        c                    s"   i | ]}| j v r| j | qS r"   r&   )r=   kr    r"   r#   r?      s   
z$CardData.to_yaml.<locals>.<dictcomp>F)	sort_keys
line_break)listsetr&   keysr   rE   strip)r!   rJ   rF   r"   r    r#   to_yaml   s
    

zCardData.to_yamlc                 C   s
   t | jS r9   )reprr&   r    r"   r"   r#   __repr__   s    zCardData.__repr__c                 C   s   |   S r9   )rO   r    r"   r"   r#   __str__   s    zCardData.__str__)r)   defaultr   c                 C   s   | j |}|du r|S |S )#Get value for a given metadata key.N)r&   get)r!   r)   rS   r>   r"   r"   r#   rU      s    zCardData.getc                 C   s   | j ||S )z#Pop value for a given metadata key.)r&   pop)r!   r)   rS   r"   r"   r#   rV      s    zCardData.pop)r)   r   c                 C   s
   | j | S )rT   rG   r!   r)   r"   r"   r#   __getitem__   s    zCardData.__getitem__)r)   r>   r   c                 C   s   || j |< dS )z#Set value for a given metadata key.NrG   )r!   r)   r>   r"   r"   r#   __setitem__   s    zCardData.__setitem__c                 C   s
   || j v S )z%Check if a given metadata key is set.rG   rW   r"   r"   r#   __contains__   s    zCardData.__contains__r   c                 C   s
   t | jS )z'Return the number of metadata keys set.)lenr&   r    r"   r"   r#   __len__   s    zCardData.__len__)F)NN)N)N)r.   r/   r0   r1   r4   r<   rE   rC   r   r   r2   rO   rQ   rR   r   rU   rV   rX   rY   rZ   intr\   r"   r"   r"   r#   r7      s   	r7   )eval_results
model_namer   c                 C   s`   | d u rg S t | tr| g} t | tr8tdd | D sLtdt|  d|d u r\td| S )Nc                 s   s   | ]}t |tV  qd S r9   )
isinstancer   )r=   rr"   r"   r#   	<genexpr>   r@   z)_validate_eval_results.<locals>.<genexpr>zM`eval_results` should be of type `EvalResult` or a list of `EvalResult`, got .z7Passing `eval_results` requires `model_name` to be set.)r`   r   rK   allr,   type)r^   r_   r"   r"   r#   _validate_eval_results   s    
rf   c                       s   e Zd ZdZddddddddddddddeeeee f  eeeee f  eee  eeeee f  ee ee ee ee eee  ee ee eee  e	d fddZ
dd Z  ZS )	ModelCardDataaQ  Model Card Metadata that is used by Hugging Face Hub when included at the top of your README.md

    Args:
        base_model (`str` or `List[str]`, *optional*):
            The identifier of the base model from which the model derives. This is applicable for example if your model is a
            fine-tune or adapter of an existing model. The value must be the ID of a model on the Hub (or a list of IDs
            if your model derives from multiple models). Defaults to None.
        datasets (`Union[str, List[str]]`, *optional*):
            Dataset or list of datasets that were used to train this model. Should be a dataset ID
            found on https://hf.co/datasets. Defaults to None.
        eval_results (`Union[List[EvalResult], EvalResult]`, *optional*):
            List of `huggingface_hub.EvalResult` that define evaluation results of the model. If provided,
            `model_name` is used to as a name on PapersWithCode's leaderboards. Defaults to `None`.
        language (`Union[str, List[str]]`, *optional*):
            Language of model's training data or metadata. It must be an ISO 639-1, 639-2 or
            639-3 code (two/three letters), or a special value like "code", "multilingual". Defaults to `None`.
        library_name (`str`, *optional*):
            Name of library used by this model. Example: keras or any library from
            https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/src/model-libraries.ts.
            Defaults to None.
        license (`str`, *optional*):
            License of this model. Example: apache-2.0 or any license from
            https://huggingface.co/docs/hub/repositories-licenses. Defaults to None.
        license_name (`str`, *optional*):
            Name of the license of this model. Defaults to None. To be used in conjunction with `license_link`.
            Common licenses (Apache-2.0, MIT, CC-BY-SA-4.0) do not need a name. In that case, use `license` instead.
        license_link (`str`, *optional*):
            Link to the license of this model. Defaults to None. To be used in conjunction with `license_name`.
            Common licenses (Apache-2.0, MIT, CC-BY-SA-4.0) do not need a link. In that case, use `license` instead.
        metrics (`List[str]`, *optional*):
            List of metrics used to evaluate this model. Should be a metric name that can be found
            at https://hf.co/metrics. Example: 'accuracy'. Defaults to None.
        model_name (`str`, *optional*):
            A name for this model. It is used along with
            `eval_results` to construct the `model-index` within the card's metadata. The name
            you supply here is what will be used on PapersWithCode's leaderboards. If None is provided
            then the repo name is used as a default. Defaults to None.
        pipeline_tag (`str`, *optional*):
            The pipeline tag associated with the model. Example: "text-classification".
        tags (`List[str]`, *optional*):
            List of tags to add to your model that can be used when filtering on the Hugging
            Face Hub. Defaults to None.
        ignore_metadata_errors (`str`):
            If True, errors while parsing the metadata section will be ignored. Some information might be lost during
            the process. Use it at your own risk.
        kwargs (`dict`, *optional*):
            Additional metadata that will be added to the model card. Defaults to None.

    Example:
        ```python
        >>> from huggingface_hub import ModelCardData
        >>> card_data = ModelCardData(
        ...     language="en",
        ...     license="mit",
        ...     library_name="timm",
        ...     tags=['image-classification', 'resnet'],
        ... )
        >>> card_data.to_dict()
        {'language': 'en', 'license': 'mit', 'library_name': 'timm', 'tags': ['image-classification', 'resnet']}

        ```
    NF)
base_modeldatasetsr^   languagelibrary_namelicenselicense_namelicense_linkmetricsr_   pipeline_tagtagsr8   c             
      sN  || _ || _|| _|| _|| _|| _|| _|| _|	| _|
| _	|| _
t|| _|dd }|rzt|\}
}|
| _	|| _W nP ttfy } z4|rtd ntd|j d| dW Y d }~n
d }~0 0 t jf i | | jrJzt| j| j	| _W nP tyH } z6|r$td| d ntd| |W Y d }~n
d }~0 0 d S )Nmodel-indexz<Invalid model-index. Not loading eval results into CardData.z4Invalid `model_index` in metadata cannot be parsed:  z. Pass `ignore_metadata_errors=True` to ignore this error while loading a Model Card. Warning: some information will be lost. Use it at your own risk.z!Failed to validate eval_results: z). Not loading eval results into CardData.)rh   ri   r^   rj   rk   rl   rm   rn   ro   r_   rp   _to_unique_listrq   rV   model_index_to_eval_resultsKeyError	TypeErrorloggerwarningr,   	__class__superr<   rf   	Exception)r!   rh   ri   r^   rj   rk   rl   rm   rn   ro   r_   rp   rq   r8   r;   model_indexerrorerz   r"   r#   r<   I  s@    

zModelCardData.__init__c                 C   s,   | j dur(t| j| j |d< |d= |d= dS )z[Format the internal data dict. In this case, we convert eval results to a valid model indexNrr   r^   r_   )r^   eval_results_to_model_indexr_   rD   r"   r"   r#   rC     s    
zModelCardData._to_dict)r.   r/   r0   r1   r   r	   r2   r   r   r4   r<   rC   __classcell__r"   r"   r   r#   rg   	  s<   B


:rg   c                       s  e Zd ZdZdddddddddddddddeeeee f  eeeee f  eeeee f  eeeee f  eeeee f  eeeee f  eee  eeeee f  eeeee f  ee ee ee eeeee f  e	d fddZ
dd Z  ZS )	DatasetCardDataa	  Dataset Card Metadata that is used by Hugging Face Hub when included at the top of your README.md

    Args:
        language (`List[str]`, *optional*):
            Language of dataset's data or metadata. It must be an ISO 639-1, 639-2 or
            639-3 code (two/three letters), or a special value like "code", "multilingual".
        license (`Union[str, List[str]]`, *optional*):
            License(s) of this dataset. Example: apache-2.0 or any license from
            https://huggingface.co/docs/hub/repositories-licenses.
        annotations_creators (`Union[str, List[str]]`, *optional*):
            How the annotations for the dataset were created.
            Options are: 'found', 'crowdsourced', 'expert-generated', 'machine-generated', 'no-annotation', 'other'.
        language_creators (`Union[str, List[str]]`, *optional*):
            How the text-based data in the dataset was created.
            Options are: 'found', 'crowdsourced', 'expert-generated', 'machine-generated', 'other'
        multilinguality (`Union[str, List[str]]`, *optional*):
            Whether the dataset is multilingual.
            Options are: 'monolingual', 'multilingual', 'translation', 'other'.
        size_categories (`Union[str, List[str]]`, *optional*):
            The number of examples in the dataset. Options are: 'n<1K', '1K<n<10K', '10K<n<100K',
            '100K<n<1M', '1M<n<10M', '10M<n<100M', '100M<n<1B', '1B<n<10B', '10B<n<100B', '100B<n<1T', 'n>1T', and 'other'.
        source_datasets (`List[str]]`, *optional*):
            Indicates whether the dataset is an original dataset or extended from another existing dataset.
            Options are: 'original' and 'extended'.
        task_categories (`Union[str, List[str]]`, *optional*):
            What categories of task does the dataset support?
        task_ids (`Union[str, List[str]]`, *optional*):
            What specific tasks does the dataset support?
        paperswithcode_id (`str`, *optional*):
            ID of the dataset on PapersWithCode.
        pretty_name (`str`, *optional*):
            A more human-readable name for the dataset. (ex. "Cats vs. Dogs")
        train_eval_index (`Dict`, *optional*):
            A dictionary that describes the necessary spec for doing evaluation on the Hub.
            If not provided, it will be gathered from the 'train-eval-index' key of the kwargs.
        config_names (`Union[str, List[str]]`, *optional*):
            A list of the available dataset configs for the dataset.
    NF)rj   rl   annotations_creatorslanguage_creatorsmultilingualitysize_categoriessource_datasetstask_categoriestask_idspaperswithcode_idpretty_nametrain_eval_indexconfig_namesr8   c                   sp   || _ || _|| _|| _|| _|| _|| _|| _|	| _|
| _	|| _
|| _|pV|dd | _t jf i | d S )Ntrain-eval-index)r   r   rj   rl   r   r   r   r   r   r   r   r   rV   r   r{   r<   )r!   rj   rl   r   r   r   r   r   r   r   r   r   r   r   r8   r;   r   r"   r#   r<     s    zDatasetCardData.__init__c                 C   s   | d|d< d S )Nr   r   )rV   rD   r"   r"   r#   rC     s    zDatasetCardData._to_dict)r.   r/   r0   r1   r   r	   r2   r   r   r4   r<   rC   r   r"   r"   r   r#   r     s@   *
$r   c                       s   e Zd ZdZdddddddddddddee ee ee ee ee ee ee ee eee  eee  eee  ed fddZ	  Z
S )SpaceCardDataa	  Space Card Metadata that is used by Hugging Face Hub when included at the top of your README.md

    To get an exhaustive reference of Spaces configuration, please visit https://huggingface.co/docs/hub/spaces-config-reference#spaces-configuration-reference.

    Args:
        title (`str`, *optional*)
            Title of the Space.
        sdk (`str`, *optional*)
            SDK of the Space (one of `gradio`, `streamlit`, `docker`, or `static`).
        sdk_version (`str`, *optional*)
            Version of the used SDK (if Gradio/Streamlit sdk).
        python_version (`str`, *optional*)
            Python version used in the Space (if Gradio/Streamlit sdk).
        app_file (`str`, *optional*)
            Path to your main application file (which contains either gradio or streamlit Python code, or static html code).
            Path is relative to the root of the repository.
        app_port (`str`, *optional*)
            Port on which your application is running. Used only if sdk is `docker`.
        license (`str`, *optional*)
            License of this model. Example: apache-2.0 or any license from
            https://huggingface.co/docs/hub/repositories-licenses.
        duplicated_from (`str`, *optional*)
            ID of the original Space if this is a duplicated Space.
        models (List[`str`], *optional*)
            List of models related to this Space. Should be a dataset ID found on https://hf.co/models.
        datasets (`List[str]`, *optional*)
            List of datasets related to this Space. Should be a dataset ID found on https://hf.co/datasets.
        tags (`List[str]`, *optional*)
            List of tags to add to your Space that can be used when filtering on the Hub.
        ignore_metadata_errors (`str`):
            If True, errors while parsing the metadata section will be ignored. Some information might be lost during
            the process. Use it at your own risk.
        kwargs (`dict`, *optional*):
            Additional metadata that will be added to the space card.

    Example:
        ```python
        >>> from huggingface_hub import SpaceCardData
        >>> card_data = SpaceCardData(
        ...     title="Dreambooth Training",
        ...     license="mit",
        ...     sdk="gradio",
        ...     duplicated_from="multimodalart/dreambooth-training"
        ... )
        >>> card_data.to_dict()
        {'title': 'Dreambooth Training', 'sdk': 'gradio', 'license': 'mit', 'duplicated_from': 'multimodalart/dreambooth-training'}
        ```
    NF)titlesdksdk_versionpython_versionapp_fileapp_portrl   duplicated_frommodelsri   rq   r8   c                   s\   || _ || _|| _|| _|| _|| _|| _|| _|	| _|
| _	t
|| _t jf i | d S r9   )r   r   r   r   r   r   rl   r   r   ri   rt   rq   r{   r<   )r!   r   r   r   r   r   r   rl   r   r   ri   rq   r8   r;   r   r"   r#   r<     s    
zSpaceCardData.__init__)r.   r/   r0   r1   r   r2   r]   r   r4   r<   r   r"   r"   r   r#   r     s6   4


r   )r}   r   c                 C   sF  g }| D ]2}|d }|d }|D ]}|d d }|d  d}|d d }|d d }	|d  d}
|d  d}|d  d}|d  d	}| d
i  d}| d
i  d}|d D ]z}|d }|d }| d}| d	}| d}| d}| d}t|||	||||
||||||||||d}|| qq"q||fS )a  Takes in a model index and returns the model name and a list of `huggingface_hub.EvalResult` objects.

    A detailed spec of the model index can be found here:
    https://github.com/huggingface/hub-docs/blob/main/modelcard.md?plain=1

    Args:
        model_index (`List[Dict[str, Any]]`):
            A model index data structure, likely coming from a README.md file on the
            Hugging Face Hub.

    Returns:
        model_name (`str`):
            The name of the model as found in the model index. This is used as the
            identifier for the model on leaderboards like PapersWithCode.
        eval_results (`List[EvalResult]`):
            A list of `huggingface_hub.EvalResult` objects containing the metrics
            reported in the provided model_index.

    Example:
        ```python
        >>> from huggingface_hub.repocard_data import model_index_to_eval_results
        >>> # Define a minimal model index
        >>> model_index = [
        ...     {
        ...         "name": "my-cool-model",
        ...         "results": [
        ...             {
        ...                 "task": {
        ...                     "type": "image-classification"
        ...                 },
        ...                 "dataset": {
        ...                     "type": "beans",
        ...                     "name": "Beans"
        ...                 },
        ...                 "metrics": [
        ...                     {
        ...                         "type": "accuracy",
        ...                         "value": 0.9
        ...                     }
        ...                 ]
        ...             }
        ...         ]
        ...     }
        ... ]
        >>> model_name, eval_results = model_index_to_eval_results(model_index)
        >>> model_name
        'my-cool-model'
        >>> eval_results[0].task_type
        'image-classification'
        >>> eval_results[0].metric_type
        'accuracy'

        ```
    nameresultstaskre   datasetconfigsplitrevisionargssourceurlro   r>   r   verifyToken)r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   )rU   r   append)r}   r^   elemr   r   resultr   r   r   r   r   r   r   r   r   r   Zmetricr   r   r   r   r   r   r   eval_resultr"   r"   r#   ru   +  sX    8






ru   c                 C   sR   t | tttfr&t| dd | D S t | trJt| dd |  D S | S dS )zk
    Recursively remove `None` values from a dict. Borrowed from: https://stackoverflow.com/a/20558778
    c                 s   s   | ]}|d urt |V  qd S r9   _remove_none)r=   xr"   r"   r#   rb     r@   z_remove_none.<locals>.<genexpr>c                 s   s2   | ]*\}}|d ur|d urt |t |fV  qd S r9   r   )r=   rH   vr"   r"   r#   rb     r@   N)r`   rK   r6   rL   re   dictr'   )objr"   r"   r#   r     s
    
r   )r_   r^   r   c           
   	   C   s   t t}|D ]}||j | qg }| D ]}|d }|j|jd|j|j|j	|j
|j|jddd |D d}|jdurd|ji}|jdur|j|d	< ||d
< || q.| |dg}	t|	S )a  Takes in given model name and list of `huggingface_hub.EvalResult` and returns a
    valid model-index that will be compatible with the format expected by the
    Hugging Face Hub.

    Args:
        model_name (`str`):
            Name of the model (ex. "my-cool-model"). This is used as the identifier
            for the model on leaderboards like PapersWithCode.
        eval_results (`List[EvalResult]`):
            List of `huggingface_hub.EvalResult` objects containing the metrics to be
            reported in the model-index.

    Returns:
        model_index (`List[Dict[str, Any]]`): The eval_results converted to a model-index.

    Example:
        ```python
        >>> from huggingface_hub.repocard_data import eval_results_to_model_index, EvalResult
        >>> # Define minimal eval_results
        >>> eval_results = [
        ...     EvalResult(
        ...         task_type="image-classification",  # Required
        ...         dataset_type="beans",  # Required
        ...         dataset_name="Beans",  # Required
        ...         metric_type="accuracy",  # Required
        ...         metric_value=0.9,  # Required
        ...     )
        ... ]
        >>> eval_results_to_model_index("my-cool-model", eval_results)
        [{'name': 'my-cool-model', 'results': [{'task': {'type': 'image-classification'}, 'dataset': {'name': 'Beans', 'type': 'beans'}, 'metrics': [{'type': 'accuracy', 'value': 0.9}]}]}]

        ```
    r   )re   r   )r   re   r   r   r   r   c              
   S   s.   g | ]&}|j |j|j|j|j|j|jd qS ))re   r>   r   r   r   r   r   )r   r   r   r   r   r   r   )r=   r   r"   r"   r#   
<listcomp>  s   
z/eval_results_to_model_index.<locals>.<listcomp>)r   r   ro   Nr   r   r   )r   r   )r   rK   r$   r   valuesr   r   r   r   r   r   r   r   r   r   r   )
r_   r^   Ztask_and_ds_types_mapr   Zmodel_index_datar   Zsample_resultdatar   r}   r"   r"   r#   r     s@    %



r   )rq   r   c                 C   s0   | d u r| S g }| D ]}||vr| | q|S r9   )r   )rq   Zunique_tagstagr"   r"   r#   rt     s    rt   )rA   collectionsr   dataclassesr   typingr   r   r   r   r   r	   Zhuggingface_hub.utilsr
   r   Z
get_loggerr.   rx   r   r7   r2   rf   rg   r   r   ru   r   r   rt   r"   r"   r"   r#   <module>   s,    
 T PQ(h"\