a
    ãÀhÁ  ã                   @  sZ   d dl mZ d dlZd dlmZmZ erHd dlZd dlm	Z	 d dl
mZ G dd„ dƒZdS )é    )ÚannotationsN)ÚTYPE_CHECKINGÚAny)ÚTensor)ÚSentenceTransformerc                   @  sŒ   e Zd ZdZdd„ Zd"ddddd	d
œdd„Zddddœdd„Zd#ddddddœdd„Zeddœdd„ƒZ	ddœdd„Z
ddddœd d!„ZdS )$ÚSentenceEvaluatoraÀ  
    Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and ``primary_metric``
    attributes. The former is a boolean indicating whether a higher evaluation score is better, which is used
    for choosing the best checkpoint if ``load_best_model_at_end`` is set to ``True`` in the training arguments.

    The latter is a string indicating the primary metric for the evaluator. This has to be defined whenever
    the evaluator returns a dictionary of metrics, and the primary metric is the key pointing to the primary
    metric, i.e. the one that is used for model selection and/or logging.

    Extend this class and implement __call__ for custom evaluators.
    c                 C  s   d| _ d | _d S )NT)Zgreater_is_betterÚprimary_metric©Úself© r   ún/var/www/html/assistant/venv/lib/python3.9/site-packages/sentence_transformers/evaluation/SentenceEvaluator.pyÚ__init__   s    zSentenceEvaluator.__init__Néÿÿÿÿr   z
str | NoneÚintzfloat | dict[str, float])ÚmodelÚoutput_pathÚepochÚstepsÚreturnc                 C  s   dS )aÏ  
        This is called during training to evaluate the model.
        It returns a score for the evaluation with a higher score indicating a better result.

        Args:
            model: the model to evaluate
            output_path: path where predictions and metrics are written
                to
            epoch: the epoch where the evaluation takes place. This is
                used for the file prefixes. If this is -1, then we
                assume evaluation on test data.
            steps: the steps in the current epoch at time of the
                evaluation. This is used for the file prefixes. If this
                is -1, then we assume evaluation at the end of the
                epoch.

        Returns:
            Either a score for the evaluation with a higher score
            indicating a better result, or a dictionary with scores. If
            the latter is chosen, then `evaluator.primary_metric` must
            be defined
        Nr   )r
   r   r   r   r   r   r   r   Ú__call__   s    zSentenceEvaluator.__call__zdict[str, float]Ústr)ÚmetricsÚnamer   c                   sp   dddœdd„‰ ˆs*‡ fdd„|  ¡ D ƒS ‡ ‡fdd„|  ¡ D ƒ}t| dƒrl| j ˆd	 ¡slˆd	 | j | _|S )
Nr   )Úvaluer   c                 S  s&   z
t | ƒW S  ty    |  Y S 0 d S ©N)ÚfloatÚ
ValueError)r   r   r   r   Úmaybe_to_float:   s    
z@SentenceEvaluator.prefix_name_to_metrics.<locals>.maybe_to_floatc                   s   i | ]\}}|ˆ |ƒ“qS r   r   ©Ú.0Úkeyr   )r   r   r   Ú
<dictcomp>A   ó    z<SentenceEvaluator.prefix_name_to_metrics.<locals>.<dictcomp>c                   s"   i | ]\}}ˆd  | ˆ |ƒ“qS )Ú_r   r   ©r   r   r   r   r!   B   r"   r   r#   )ÚitemsÚhasattrr   Ú
startswith)r
   r   r   r   r$   r   Úprefix_name_to_metrics9   s    z(SentenceEvaluator.prefix_name_to_metricsr   zdict[str, Any]ÚNone)r   r   r   Ústepr   c                 C  s   |j  | |||¡ d S r   )Zmodel_card_dataZset_evaluation_metrics)r
   r   r   r   r*   r   r   r   Ú store_metrics_in_model_card_dataG   s    z2SentenceEvaluator.store_metrics_in_model_card_data)r   c                 C  s^   | j j}| d¡r"d|dd…  }z| d¡}|d|… }W n tyN   Y n0 t dd|¡S )a  
        Returns a human-readable description of the evaluator: BinaryClassificationEvaluator -> Binary Classification

        1. Replace "CE" prefix with "CrossEncoder"
        2. Remove "Evaluator" from the class name
        3. Add a space before every capital letter
        ZCEZCrossEncoderé   NZ	Evaluatorz([a-z])([A-Z])z\g<1> \g<2>)Ú	__class__Ú__name__r'   ÚindexÚ
IndexErrorÚreÚsub)r
   Ú
class_namer/   r   r   r   ÚdescriptionL   s    	

zSentenceEvaluator.descriptionc                 C  s   i S )z{
        Return a dictionary with all meaningful configuration values of the evaluator to store in the model card.
        r   r	   r   r   r   Úget_config_dictb   s    z!SentenceEvaluator.get_config_dictzstr | list[str] | np.ndarrayzPlist[Tensor] | np.ndarray | Tensor | dict[str, Tensor] | list[dict[str, Tensor]])r   Ú	sentencesr   c                 K  s   |j |fi |¤ŽS )ae  
        Call the encoder method of the model pass

        Args:
            model (SentenceTransformer): Model we are evaluating
            sentences (str | list[str] | np.ndarray): Text that we are embedding

        Returns:
            list[Tensor] | np.ndarray | Tensor | dict[str, Tensor] | list[dict[str, Tensor]]: The associated embedding
        )Úencode)r
   r   r6   Úkwargsr   r   r   Úembed_inputsh   s    zSentenceEvaluator.embed_inputs)Nr   r   )r   r   )r.   Ú
__module__Ú__qualname__Ú__doc__r   r   r(   r+   Úpropertyr4   r5   r9   r   r   r   r   r      s    ÿ ÿr   )Ú
__future__r   r1   Útypingr   r   ÚnumpyÚnpZtorchr   Z)sentence_transformers.SentenceTransformerr   r   r   r   r   r   Ú<module>   s   