from __future__ import annotations

import json
import logging
from pathlib import Path

from transformers.configuration_utils import PretrainedConfig

from sentence_transformers.backend.utils import (
    _save_pretrained_wrapper,
    backend_should_export,
    backend_warn_to_save,
)

logger = logging.getLogger(__name__)


def load_onnx_model(model_name_or_path: str, config: PretrainedConfig, task_name: str, **model_kwargs):
    """
    Load and perhaps export an ONNX model using the Optimum library.

    Args:
        model_name_or_path (str): The model name on Hugging Face (e.g. 'naver/splade-cocondenser-ensembledistil')
            or the path to a local model directory.
        config (PretrainedConfig): The model configuration.
        task_name (str): The task name for the model (e.g. 'feature-extraction', 'fill-mask', 'sequence-classification').
        model_kwargs (dict): Additional keyword arguments for the model loading.
    """
    try:
        import onnxruntime as ort
        from optimum.onnxruntime import (
            ONNX_WEIGHTS_NAME,
            ORTModelForFeatureExtraction,
            ORTModelForMaskedLM,
            ORTModelForSequenceClassification,
        )

        task_to_model_mapping = {
            "feature-extraction": ORTModelForFeatureExtraction,
            "fill-mask": ORTModelForMaskedLM,
            "sequence-classification": ORTModelForSequenceClassification,
        }
        if task_name not in task_to_model_mapping:
            supported_tasks = ", ".join(task_to_model_mapping.keys())
            raise ValueError(f"Unsupported task: {task_name}. Supported tasks: {supported_tasks}")
        model_cls = task_to_model_mapping[task_name]
    except ModuleNotFoundError:
        raise Exception(
            "Using the ONNX backend requires installing Optimum and ONNX Runtime. "
            "You can install them with pip: `pip install optimum[onnxruntime]` "
            "or `pip install optimum[onnxruntime-gpu]`"
        )

    # Default to the highest-priority available execution provider if none was requested
    model_kwargs["provider"] = model_kwargs.pop("provider", ort.get_available_providers()[0])

    load_path = Path(model_name_or_path)
    is_local = load_path.exists()
    backend_name = "ONNX"
    target_file_glob = "*.onnx"

    # Decide whether the model must be exported to ONNX or can be loaded as-is
    export, model_kwargs = backend_should_export(
        load_path, is_local, model_kwargs, ONNX_WEIGHTS_NAME, target_file_glob, backend_name
    )

    # When exporting, there is no pre-existing ONNX file to select
    if export:
        model_kwargs.pop("file_name", None)

    # Either load the already-exported model or export it on the fly
    model = model_cls.from_pretrained(model_name_or_path, config=config, export=export, **model_kwargs)

    # Make save_pretrained write the ONNX weights into an "onnx" subfolder
    model._save_pretrained = _save_pretrained_wrapper(model._save_pretrained, subfolder="onnx")

    # Nudge the user to save the exported model so it is not re-exported on every load
    if export:
        backend_warn_to_save(model_name_or_path, is_local, backend_name)

    return model
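# --- Illustrative usage (a sketch, not part of the original module) ----------
# A minimal example of calling load_onnx_model() directly, assuming
# `optimum[onnxruntime]` is installed. AutoConfig is the standard `transformers`
# loader; the model name is the one from the docstring above, and the
# task_name/provider choices here are illustrative assumptions. Within
# sentence-transformers this function is normally invoked for you when a model
# is loaded with backend="onnx".
#
#     from transformers import AutoConfig
#
#     model_id = "naver/splade-cocondenser-ensembledistil"
#     config = AutoConfig.from_pretrained(model_id)
#     ort_model = load_onnx_model(
#         model_id,
#         config=config,
#         task_name="fill-mask",  # SPLADE-style models score tokens via the MLM head
#         provider="CPUExecutionProvider",  # optional; defaults to the best available provider
#     )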
def load_openvino_model(model_name_or_path: str, config: PretrainedConfig, task_name: str, **model_kwargs):
    """
    Load and perhaps export an OpenVINO model using the Optimum library.

    Args:
        model_name_or_path (str): The model name on Hugging Face (e.g. 'naver/splade-cocondenser-ensembledistil')
            or the path to a local model directory.
        config (PretrainedConfig): The model configuration.
        task_name (str): The task name for the model (e.g. 'feature-extraction', 'fill-mask', 'sequence-classification').
        model_kwargs (dict): Additional keyword arguments for the model loading.
    """
    try:
        from optimum.intel.openvino import (
            OV_XML_FILE_NAME,
            OVModelForFeatureExtraction,
            OVModelForMaskedLM,
            OVModelForSequenceClassification,
        )

        task_to_model_mapping = {
            "feature-extraction": OVModelForFeatureExtraction,
            "fill-mask": OVModelForMaskedLM,
            "sequence-classification": OVModelForSequenceClassification,
        }
        if task_name not in task_to_model_mapping:
            supported_tasks = ", ".join(task_to_model_mapping.keys())
            raise ValueError(f"Unsupported task: {task_name}. Supported tasks: {supported_tasks}")
        model_cls = task_to_model_mapping[task_name]
    except ModuleNotFoundError:
        raise Exception(
            "Using the OpenVINO backend requires installing Optimum and OpenVINO. "
            "You can install them with pip: `pip install optimum[openvino]`"
        )

    load_path = Path(model_name_or_path)
    is_local = load_path.exists()
    backend_name = "OpenVINO"
    target_file_glob = "openvino*.xml"

    # Decide whether the model must be exported to OpenVINO or can be loaded as-is
    export, model_kwargs = backend_should_export(
        load_path, is_local, model_kwargs, OV_XML_FILE_NAME, target_file_glob, backend_name
    )

    # When exporting, there is no pre-existing OpenVINO file to select
    if export:
        model_kwargs.pop("file_name", None)

    # ov_config may be a dictionary, or a path to a .json file holding one
    if "ov_config" in model_kwargs:
        ov_config = model_kwargs["ov_config"]
        if not isinstance(ov_config, dict):
            if not Path(ov_config).exists():
                raise ValueError(
                    "ov_config should be a dictionary or a path to a .json file containing an OpenVINO config"
                )
            with open(ov_config, encoding="utf-8") as f:
                model_kwargs["ov_config"] = json.load(f)
    else:
        model_kwargs["ov_config"] = {}

    # Either load the already-exported model or export it on the fly
    model = model_cls.from_pretrained(model_name_or_path, config=config, export=export, **model_kwargs)

    # Make save_pretrained write the OpenVINO files into an "openvino" subfolder
    model._save_pretrained = _save_pretrained_wrapper(model._save_pretrained, subfolder="openvino")

    # Nudge the user to save the exported model so it is not re-exported on every load
    if export:
        backend_warn_to_save(model_name_or_path, is_local, backend_name)

    return model
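# --- Illustrative usage (a sketch, not part of the original module) ----------
# A minimal example of calling load_openvino_model() directly, assuming
# `optimum[openvino]` is installed. Per the code above, ov_config may be a dict
# or a path to a .json file; "INFERENCE_PRECISION_HINT" is a real OpenVINO
# runtime property, while the model name and task are illustrative assumptions.
#
#     from transformers import AutoConfig
#
#     model_id = "sentence-transformers/all-MiniLM-L6-v2"
#     config = AutoConfig.from_pretrained(model_id)
#     ov_model = load_openvino_model(
#         model_id,
#         config=config,
#         task_name="feature-extraction",
#         ov_config={"INFERENCE_PRECISION_HINT": "f32"},  # force full precision
#     )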