from typing import Any, Union, overload

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import Pipeline, build_pipeline_init_args


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES

logger = logging.get_logger(__name__)


@add_end_docstrings(build_pipeline_init_args(has_image_processor=True))
class DepthEstimationPipeline(Pipeline):
    """
    Depth estimation pipeline using any `AutoModelForDepthEstimation`. This pipeline predicts the depth of an image.

    Example:

    ```python
    >>> from transformers import pipeline

    >>> depth_estimator = pipeline(task="depth-estimation", model="LiheYoung/depth-anything-base-hf")
    >>> output = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    >>> # This is a tensor with the values being the depth expressed in meters for each pixel
    >>> output["predicted_depth"].shape
    torch.Size([1, 384, 384])
    ```

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)

    This depth estimation pipeline can currently be loaded from [`pipeline`] using the following task identifier:
    `"depth-estimation"`.

    See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=depth-estimation).
    """

    _load_processor = False
    _load_image_processor = True
    _load_feature_extractor = False
    _load_tokenizer = False

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES)

    @overload
    def __call__(self, inputs: Union[str, "Image.Image"], **kwargs: Any) -> dict[str, Any]: ...

    @overload
    def __call__(self, inputs: list[Union[str, "Image.Image"]], **kwargs: Any) -> list[dict[str, Any]]: ...

    def __call__(
        self, inputs: Union[str, list[str], "Image.Image", list["Image.Image"]] = None, **kwargs
    ) -> Union[dict[str, Any], list[dict[str, Any]]]:
        """
        Predict the depth(s) of the image(s) passed as inputs.

        Args:
            inputs (`str`, `list[str]`, `PIL.Image` or `list[PIL.Image]`):
                The pipeline handles three types of images:

                - A string containing an HTTP link pointing to an image
                - A string containing a local path to an image
                - An image loaded in PIL directly

                The pipeline accepts either a single image or a batch of images, which must then be passed as a list.
                Images in a batch must all be in the same format: all as HTTP links, all as local paths, or all as PIL
                images.
            parameters (`Dict`, *optional*):
                A dictionary of argument names to parameter values, to control pipeline behaviour.
                The only parameter available right now is `timeout`, which is the length of time, in seconds,
                that the pipeline should wait before giving up on trying to download an image.
            timeout (`float`, *optional*, defaults to None):
                The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
                the call may block forever.

        Return:
            A dictionary or a list of dictionaries containing the results. If the input is a single image, a
            dictionary is returned; if the input is a list of several images, a list of dictionaries corresponding to
            the images is returned.

            The dictionaries contain the following keys:

            - **predicted_depth** (`torch.Tensor`) -- The predicted depth by the model as a `torch.Tensor`.
            - **depth** (`PIL.Image`) -- The predicted depth by the model as a `PIL.Image`.
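
        Example (illustrative; assumes the checkpoint from the class docstring is available and the image URL is
        reachable within `timeout`):

        ```python
        >>> from transformers import pipeline

        >>> depth_estimator = pipeline(task="depth-estimation", model="LiheYoung/depth-anything-base-hf")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> results = depth_estimator([url, url], timeout=30.0)  # a batch call returns one dictionary per image
        >>> len(results)
        2
        ```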
        imagesNzECannot call the depth-estimation pipeline without an inputs argument!)pop
ValueErrorr   r   r   r   r   r   r   B   s
    $
Nc                 K   s<   i }|d ur||d< t |tr2d|v r2|d |d< |i i fS )Ntimeout)
isinstancedict)r   r#   
parametersr   Zpreprocess_paramsr   r   r   _sanitize_parametersl   s    z,DepthEstimationPipeline._sanitize_parametersc                 C   sH   t ||}| j|| jd}| jdkr0|| j}|jd d d |d< |S )N)r    Zreturn_tensorspttarget_size)r   image_processorZ	frameworktoZdtypesize)r   imager#   model_inputsr   r   r   
preprocesst   s    

z"DepthEstimationPipeline.preprocessc                 C   s&   | d}| jf i |}||d< |S )Nr*   )r!   model)r   r/   r*   model_outputsr   r   r   _forward|   s    
z DepthEstimationPipeline._forwardc                 C   s   | j ||d g}g }|D ]\}|d    }||  | |   }t|d 	d}|
|d |d qt|dkr|d S |S )Nr*   predicted_depth   Zuint8)r4   depthr   r   )r+   Zpost_process_depth_estimationdetachcpunumpyminmaxr   Z	fromarrayZastypeappendlen)r   r2   outputsZformatted_outputsoutputr6   r   r   r   postprocess   s    z#DepthEstimationPipeline.postprocess)NN)N)__name__
__module____qualname____doc__Z_load_processorZ_load_image_processorZ_load_feature_extractorZ_load_tokenizerr   r   r   strr   r%   r   listr'   r0   r3   r@   __classcell__r   r   r   r   r      s"   $,*

r   N)typingr   r   r   utilsr   r   r   r	   r
   baser   r   ZPILr   Zimage_utilsr   Zmodels.auto.modeling_autor   Z
get_loggerrA   loggerr   r   r   r   r   <module>   s   

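

# Usage sketch (illustrative; assumes `torch` is installed and the "LiheYoung/depth-anything-base-hf"
# checkpoint from the class docstring can be downloaded; the pipeline is normally built through
# `transformers.pipeline` rather than instantiated directly):
#
#     from transformers import pipeline
#
#     depth_estimator = pipeline(task="depth-estimation", model="LiheYoung/depth-anything-base-hf")
#     result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#     result["depth"].save("depth.png")  # `depth` is a PIL.Image, min-max normalized by `postprocess`
#     print(result["predicted_depth"].shape)  # raw depth tensor as returned by the model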