from typing import Any, Union, overload

import numpy as np

from ..utils import (
    add_end_docstrings,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import Pipeline, build_pipeline_init_args


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES

logger = logging.get_logger(__name__)


@add_end_docstrings(build_pipeline_init_args(has_image_processor=True))
class ImageToImagePipeline(Pipeline):
    """
    Image to Image pipeline using any `AutoModelForImageToImage`. This pipeline generates an image based on a previous
    image input.

    Example:

    ```python
    >>> from PIL import Image
    >>> import requests

    >>> from transformers import pipeline

    >>> upscaler = pipeline("image-to-image", model="caidas/swin2SR-classical-sr-x2-64")
    >>> img = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
    >>> img = img.resize((64, 64))
    >>> upscaled_img = upscaler(img)
    >>> img.size
    (64, 64)

    >>> upscaled_img.size
    (144, 144)
    ```

    This image to image pipeline can currently be loaded from [`pipeline`] using the following task identifier:
    `"image-to-image"`.

    See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=image-to-image).
    """

    _load_processor = False
    _load_image_processor = True
    _load_feature_extractor = False
    _load_tokenizer = False

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        postprocess_params = {}
        forward_params = {}

        if "timeout" in kwargs:
            preprocess_params["timeout"] = kwargs["timeout"]
        if "head_mask" in kwargs:
            forward_params["head_mask"] = kwargs["head_mask"]

        return preprocess_params, forward_params, postprocess_params

    @overload
    def __call__(self, images: Union[str, "Image.Image"], **kwargs: Any) -> "Image.Image": ...

    @overload
    def __call__(self, images: Union[list[str], list["Image.Image"]], **kwargs: Any) -> list["Image.Image"]: ...

    def __call__(
        self, images: Union[str, list[str], "Image.Image", list["Image.Image"]], **kwargs
    ) -> Union["Image.Image", list["Image.Image"]]:
        """
        Transform the image(s) passed as inputs.

        Args:
            images (`str`, `list[str]`, `PIL.Image` or `list[PIL.Image]`):
                The pipeline handles three types of images:

                - A string containing a http link pointing to an image
                - A string containing a local path to an image
                - An image loaded in PIL directly

                The pipeline accepts either a single image or a batch of images, which must then be passed as a string.
                Images in a batch must all be in the same format: all as http links, all as local paths, or all as PIL
                images.
            timeout (`float`, *optional*, defaults to None):
                The maximum time in seconds to wait for fetching images from the web. If None, no timeout is used and
                the call may block forever.

        Return:
            An image (Image.Image) or a list of images (list["Image.Image"]) containing result(s). If the input is a
            single image, the return will be also a single image, if the input is a list of several images, it will
            return a list of transformed images.
        """
        return super().__call__(images, **kwargs)

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def preprocess(self, image, timeout=None):
        image = load_image(image, timeout=timeout)
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.framework == "pt":
            inputs = inputs.to(self.dtype)
        return inputs

    def postprocess(self, model_outputs):
        images = []
        if "reconstruction" in model_outputs.keys():
            outputs = model_outputs.reconstruction

        for output in outputs:
            output = output.data.squeeze().float().cpu().clamp_(0, 1).numpy()
            output = np.moveaxis(output, source=0, destination=-1)
            output = (output * 255.0).round().astype(np.uint8)
            images.append(Image.fromarray(output))

        return images if len(images) > 1 else images[0]
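

# Usage sketch (illustrative, kept as comments so the module stays importable):
# the pipeline also accepts a list of inputs plus a `timeout` for fetching
# remote images, returning a list of PIL images for a list input. The
# checkpoint and URL are the ones from the class docstring above; the timeout
# value is an arbitrary placeholder.
#
#     from transformers import pipeline
#
#     upscaler = pipeline("image-to-image", model="caidas/swin2SR-classical-sr-x2-64")
#     url = "http://images.cocodataset.org/val2017/000000039769.jpg"
#     upscaled_pair = upscaler([url, url], timeout=10.0)  # -> list of PIL.Image.Image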