from typing import Any, Union, overload

from ..generation import GenerationConfig
from ..utils import is_torch_available
from .base import Pipeline


if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING
    from ..models.speecht5.modeling_speecht5 import SpeechT5HifiGan

DEFAULT_VOCODER_ID = "microsoft/speecht5_hifigan"


class TextToAudioPipeline(Pipeline):
    """
    Text-to-audio generation pipeline using any `AutoModelForTextToWaveform` or `AutoModelForTextToSpectrogram`. This
    pipeline generates an audio file from an input text and optional other conditional inputs.

    Unless the model you're using explicitly sets these generation parameters in its configuration files
    (`generation_config.json`), the following default values will be used:
    - max_new_tokens: 256

    Example:

    ```python
    >>> from transformers import pipeline

    >>> pipe = pipeline(model="suno/bark-small")
    >>> output = pipe("Hey it's HuggingFace on the phone!")

    >>> audio = output["audio"]
    >>> sampling_rate = output["sampling_rate"]
    ```

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)

    <Tip>

    You can specify parameters passed to the model by using [`TextToAudioPipeline.__call__.forward_params`] or
    [`TextToAudioPipeline.__call__.generate_kwargs`].

    Example:

    ```python
    >>> from transformers import pipeline

    >>> music_generator = pipeline(task="text-to-audio", model="facebook/musicgen-small", framework="pt")

    >>> # diversify the music generation by adding randomness with a high temperature and setting a maximum music length
    >>> generate_kwargs = {
    ...     "do_sample": True,
    ...     "temperature": 0.7,
    ...     "max_new_tokens": 35,
    ... }

    >>> outputs = music_generator("Techno music with high melodic riffs", generate_kwargs=generate_kwargs)
    ```

    </Tip>

    This pipeline can currently be loaded from [`pipeline`] using the following task identifiers: `"text-to-speech"` or
    `"text-to-audio"`.

    See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=text-to-speech).
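
    For spectrogram models (such as SpeechT5), the pipeline needs a vocoder to turn the generated spectrogram into a
    waveform; when none is passed, a [`SpeechT5HifiGan`] vocoder is loaded from `microsoft/speecht5_hifigan` by
    default. A minimal sketch (the checkpoint below is illustrative):

    ```python
    >>> from transformers import pipeline

    >>> # the default HiFi-GAN vocoder is loaded automatically for spectrogram models
    >>> synthesizer = pipeline(task="text-to-speech", model="microsoft/speecht5_tts")
    ```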
    """

    # NOTE: the flag names below are recovered from the compiled class body; the exact
    # boolean values are a best-effort reconstruction.
    _load_processor = True
    _pipeline_calls_generate = True
    _load_image_processor = False
    _load_feature_extractor = False
    _load_tokenizer = True

    # Make sure the docstring above stays in sync when this default changes.
    _default_generation_config = GenerationConfig(
        max_new_tokens=256,
    )

    def __init__(self, *args, vocoder=None, sampling_rate=None, no_processor=True, **kwargs):
        super().__init__(*args, **kwargs)
        self.no_processor = no_processor

        if self.framework == "tf":
            raise ValueError("The TextToAudioPipeline is only available in PyTorch.")

        self.vocoder = None
        if self.model.__class__ in MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING.values():
            # spectrogram models need a vocoder to produce a waveform
            self.vocoder = (
                SpeechT5HifiGan.from_pretrained(DEFAULT_VOCODER_ID).to(self.model.device)
                if vocoder is None
                else vocoder
            )

        self.sampling_rate = sampling_rate
        if self.vocoder is not None:
            self.sampling_rate = self.vocoder.config.sampling_rate

        if self.sampling_rate is None:
            # get the sampling rate from the model config and generation config
            config = self.model.config
            gen_config = self.model.__dict__.get("generation_config", None)
            if gen_config is not None:
                config.update(gen_config.to_dict())

            for sampling_rate_name in ("sample_rate", "sampling_rate"):
                sampling_rate = getattr(config, sampling_rate_name, None)
                if sampling_rate is not None:
                    self.sampling_rate = sampling_rate

        # last resort: the processor's feature extractor may know the sampling rate
        if self.sampling_rate is None and not self.no_processor and hasattr(self.processor, "feature_extractor"):
            self.sampling_rate = self.processor.feature_extractor.sampling_rate
"zTextToAudioPipeline.__init__c                 K   sv   t |tr|g}| jjjdkrH| jjddddddd}|| |}| j	rT| j
n| j}||fi |dd	i}|S )
NZbarkZmax_input_semantic_lengthr   FT
max_length)r+   Zadd_special_tokensZreturn_attention_maskZreturn_token_type_idspaddingZreturn_tensorspt)
isinstancestrr   r   Z
model_typer   Zsemantic_configr   r    r   	tokenizerr$   )r%   textr'   Z
new_kwargspreprocessoroutputr)   r)   r*   
preprocess   s    
	
zTextToAudioPipeline.preprocessc                 K   s   | j || jd}|d }|d }| j rn| j || jd}d|vrL| j|d< || | jjf i ||}n2t|rtd|	  | jf i ||d }| j
d ur| 
|}|S )N)r   forward_paramsgenerate_kwargsr   zYou're using the `TextToAudioPipeline` with a forward-only model, but `generate_kwargs` is non empty. For forward-only TTA models, please use `forward_params` instead of `generate_kwargs`. For reference, the `generate_kwargs` used here are: r   )Z_ensure_tensor_on_devicer   r   Zcan_generater   r    generatelenr   keysr   )r%   Zmodel_inputsr'   r5   r6   r3   r)   r)   r*   _forward   s&    




zTextToAudioPipeline._forward)text_inputsr5   returnc                 K   s   d S Nr)   r%   r;   r5   r)   r)   r*   __call__   s    zTextToAudioPipeline.__call__c                 K   s   d S r=   r)   r>   r)   r)   r*   r?      s    )r;   r<   c                    s   t  j|fi |S )a  
        Generates speech/audio from the inputs. See the [`TextToAudioPipeline`] documentation for more information.

        Args:
            text_inputs (`str` or `list[str]`):
                The text(s) to generate.
            forward_params (`dict`, *optional*):
                Parameters passed to the model generation/forward method. `forward_params` are always passed to the
                underlying model.
            generate_kwargs (`dict`, *optional*):
                The dictionary of ad-hoc parametrization of `generation_config` to be used for the generation call. For a
                complete overview of generate, check the [following
                guide](https://huggingface.co/docs/transformers/en/main_classes/text_generation). `generate_kwargs` are
                only passed to the underlying model if the latter is a generative model.

        Return:
            A `dict` or a list of `dict`: The dictionaries have two keys:

            - **audio** (`np.ndarray` of shape `(nb_channels, audio_length)`) -- The generated audio waveform.
            - **sampling_rate** (`int`) -- The sampling rate of the generated audio waveform.
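
        Example (the checkpoint and parameters below are illustrative; any text-to-audio model works):

        ```python
        >>> from transformers import pipeline

        >>> pipe = pipeline(task="text-to-audio", model="suno/bark-small")
        >>> out = pipe("Hello, my dog is cute", forward_params={"do_sample": True})
        >>> audio, sampling_rate = out["audio"], out["sampling_rate"]
        ```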
        """
        return super().__call__(text_inputs, **forward_params)

    def _sanitize_parameters(
        self,
        preprocess_params=None,
        forward_params=None,
        generate_kwargs=None,
    ):
        # update generate_kwargs based on the assisted-generation attributes, when set
        if getattr(self, "assistant_model", None) is not None:
            generate_kwargs["assistant_model"] = self.assistant_model
        if getattr(self, "assistant_tokenizer", None) is not None:
            generate_kwargs["tokenizer"] = self.tokenizer
            generate_kwargs["assistant_tokenizer"] = self.assistant_tokenizer

        params = {
            "forward_params": forward_params if forward_params else {},
            "generate_kwargs": generate_kwargs if generate_kwargs else {},
        }

        if preprocess_params is None:
            preprocess_params = {}
        postprocess_params = {}

        return preprocess_params, params, postprocess_params
    def postprocess(self, audio):
        output_dict = {}
        if self.no_processor:
            # models can return a dict, a (waveform, ...) tuple, or the raw waveform tensor
            if isinstance(audio, dict):
                waveform = audio["waveform"]
            elif isinstance(audio, tuple):
                waveform = audio[0]
            else:
                waveform = audio
        else:
            waveform = self.processor.decode(audio)

        output_dict["audio"] = waveform.to(device="cpu", dtype=torch.float).numpy()
        output_dict["sampling_rate"] = self.sampling_rate

        return output_dict