from __future__ import annotations

import typing

from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
from ..core.request_options import RequestOptions
from ..types.voice import Voice
from ..types.voice_design_preview_response import VoiceDesignPreviewResponse
from .raw_client import AsyncRawTextToVoiceClient, RawTextToVoiceClient
from ..types.text_to_voice_create_previews_request_output_format import TextToVoiceCreatePreviewsRequestOutputFormat
from ..types.text_to_voice_design_request_output_format import TextToVoiceDesignRequestOutputFormat
from ..types.text_to_voice_remix_request_output_format import TextToVoiceRemixRequestOutputFormat
from ..types.voice_design_request_model_model_id import VoiceDesignRequestModelModelId

if typing.TYPE_CHECKING:
    from .preview.client import AsyncPreviewClient, PreviewClient

# Sentinel used as the default value for optional request parameters.
OMIT = typing.cast(typing.Any, ...)


class TextToVoiceClient:
    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._raw_client = RawTextToVoiceClient(client_wrapper=client_wrapper)
        self._client_wrapper = client_wrapper
        self._preview: typing.Optional[PreviewClient] = None

    @property
    def with_raw_response(self) -> RawTextToVoiceClient:
        """
        Retrieves a raw implementation of this client that returns raw responses.

        Returns
        -------
        RawTextToVoiceClient
        """
        return self._raw_client

    def create_previews(
        self,
        *,
        voice_description: str,
        output_format: typing.Optional[TextToVoiceCreatePreviewsRequestOutputFormat] = None,
        text: typing.Optional[str] = OMIT,
        auto_generate_text: typing.Optional[bool] = OMIT,
        loudness: typing.Optional[float] = OMIT,
        quality: typing.Optional[float] = OMIT,
        seed: typing.Optional[int] = OMIT,
        guidance_scale: typing.Optional[float] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> VoiceDesignPreviewResponse:
        """
        Create a voice from a text prompt.

        Parameters
        ----------
        voice_description : str
            Description to use for the created voice.

        output_format : typing.Optional[TextToVoiceCreatePreviewsRequestOutputFormat]
            The output format of the generated audio.

        text : typing.Optional[str]
            Text to generate. Text length must be between 100 and 1000.

        auto_generate_text : typing.Optional[bool]
            Whether to automatically generate a text suitable for the voice description.

        loudness : typing.Optional[float]
            Controls the volume level of the generated voice. -1 is quietest, 1 is loudest, 0 corresponds to roughly -24 LUFS.

        quality : typing.Optional[float]
            Higher quality results in better voice output but less variety.

        seed : typing.Optional[int]
            Random number that controls the voice generation. Same seed with same inputs produces same voice.

        guidance_scale : typing.Optional[float]
            Controls how closely the AI follows the prompt. Lower numbers give the AI more freedom to be creative, while higher numbers force it to stick more to the prompt. High numbers can cause the voice to sound artificial or robotic. We recommend using longer, more detailed prompts at a lower Guidance Scale.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        VoiceDesignPreviewResponse
            Successful Response
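
        Notes
        -----
        A minimal follow-up sketch for saving one of the returned previews to disk.
        It assumes the VoiceDesignPreviewResponse exposes a previews list whose items
        carry audio_base_64 and generated_voice_id attributes; those attribute names
        are assumptions, not guaranteed by this docstring.

        import base64

        from elevenlabs import ElevenLabs

        client = ElevenLabs(api_key="YOUR_API_KEY")
        response = client.text_to_voice.create_previews(
            output_format="mp3_22050_32",
            voice_description="A sassy squeaky mouse",
        )
        preview = response.previews[0]  # assumed attribute name
        with open("preview.mp3", "wb") as f:
            f.write(base64.b64decode(preview.audio_base_64))  # assumed attribute name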

        Examples
        --------
        from elevenlabs import ElevenLabs

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        client.text_to_voice.create_previews(
            output_format="mp3_22050_32",
            voice_description="A sassy squeaky mouse",
        )
        """
        _response = self._raw_client.create_previews(
            voice_description=voice_description,
            output_format=output_format,
            text=text,
            auto_generate_text=auto_generate_text,
            loudness=loudness,
            quality=quality,
            seed=seed,
            guidance_scale=guidance_scale,
            request_options=request_options,
        )
        return _response.data

    def create(
        self,
        *,
        voice_name: str,
        voice_description: str,
        generated_voice_id: str,
        labels: typing.Optional[typing.Dict[str, typing.Optional[str]]] = OMIT,
        played_not_selected_voice_ids: typing.Optional[typing.Sequence[str]] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> Voice:
        """
        Create a voice from previously generated voice preview. This endpoint should be called after you fetched a generated_voice_id using POST /v1/text-to-voice/design or POST /v1/text-to-voice/:voice_id/remix.

        Parameters
        ----------
        voice_name : str
            Name to use for the created voice.

        voice_description : str
            Description to use for the created voice.

        generated_voice_id : str
            The generated_voice_id to create. Call POST /v1/text-to-voice/create-previews and fetch the generated_voice_id from the response header if you don't have one yet.

        labels : typing.Optional[typing.Dict[str, typing.Optional[str]]]
            Optional, metadata to add to the created voice. Defaults to None.

        played_not_selected_voice_ids : typing.Optional[typing.Sequence[str]]
            List of voice ids that the user has played but not selected. Used for RLHF.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        Voice
            Successful Response

        Examples
        --------
        from elevenlabs import ElevenLabs

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        client.text_to_voice.create(
            voice_name="Sassy squeaky mouse",
            voice_description="A sassy squeaky mouse",
            generated_voice_id="37HceQefKmEi3bGovXjL",
        )
        """
        _response = self._raw_client.create(
            voice_name=voice_name,
            voice_description=voice_description,
            generated_voice_id=generated_voice_id,
            labels=labels,
            played_not_selected_voice_ids=played_not_selected_voice_ids,
            request_options=request_options,
        )
        return _response.data

    def design(
        self,
        *,
        voice_description: str,
        output_format: typing.Optional[TextToVoiceDesignRequestOutputFormat] = None,
        model_id: typing.Optional[VoiceDesignRequestModelModelId] = OMIT,
        text: typing.Optional[str] = OMIT,
        auto_generate_text: typing.Optional[bool] = OMIT,
        loudness: typing.Optional[float] = OMIT,
        seed: typing.Optional[int] = OMIT,
        guidance_scale: typing.Optional[float] = OMIT,
        stream_previews: typing.Optional[bool] = OMIT,
        remixing_session_id: typing.Optional[str] = OMIT,
        remixing_session_iteration_id: typing.Optional[str] = OMIT,
        quality: typing.Optional[float] = OMIT,
        reference_audio_base_64: typing.Optional[str] = OMIT,
        prompt_strength: typing.Optional[float] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> VoiceDesignPreviewResponse:
        """
        Design a voice via a prompt. This method returns a list of voice previews. Each preview has a generated_voice_id and a sample of the voice as base64 encoded mp3 audio. To create a voice use the generated_voice_id of the preferred preview with the /v1/text-to-voice endpoint.

        Parameters
        ----------
        voice_description : str
            Description to use for the created voice.

        output_format : typing.Optional[TextToVoiceDesignRequestOutputFormat]
            Output format of the generated audio. Formatted as codec_sample_rate_bitrate. So an mp3 with 22.05kHz sample rate at 32kbs is represented as mp3_22050_32. MP3 with 192kbps bitrate requires you to be subscribed to Creator tier or above. PCM with 44.1kHz sample rate requires you to be subscribed to Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs.

        model_id : typing.Optional[VoiceDesignRequestModelModelId]
            Model to use for the voice generation. Possible values: eleven_multilingual_ttv_v2, eleven_ttv_v3.

        text : typing.Optional[str]
            Text to generate. Text length must be between 100 and 1000.

        auto_generate_text : typing.Optional[bool]
            Whether to automatically generate a text suitable for the voice description.

        loudness : typing.Optional[float]
            Controls the volume level of the generated voice. -1 is quietest, 1 is loudest, 0 corresponds to roughly -24 LUFS.

        seed : typing.Optional[int]
            Random number that controls the voice generation. Same seed with same inputs produces same voice.

        guidance_scale : typing.Optional[float]
            Controls how closely the AI follows the prompt. Lower numbers give the AI more freedom to be creative, while higher numbers force it to stick more to the prompt. High numbers can cause the voice to sound artificial or robotic. We recommend using longer, more detailed prompts at a lower Guidance Scale.

        stream_previews : typing.Optional[bool]
            Determines whether the Text to Voice previews should be included in the response. If true, only the generated IDs will be returned which can then be streamed via the /v1/text-to-voice/:generated_voice_id/stream endpoint.

        remixing_session_id : typing.Optional[str]
            The remixing session id.

        remixing_session_iteration_id : typing.Optional[str]
            The id of the remixing session iteration where these generations should be attached to. If not provided, a new iteration will be created.

        quality : typing.Optional[float]
            Higher quality results in better voice output but less variety.

        reference_audio_base_64 : typing.Optional[str]
            Reference audio to use for the voice generation. The audio should be base64 encoded. Only supported when using the eleven_ttv_v3 model.

        prompt_strength : typing.Optional[float]
            Controls the balance of prompt versus reference audio when generating voice samples. 0 means almost no prompt influence, 1 means almost no reference audio influence. Only supported when using the eleven_ttv_v3 model and providing reference audio.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        VoiceDesignPreviewResponse
            Successful Response
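
        Notes
        -----
        A possible end-to-end sketch that chains design() into create(), as described
        above. Reading the preferred preview via response.previews[0].generated_voice_id
        is an assumption about the response shape; only the generated_voice_id ->
        create() flow itself is documented.

        from elevenlabs import ElevenLabs

        client = ElevenLabs(api_key="YOUR_API_KEY")
        response = client.text_to_voice.design(
            output_format="mp3_22050_32",
            voice_description="A sassy squeaky mouse",
        )
        generated_voice_id = response.previews[0].generated_voice_id  # assumed path
        voice = client.text_to_voice.create(
            voice_name="Sassy squeaky mouse",
            voice_description="A sassy squeaky mouse",
            generated_voice_id=generated_voice_id,
        )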

        Examples
        --------
        from elevenlabs import ElevenLabs

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        client.text_to_voice.design(
            output_format="mp3_22050_32",
            voice_description="A sassy squeaky mouse",
        )
        """
        _response = self._raw_client.design(
            voice_description=voice_description,
            output_format=output_format,
            model_id=model_id,
            text=text,
            auto_generate_text=auto_generate_text,
            loudness=loudness,
            seed=seed,
            guidance_scale=guidance_scale,
            stream_previews=stream_previews,
            remixing_session_id=remixing_session_id,
            remixing_session_iteration_id=remixing_session_iteration_id,
            quality=quality,
            reference_audio_base_64=reference_audio_base_64,
            prompt_strength=prompt_strength,
            request_options=request_options,
        )
        return _response.data

    def remix(
        self,
        voice_id: str,
        *,
        voice_description: str,
        output_format: typing.Optional[TextToVoiceRemixRequestOutputFormat] = None,
        text: typing.Optional[str] = OMIT,
        auto_generate_text: typing.Optional[bool] = OMIT,
        loudness: typing.Optional[float] = OMIT,
        seed: typing.Optional[int] = OMIT,
        guidance_scale: typing.Optional[float] = OMIT,
        stream_previews: typing.Optional[bool] = OMIT,
        remixing_session_id: typing.Optional[str] = OMIT,
        remixing_session_iteration_id: typing.Optional[str] = OMIT,
        prompt_strength: typing.Optional[float] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> VoiceDesignPreviewResponse:
        """
        Remix an existing voice via a prompt. This method returns a list of voice previews. Each preview has a generated_voice_id and a sample of the voice as base64 encoded mp3 audio. To create a voice use the generated_voice_id of the preferred preview with the /v1/text-to-voice endpoint.

        Parameters
        ----------
        voice_id : str
            Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices.

        voice_description : str
            Description of the changes to make to the voice.

        output_format : typing.Optional[TextToVoiceRemixRequestOutputFormat]
            Output format of the generated audio. Formatted as codec_sample_rate_bitrate. So an mp3 with 22.05kHz sample rate at 32kbs is represented as mp3_22050_32. MP3 with 192kbps bitrate requires you to be subscribed to Creator tier or above. PCM with 44.1kHz sample rate requires you to be subscribed to Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs.

        text : typing.Optional[str]
            Text to generate. Text length must be between 100 and 1000.

        auto_generate_text : typing.Optional[bool]
            Whether to automatically generate a text suitable for the voice description.

        loudness : typing.Optional[float]
            Controls the volume level of the generated voice. -1 is quietest, 1 is loudest, 0 corresponds to roughly -24 LUFS.

        seed : typing.Optional[int]
            Random number that controls the voice generation. Same seed with same inputs produces same voice.

        guidance_scale : typing.Optional[float]
            Controls how closely the AI follows the prompt. Lower numbers give the AI more freedom to be creative, while higher numbers force it to stick more to the prompt. High numbers can cause the voice to sound artificial or robotic. We recommend using longer, more detailed prompts at a lower Guidance Scale.

        stream_previews : typing.Optional[bool]
            Determines whether the Text to Voice previews should be included in the response. If true, only the generated IDs will be returned which can then be streamed via the /v1/text-to-voice/:generated_voice_id/stream endpoint.

        remixing_session_id : typing.Optional[str]
            The remixing session id.

        remixing_session_iteration_id : typing.Optional[str]
            The id of the remixing session iteration where these generations should be attached to. If not provided, a new iteration will be created.

        prompt_strength : typing.Optional[float]
            Controls the balance of prompt versus reference audio when generating voice samples. 0 means almost no prompt influence, 1 means almost no reference audio influence. Only supported when using the eleven_ttv_v3 model and providing reference audio.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        VoiceDesignPreviewResponse
            Successful Response

        Examples
        --------
        from elevenlabs import ElevenLabs

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        client.text_to_voice.remix(
            voice_id="21m00Tcm4TlvDq8ikWAM",
            output_format="mp3_22050_32",
            voice_description="Make the voice have a higher pitch.",
        )
        """
        _response = self._raw_client.remix(
            voice_id,
            voice_description=voice_description,
            output_format=output_format,
            text=text,
            auto_generate_text=auto_generate_text,
            loudness=loudness,
            seed=seed,
            guidance_scale=guidance_scale,
            stream_previews=stream_previews,
            remixing_session_id=remixing_session_id,
            remixing_session_iteration_id=remixing_session_iteration_id,
            prompt_strength=prompt_strength,
            request_options=request_options,
        )
        return _response.data

    @property
    def preview(self):
        # Lazily constructed so the preview sub-client is only imported when first used.
        if self._preview is None:
            from .preview.client import PreviewClient

            self._preview = PreviewClient(client_wrapper=self._client_wrapper)
        return self._preview


class AsyncTextToVoiceClient:
    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._raw_client = AsyncRawTextToVoiceClient(client_wrapper=client_wrapper)
        self._client_wrapper = client_wrapper
        self._preview: typing.Optional[AsyncPreviewClient] = None

    @property
    def with_raw_response(self) -> AsyncRawTextToVoiceClient:
        """
        Retrieves a raw implementation of this client that returns raw responses.

        Returns
        -------
        AsyncRawTextToVoiceClient
        """
        return self._raw_client

    async def create_previews(
        self,
        *,
        voice_description: str,
        output_format: typing.Optional[TextToVoiceCreatePreviewsRequestOutputFormat] = None,
        text: typing.Optional[str] = OMIT,
        auto_generate_text: typing.Optional[bool] = OMIT,
        loudness: typing.Optional[float] = OMIT,
        quality: typing.Optional[float] = OMIT,
        seed: typing.Optional[int] = OMIT,
        guidance_scale: typing.Optional[float] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> VoiceDesignPreviewResponse:
        """
        Create a voice from a text prompt.

        Parameters
        ----------
        voice_description : str
            Description to use for the created voice.

        output_format : typing.Optional[TextToVoiceCreatePreviewsRequestOutputFormat]
            The output format of the generated audio.

        text : typing.Optional[str]
            Text to generate. Text length must be between 100 and 1000.

        auto_generate_text : typing.Optional[bool]
            Whether to automatically generate a text suitable for the voice description.

        loudness : typing.Optional[float]
            Controls the volume level of the generated voice. -1 is quietest, 1 is loudest, 0 corresponds to roughly -24 LUFS.

        quality : typing.Optional[float]
            Higher quality results in better voice output but less variety.

        seed : typing.Optional[int]
            Random number that controls the voice generation. Same seed with same inputs produces same voice.

        guidance_scale : typing.Optional[float]
            Controls how closely the AI follows the prompt. Lower numbers give the AI more freedom to be creative, while higher numbers force it to stick more to the prompt. High numbers can cause the voice to sound artificial or robotic. We recommend using longer, more detailed prompts at a lower Guidance Scale.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        VoiceDesignPreviewResponse
            Successful Response

        Examples
        --------
        import asyncio

        from elevenlabs import AsyncElevenLabs

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.text_to_voice.create_previews(
                output_format="mp3_22050_32",
                voice_description="A sassy squeaky mouse",
            )


        asyncio.run(main())
        """
        _response = await self._raw_client.create_previews(
            voice_description=voice_description,
            output_format=output_format,
            text=text,
            auto_generate_text=auto_generate_text,
            loudness=loudness,
            quality=quality,
            seed=seed,
            guidance_scale=guidance_scale,
            request_options=request_options,
        )
        return _response.data

    async def create(
        self,
        *,
        voice_name: str,
        voice_description: str,
        generated_voice_id: str,
        labels: typing.Optional[typing.Dict[str, typing.Optional[str]]] = OMIT,
        played_not_selected_voice_ids: typing.Optional[typing.Sequence[str]] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> Voice:
        """
        Create a voice from previously generated voice preview. This endpoint should be called after you fetched a generated_voice_id using POST /v1/text-to-voice/design or POST /v1/text-to-voice/:voice_id/remix.

        Parameters
        ----------
        voice_name : str
            Name to use for the created voice.

        voice_description : str
            Description to use for the created voice.

        generated_voice_id : str
            The generated_voice_id to create. Call POST /v1/text-to-voice/create-previews and fetch the generated_voice_id from the response header if you don't have one yet.

        labels : typing.Optional[typing.Dict[str, typing.Optional[str]]]
            Optional, metadata to add to the created voice. Defaults to None.

        played_not_selected_voice_ids : typing.Optional[typing.Sequence[str]]
            List of voice ids that the user has played but not selected. Used for RLHF.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        Voice
            Successful Response

        Examples
        --------
        import asyncio

        from elevenlabs import AsyncElevenLabs

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.text_to_voice.create(
                voice_name="Sassy squeaky mouse",
                voice_description="A sassy squeaky mouse",
                generated_voice_id="37HceQefKmEi3bGovXjL",
            )


        asyncio.run(main())
        """
        _response = await self._raw_client.create(
            voice_name=voice_name,
            voice_description=voice_description,
            generated_voice_id=generated_voice_id,
            labels=labels,
            played_not_selected_voice_ids=played_not_selected_voice_ids,
            request_options=request_options,
        )
        return _response.data

    async def design(
        self,
        *,
        voice_description: str,
        output_format: typing.Optional[TextToVoiceDesignRequestOutputFormat] = None,
        model_id: typing.Optional[VoiceDesignRequestModelModelId] = OMIT,
        text: typing.Optional[str] = OMIT,
        auto_generate_text: typing.Optional[bool] = OMIT,
        loudness: typing.Optional[float] = OMIT,
        seed: typing.Optional[int] = OMIT,
        guidance_scale: typing.Optional[float] = OMIT,
        stream_previews: typing.Optional[bool] = OMIT,
        remixing_session_id: typing.Optional[str] = OMIT,
        remixing_session_iteration_id: typing.Optional[str] = OMIT,
        quality: typing.Optional[float] = OMIT,
        reference_audio_base_64: typing.Optional[str] = OMIT,
        prompt_strength: typing.Optional[float] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> VoiceDesignPreviewResponse:
        """
        Design a voice via a prompt. This method returns a list of voice previews. Each preview has a generated_voice_id and a sample of the voice as base64 encoded mp3 audio. To create a voice use the generated_voice_id of the preferred preview with the /v1/text-to-voice endpoint.

        Parameters
        ----------
        voice_description : str
            Description to use for the created voice.

        output_format : typing.Optional[TextToVoiceDesignRequestOutputFormat]
            Output format of the generated audio. Formatted as codec_sample_rate_bitrate. So an mp3 with 22.05kHz sample rate at 32kbs is represented as mp3_22050_32. MP3 with 192kbps bitrate requires you to be subscribed to Creator tier or above. PCM with 44.1kHz sample rate requires you to be subscribed to Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs.

        model_id : typing.Optional[VoiceDesignRequestModelModelId]
            Model to use for the voice generation. Possible values: eleven_multilingual_ttv_v2, eleven_ttv_v3.

        text : typing.Optional[str]
            Text to generate. Text length must be between 100 and 1000.

        auto_generate_text : typing.Optional[bool]
            Whether to automatically generate a text suitable for the voice description.

        loudness : typing.Optional[float]
            Controls the volume level of the generated voice. -1 is quietest, 1 is loudest, 0 corresponds to roughly -24 LUFS.

        seed : typing.Optional[int]
            Random number that controls the voice generation. Same seed with same inputs produces same voice.

        guidance_scale : typing.Optional[float]
            Controls how closely the AI follows the prompt. Lower numbers give the AI more freedom to be creative, while higher numbers force it to stick more to the prompt. High numbers can cause the voice to sound artificial or robotic. We recommend using longer, more detailed prompts at a lower Guidance Scale.

        stream_previews : typing.Optional[bool]
            Determines whether the Text to Voice previews should be included in the response. If true, only the generated IDs will be returned which can then be streamed via the /v1/text-to-voice/:generated_voice_id/stream endpoint.

        remixing_session_id : typing.Optional[str]
            The remixing session id.

        remixing_session_iteration_id : typing.Optional[str]
            The id of the remixing session iteration where these generations should be attached to. If not provided, a new iteration will be created.

        quality : typing.Optional[float]
            Higher quality results in better voice output but less variety.

        reference_audio_base_64 : typing.Optional[str]
            Reference audio to use for the voice generation. The audio should be base64 encoded. Only supported when using the eleven_ttv_v3 model.

        prompt_strength : typing.Optional[float]
            Controls the balance of prompt versus reference audio when generating voice samples. 0 means almost no prompt influence, 1 means almost no reference audio influence. Only supported when using the eleven_ttv_v3 model and providing reference audio.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        VoiceDesignPreviewResponse
            Successful Response
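
        Notes
        -----
        A sketch of running several independent design requests concurrently with the
        async client via asyncio.gather. The voice descriptions here are illustrative
        placeholders, not values taken from this documentation.

        import asyncio

        from elevenlabs import AsyncElevenLabs

        client = AsyncElevenLabs(api_key="YOUR_API_KEY")


        async def main() -> None:
            responses = await asyncio.gather(
                client.text_to_voice.design(voice_description="A sassy squeaky mouse"),
                client.text_to_voice.design(voice_description="A calm, deep narrator"),
            )
            # Each item in responses is a VoiceDesignPreviewResponse.


        asyncio.run(main())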

        Examples
        --------
        import asyncio

        from elevenlabs import AsyncElevenLabs

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.text_to_voice.design(
                output_format="mp3_22050_32",
                voice_description="A sassy squeaky mouse",
            )


        asyncio.run(main())
        """
        _response = await self._raw_client.design(
            voice_description=voice_description,
            output_format=output_format,
            model_id=model_id,
            text=text,
            auto_generate_text=auto_generate_text,
            loudness=loudness,
            seed=seed,
            guidance_scale=guidance_scale,
            stream_previews=stream_previews,
            remixing_session_id=remixing_session_id,
            remixing_session_iteration_id=remixing_session_iteration_id,
            quality=quality,
            reference_audio_base_64=reference_audio_base_64,
            prompt_strength=prompt_strength,
            request_options=request_options,
        )
        return _response.data

    async def remix(
        self,
        voice_id: str,
        *,
        voice_description: str,
        output_format: typing.Optional[TextToVoiceRemixRequestOutputFormat] = None,
        text: typing.Optional[str] = OMIT,
        auto_generate_text: typing.Optional[bool] = OMIT,
        loudness: typing.Optional[float] = OMIT,
        seed: typing.Optional[int] = OMIT,
        guidance_scale: typing.Optional[float] = OMIT,
        stream_previews: typing.Optional[bool] = OMIT,
        remixing_session_id: typing.Optional[str] = OMIT,
        remixing_session_iteration_id: typing.Optional[str] = OMIT,
        prompt_strength: typing.Optional[float] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> VoiceDesignPreviewResponse:
        """
        Remix an existing voice via a prompt. This method returns a list of voice previews. Each preview has a generated_voice_id and a sample of the voice as base64 encoded mp3 audio. To create a voice use the generated_voice_id of the preferred preview with the /v1/text-to-voice endpoint.

        Parameters
        ----------
        voice_id : str
            Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices.

        voice_description : str
            Description of the changes to make to the voice.

        output_format : typing.Optional[TextToVoiceRemixRequestOutputFormat]
            Output format of the generated audio. Formatted as codec_sample_rate_bitrate. So an mp3 with 22.05kHz sample rate at 32kbs is represented as mp3_22050_32. MP3 with 192kbps bitrate requires you to be subscribed to Creator tier or above. PCM with 44.1kHz sample rate requires you to be subscribed to Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs.

        text : typing.Optional[str]
            Text to generate. Text length must be between 100 and 1000.

        auto_generate_text : typing.Optional[bool]
            Whether to automatically generate a text suitable for the voice description.

        loudness : typing.Optional[float]
            Controls the volume level of the generated voice. -1 is quietest, 1 is loudest, 0 corresponds to roughly -24 LUFS.

        seed : typing.Optional[int]
            Random number that controls the voice generation. Same seed with same inputs produces same voice.

        guidance_scale : typing.Optional[float]
            Controls how closely the AI follows the prompt. Lower numbers give the AI more freedom to be creative, while higher numbers force it to stick more to the prompt. High numbers can cause the voice to sound artificial or robotic. We recommend using longer, more detailed prompts at a lower Guidance Scale.

        stream_previews : typing.Optional[bool]
            Determines whether the Text to Voice previews should be included in the response. If true, only the generated IDs will be returned which can then be streamed via the /v1/text-to-voice/:generated_voice_id/stream endpoint.

        remixing_session_id : typing.Optional[str]
            The remixing session id.

        remixing_session_iteration_id : typing.Optional[str]
            The id of the remixing session iteration where these generations should be attached to. If not provided, a new iteration will be created.

        prompt_strength : typing.Optional[float]
            Controls the balance of prompt versus reference audio when generating voice samples. 0 means almost no prompt influence, 1 means almost no reference audio influence. Only supported when using the eleven_ttv_v3 model and providing reference audio.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        VoiceDesignPreviewResponse
            Successful Response

        Examples
        --------
        import asyncio

        from elevenlabs import AsyncElevenLabs

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.text_to_voice.remix(
                voice_id="21m00Tcm4TlvDq8ikWAM",
                output_format="mp3_22050_32",
                voice_description="Make the voice have a higher pitch.",
            )


        asyncio.run(main())
        """
        _response = await self._raw_client.remix(
            voice_id,
            voice_description=voice_description,
            output_format=output_format,
            text=text,
            auto_generate_text=auto_generate_text,
            loudness=loudness,
            seed=seed,
            guidance_scale=guidance_scale,
            stream_previews=stream_previews,
            remixing_session_id=remixing_session_id,
            remixing_session_iteration_id=remixing_session_iteration_id,
            prompt_strength=prompt_strength,
            request_options=request_options,
        )
        return _response.data

    @property
    def preview(self):
        # Lazily constructed so the preview sub-client is only imported when first used.
        if self._preview is None:
            from .preview.client import AsyncPreviewClient

            self._preview = AsyncPreviewClient(client_wrapper=self._client_wrapper)
        return self._preview