# This file was auto-generated by Fern from our API Definition.

import typing

from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
from ..core.request_options import RequestOptions
from ..types.audio_with_timestamps_and_voice_segments_response_model import (
    AudioWithTimestampsAndVoiceSegmentsResponseModel,
)
from ..types.dialogue_input import DialogueInput
from ..types.model_settings_response_model import ModelSettingsResponseModel
from ..types.pronunciation_dictionary_version_locator import PronunciationDictionaryVersionLocator
from ..types.streaming_audio_chunk_with_timestamps_and_voice_segments_response_model import (
    StreamingAudioChunkWithTimestampsAndVoiceSegmentsResponseModel,
)
from .raw_client import AsyncRawTextToDialogueClient, RawTextToDialogueClient
from .types.body_text_to_dialogue_full_with_timestamps_apply_text_normalization import (
    BodyTextToDialogueFullWithTimestampsApplyTextNormalization,
)
from .types.body_text_to_dialogue_multi_voice_streaming_v_1_text_to_dialogue_stream_post_apply_text_normalization import (
    BodyTextToDialogueMultiVoiceStreamingV1TextToDialogueStreamPostApplyTextNormalization,
)
from .types.body_text_to_dialogue_multi_voice_v_1_text_to_dialogue_post_apply_text_normalization import (
    BodyTextToDialogueMultiVoiceV1TextToDialoguePostApplyTextNormalization,
)
from .types.body_text_to_dialogue_stream_with_timestamps_apply_text_normalization import (
    BodyTextToDialogueStreamWithTimestampsApplyTextNormalization,
)
from .types.text_to_dialogue_convert_request_output_format import TextToDialogueConvertRequestOutputFormat
from .types.text_to_dialogue_convert_with_timestamps_request_output_format import (
    TextToDialogueConvertWithTimestampsRequestOutputFormat,
)
from .types.text_to_dialogue_stream_request_output_format import TextToDialogueStreamRequestOutputFormat
from .types.text_to_dialogue_stream_with_timestamps_request_output_format import (
    TextToDialogueStreamWithTimestampsRequestOutputFormat,
)

# Sentinel default for optional parameters: lets the client distinguish
# "argument not provided" from an explicit None.
OMIT = typing.cast(typing.Any, ...)


class TextToDialogueClient:
    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._raw_client = RawTextToDialogueClient(client_wrapper=client_wrapper)

    @property
    def with_raw_response(self) -> RawTextToDialogueClient:
        """
        Retrieves a raw implementation of this client that returns raw responses.

        Returns
        -------
        RawTextToDialogueClient
        """
        return self._raw_client

    def convert(
        self,
        *,
        inputs: typing.Sequence[DialogueInput],
        output_format: typing.Optional[TextToDialogueConvertRequestOutputFormat] = None,
        model_id: typing.Optional[str] = OMIT,
        language_code: typing.Optional[str] = OMIT,
        settings: typing.Optional[ModelSettingsResponseModel] = OMIT,
        pronunciation_dictionary_locators: typing.Optional[
            typing.Sequence[PronunciationDictionaryVersionLocator]
        ] = OMIT,
        seed: typing.Optional[int] = OMIT,
        apply_text_normalization: typing.Optional[
            BodyTextToDialogueMultiVoiceV1TextToDialoguePostApplyTextNormalization
        ] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Iterator[bytes]:
        """
        Converts a list of text and voice ID pairs into speech (dialogue) and returns audio.

        Parameters
        ----------
        inputs : typing.Sequence[DialogueInput]
            A list of dialogue inputs, each containing text and a voice ID which will be converted into speech.

        output_format : typing.Optional[TextToDialogueConvertRequestOutputFormat]
            Output format of the generated audio, formatted as codec_sample_rate_bitrate; an MP3 at a 22.05 kHz sample rate and a 32 kbps bitrate is represented as mp3_22050_32. MP3 at 192 kbps requires a subscription to Creator tier or above, and PCM at a 44.1 kHz sample rate requires a subscription to Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs.

        model_id : typing.Optional[str]
            Identifier of the model that will be used; you can query the available models using GET /v1/models. The model needs to support text to speech, which you can check using its can_do_text_to_speech property.

        language_code : typing.Optional[str]
            Language code (ISO 639-1) used to enforce a language for the model and text normalization. If the model does not support the provided language code, an error will be returned.

        settings : typing.Optional[ModelSettingsResponseModel]
            Settings controlling the dialogue generation.

        pronunciation_dictionary_locators : typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]]
            A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request.

        seed : typing.Optional[int]
            If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.

        apply_text_normalization : typing.Optional[BodyTextToDialogueMultiVoiceV1TextToDialoguePostApplyTextNormalization]
            This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration. You can pass in options such as `chunk_size` to customize the request and response.

        Returns
        -------
        typing.Iterator[bytes]
            The generated audio file

        Examples
        --------
        from elevenlabs import DialogueInput, ElevenLabs

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        audio = client.text_to_dialogue.convert(
            inputs=[
                DialogueInput(
                    text="Knock knock",
                    voice_id="JBFqnCBsd6RMkjVDRZzb",
                ),
                DialogueInput(
                    text="Who is there?",
                    voice_id="Aw4FAjKCGjjNkVhN1Xmq",
                ),
            ],
        )
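
        The call returns a lazy iterator of raw audio bytes, so nothing is
        fetched until the iterator is consumed; a minimal sketch of persisting
        the audio (the output file name is illustrative):

        with open("dialogue.mp3", "wb") as f:
            for chunk in audio:
                f.write(chunk)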
        """
        with self._raw_client.convert(
            inputs=inputs,
            output_format=output_format,
            model_id=model_id,
            language_code=language_code,
            settings=settings,
            pronunciation_dictionary_locators=pronunciation_dictionary_locators,
            seed=seed,
            apply_text_normalization=apply_text_normalization,
            request_options=request_options,
        ) as r:
            yield from r.data

    def stream(
        self,
        *,
        inputs: typing.Sequence[DialogueInput],
        output_format: typing.Optional[TextToDialogueStreamRequestOutputFormat] = None,
        model_id: typing.Optional[str] = OMIT,
        language_code: typing.Optional[str] = OMIT,
        settings: typing.Optional[ModelSettingsResponseModel] = OMIT,
        pronunciation_dictionary_locators: typing.Optional[
            typing.Sequence[PronunciationDictionaryVersionLocator]
        ] = OMIT,
        seed: typing.Optional[int] = OMIT,
        apply_text_normalization: typing.Optional[
            BodyTextToDialogueMultiVoiceStreamingV1TextToDialogueStreamPostApplyTextNormalization
        ] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Iterator[bytes]:
        """
        Converts a list of text and voice ID pairs into speech (dialogue) and returns an audio stream.

        Parameters
        ----------
        inputs : typing.Sequence[DialogueInput]
            A list of dialogue inputs, each containing text and a voice ID which will be converted into speech.

        output_format : typing.Optional[TextToDialogueStreamRequestOutputFormat]
            Output format of the generated audio, formatted as codec_sample_rate_bitrate; an MP3 at a 22.05 kHz sample rate and a 32 kbps bitrate is represented as mp3_22050_32. MP3 at 192 kbps requires a subscription to Creator tier or above, and PCM at a 44.1 kHz sample rate requires a subscription to Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs.

        model_id : typing.Optional[str]
            Identifier of the model that will be used; you can query the available models using GET /v1/models. The model needs to support text to speech, which you can check using its can_do_text_to_speech property.

        language_code : typing.Optional[str]
            Language code (ISO 639-1) used to enforce a language for the model and text normalization. If the model does not support the provided language code, an error will be returned.

        settings : typing.Optional[ModelSettingsResponseModel]
            Settings controlling the dialogue generation.

        pronunciation_dictionary_locators : typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]]
            A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request.

        seed : typing.Optional[int]
            If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.

        apply_text_normalization : typing.Optional[BodyTextToDialogueMultiVoiceStreamingV1TextToDialogueStreamPostApplyTextNormalization]
            This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration. You can pass in options such as `chunk_size` to customize the request and response.

        Returns
        -------
        typing.Iterator[bytes]
            Streaming audio data

        Examples
        --------
        from elevenlabs import DialogueInput, ElevenLabs

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        audio_stream = client.text_to_dialogue.stream(
            inputs=[
                DialogueInput(
                    text="Knock knock",
                    voice_id="JBFqnCBsd6RMkjVDRZzb",
                ),
                DialogueInput(
                    text="Who is there?",
                    voice_id="Aw4FAjKCGjjNkVhN1Xmq",
                ),
            ],
        )
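
        The stream is likewise a lazy iterator of audio byte chunks; a sketch
        of forwarding chunks as they arrive (`play_chunk` is a hypothetical
        handler, e.g. a player or a socket write):

        for chunk in audio_stream:
            play_chunk(chunk)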
        """
        with self._raw_client.stream(
            inputs=inputs,
            output_format=output_format,
            model_id=model_id,
            language_code=language_code,
            settings=settings,
            pronunciation_dictionary_locators=pronunciation_dictionary_locators,
            seed=seed,
            apply_text_normalization=apply_text_normalization,
            request_options=request_options,
        ) as r:
            yield from r.data

    def stream_with_timestamps(
        self,
        *,
        inputs: typing.Sequence[DialogueInput],
        output_format: typing.Optional[TextToDialogueStreamWithTimestampsRequestOutputFormat] = None,
        model_id: typing.Optional[str] = OMIT,
        language_code: typing.Optional[str] = OMIT,
        settings: typing.Optional[ModelSettingsResponseModel] = OMIT,
        pronunciation_dictionary_locators: typing.Optional[
            typing.Sequence[PronunciationDictionaryVersionLocator]
        ] = OMIT,
        seed: typing.Optional[int] = OMIT,
        apply_text_normalization: typing.Optional[
            BodyTextToDialogueStreamWithTimestampsApplyTextNormalization
        ] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Iterator[StreamingAudioChunkWithTimestampsAndVoiceSegmentsResponseModel]:
        """
        Converts a list of text and voice ID pairs into speech (dialogue) and returns a stream of JSON blobs containing the audio as a base64-encoded string along with timestamps.

        Parameters
        ----------
        inputs : typing.Sequence[DialogueInput]
            A list of dialogue inputs, each containing text and a voice ID which will be converted into speech.

        output_format : typing.Optional[TextToDialogueStreamWithTimestampsRequestOutputFormat]
            Output format of the generated audio, formatted as codec_sample_rate_bitrate; an MP3 at a 22.05 kHz sample rate and a 32 kbps bitrate is represented as mp3_22050_32. MP3 at 192 kbps requires a subscription to Creator tier or above, and PCM at a 44.1 kHz sample rate requires a subscription to Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs.

        model_id : typing.Optional[str]
            Identifier of the model that will be used; you can query the available models using GET /v1/models. The model needs to support text to speech, which you can check using its can_do_text_to_speech property.

        language_code : typing.Optional[str]
            Language code (ISO 639-1) used to enforce a language for the model and text normalization. If the model does not support the provided language code, an error will be returned.

        settings : typing.Optional[ModelSettingsResponseModel]
            Settings controlling the dialogue generation.

        pronunciation_dictionary_locators : typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]]
            A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request.

        seed : typing.Optional[int]
            If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.

        apply_text_normalization : typing.Optional[BodyTextToDialogueStreamWithTimestampsApplyTextNormalization]
            This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Yields
        ------
        typing.Iterator[StreamingAudioChunkWithTimestampsAndVoiceSegmentsResponseModel]
            Stream of audio chunks with character timestamps and voice segments

        Examples
        --------
        from elevenlabs import DialogueInput, ElevenLabs

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        response = client.text_to_dialogue.stream_with_timestamps(
            output_format="mp3_22050_32",
            inputs=[
                DialogueInput(
                    text="Hello, how are you?",
                    voice_id="bYTqZQo3Jz7LQtmGTgwi",
                ),
                DialogueInput(
                    text="I'm doing well, thank you!",
                    voice_id="6lCwbsX1yVjD49QmpkTR",
                ),
            ],
        )
        for chunk in response:
            print(chunk)
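
        Each chunk carries the audio as a base64-encoded string alongside its
        timestamps; a sketch of recovering the raw bytes, assuming the field is
        named `audio_base_64` after the response model:

        import base64

        for chunk in response:
            audio_bytes = base64.b64decode(chunk.audio_base_64)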
        """
        with self._raw_client.stream_with_timestamps(
            inputs=inputs,
            output_format=output_format,
            model_id=model_id,
            language_code=language_code,
            settings=settings,
            pronunciation_dictionary_locators=pronunciation_dictionary_locators,
            seed=seed,
            apply_text_normalization=apply_text_normalization,
            request_options=request_options,
        ) as r:
            yield from r.data

    def convert_with_timestamps(
        self,
        *,
        inputs: typing.Sequence[DialogueInput],
        output_format: typing.Optional[TextToDialogueConvertWithTimestampsRequestOutputFormat] = None,
        model_id: typing.Optional[str] = OMIT,
        language_code: typing.Optional[str] = OMIT,
        settings: typing.Optional[ModelSettingsResponseModel] = OMIT,
        pronunciation_dictionary_locators: typing.Optional[
            typing.Sequence[PronunciationDictionaryVersionLocator]
        ] = OMIT,
        seed: typing.Optional[int] = OMIT,
        apply_text_normalization: typing.Optional[
            BodyTextToDialogueFullWithTimestampsApplyTextNormalization
        ] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> AudioWithTimestampsAndVoiceSegmentsResponseModel:
        """
        Generate dialogue from text with precise character-level timing information for audio-text synchronization.

        Parameters
        ----------
        inputs : typing.Sequence[DialogueInput]
            A list of dialogue inputs, each containing text and a voice ID which will be converted into speech.

        output_format : typing.Optional[TextToDialogueConvertWithTimestampsRequestOutputFormat]
            Output format of the generated audio, formatted as codec_sample_rate_bitrate; an MP3 at a 22.05 kHz sample rate and a 32 kbps bitrate is represented as mp3_22050_32. MP3 at 192 kbps requires a subscription to Creator tier or above, and PCM at a 44.1 kHz sample rate requires a subscription to Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs.

        model_id : typing.Optional[str]
            Identifier of the model that will be used; you can query the available models using GET /v1/models. The model needs to support text to speech, which you can check using its can_do_text_to_speech property.

        language_code : typing.Optional[str]
            Language code (ISO 639-1) used to enforce a language for the model and text normalization. If the model does not support the provided language code, an error will be returned.

        settings : typing.Optional[ModelSettingsResponseModel]
            Settings controlling the dialogue generation.

        pronunciation_dictionary_locators : typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]]
            A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request.

        seed : typing.Optional[int]
            If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.

        apply_text_normalization : typing.Optional[BodyTextToDialogueFullWithTimestampsApplyTextNormalization]
            This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AudioWithTimestampsAndVoiceSegmentsResponseModel
            Successful Response

        Examples
        --------
        from elevenlabs import DialogueInput, ElevenLabs

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        response = client.text_to_dialogue.convert_with_timestamps(
            output_format="mp3_22050_32",
            inputs=[
                DialogueInput(
                    text="Hello, how are you?",
                    voice_id="bYTqZQo3Jz7LQtmGTgwi",
                ),
                DialogueInput(
                    text="I'm doing well, thank you!",
                    voice_id="6lCwbsX1yVjD49QmpkTR",
                ),
            ],
        )
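
        The response bundles the complete audio with its timing data; a sketch
        of decoding the audio, assuming an `audio_base_64` field named after
        the response model:

        import base64

        audio_bytes = base64.b64decode(response.audio_base_64)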
        """
        _response = self._raw_client.convert_with_timestamps(
            inputs=inputs,
            output_format=output_format,
            model_id=model_id,
            language_code=language_code,
            settings=settings,
            pronunciation_dictionary_locators=pronunciation_dictionary_locators,
            seed=seed,
            apply_text_normalization=apply_text_normalization,
            request_options=request_options,
        )
        return _response.data


class AsyncTextToDialogueClient:
    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._raw_client = AsyncRawTextToDialogueClient(client_wrapper=client_wrapper)

    @property
    def with_raw_response(self) -> AsyncRawTextToDialogueClient:
        """
        Retrieves a raw implementation of this client that returns raw responses.

        Returns
        -------
        AsyncRawTextToDialogueClient
        """
        return self._raw_client

    async def convert(
        self,
        *,
        inputs: typing.Sequence[DialogueInput],
        output_format: typing.Optional[TextToDialogueConvertRequestOutputFormat] = None,
        model_id: typing.Optional[str] = OMIT,
        language_code: typing.Optional[str] = OMIT,
        settings: typing.Optional[ModelSettingsResponseModel] = OMIT,
        pronunciation_dictionary_locators: typing.Optional[
            typing.Sequence[PronunciationDictionaryVersionLocator]
        ] = OMIT,
        seed: typing.Optional[int] = OMIT,
        apply_text_normalization: typing.Optional[
            BodyTextToDialogueMultiVoiceV1TextToDialoguePostApplyTextNormalization
        ] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.AsyncIterator[bytes]:
        """
        Converts a list of text and voice ID pairs into speech (dialogue) and returns audio.

        Parameters
        ----------
        inputs : typing.Sequence[DialogueInput]
            A list of dialogue inputs, each containing text and a voice ID which will be converted into speech.

        output_format : typing.Optional[TextToDialogueConvertRequestOutputFormat]
            Output format of the generated audio, formatted as codec_sample_rate_bitrate; an MP3 at a 22.05 kHz sample rate and a 32 kbps bitrate is represented as mp3_22050_32. MP3 at 192 kbps requires a subscription to Creator tier or above, and PCM at a 44.1 kHz sample rate requires a subscription to Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs.

        model_id : typing.Optional[str]
            Identifier of the model that will be used; you can query the available models using GET /v1/models. The model needs to support text to speech, which you can check using its can_do_text_to_speech property.

        language_code : typing.Optional[str]
            Language code (ISO 639-1) used to enforce a language for the model and text normalization. If the model does not support the provided language code, an error will be returned.

        settings : typing.Optional[ModelSettingsResponseModel]
            Settings controlling the dialogue generation.

        pronunciation_dictionary_locators : typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]]
            A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request.

        seed : typing.Optional[int]
            If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.

        apply_text_normalization : typing.Optional[BodyTextToDialogueMultiVoiceV1TextToDialoguePostApplyTextNormalization]
            This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration. You can pass in options such as `chunk_size` to customize the request and response.

        Returns
        -------
        typing.AsyncIterator[bytes]
            The generated audio file

        Examples
        --------
        import asyncio

        from elevenlabs import AsyncElevenLabs, DialogueInput

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            async for chunk in client.text_to_dialogue.convert(
                inputs=[
                    DialogueInput(
                        text="Knock knock",
                        voice_id="JBFqnCBsd6RMkjVDRZzb",
                    ),
                    DialogueInput(
                        text="Who is there?",
                        voice_id="Aw4FAjKCGjjNkVhN1Xmq",
                    ),
                ],
            ):
                ...  # consume the raw audio bytes


        asyncio.run(main())
        """
        async with self._raw_client.convert(
            inputs=inputs,
            output_format=output_format,
            model_id=model_id,
            language_code=language_code,
            settings=settings,
            pronunciation_dictionary_locators=pronunciation_dictionary_locators,
            seed=seed,
            apply_text_normalization=apply_text_normalization,
            request_options=request_options,
        ) as r:
            async for _chunk in r.data:
                yield _chunk

    async def stream(
        self,
        *,
        inputs: typing.Sequence[DialogueInput],
        output_format: typing.Optional[TextToDialogueStreamRequestOutputFormat] = None,
        model_id: typing.Optional[str] = OMIT,
        language_code: typing.Optional[str] = OMIT,
        settings: typing.Optional[ModelSettingsResponseModel] = OMIT,
        pronunciation_dictionary_locators: typing.Optional[
            typing.Sequence[PronunciationDictionaryVersionLocator]
        ] = OMIT,
        seed: typing.Optional[int] = OMIT,
        apply_text_normalization: typing.Optional[
            BodyTextToDialogueMultiVoiceStreamingV1TextToDialogueStreamPostApplyTextNormalization
        ] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.AsyncIterator[bytes]:
        """
        Converts a list of text and voice ID pairs into speech (dialogue) and returns an audio stream.

        Parameters
        ----------
        inputs : typing.Sequence[DialogueInput]
            A list of dialogue inputs, each containing text and a voice ID which will be converted into speech.

        output_format : typing.Optional[TextToDialogueStreamRequestOutputFormat]
            Output format of the generated audio, formatted as codec_sample_rate_bitrate; an MP3 at a 22.05 kHz sample rate and a 32 kbps bitrate is represented as mp3_22050_32. MP3 at 192 kbps requires a subscription to Creator tier or above, and PCM at a 44.1 kHz sample rate requires a subscription to Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs.

        model_id : typing.Optional[str]
            Identifier of the model that will be used; you can query the available models using GET /v1/models. The model needs to support text to speech, which you can check using its can_do_text_to_speech property.

        language_code : typing.Optional[str]
            Language code (ISO 639-1) used to enforce a language for the model and text normalization. If the model does not support the provided language code, an error will be returned.

        settings : typing.Optional[ModelSettingsResponseModel]
            Settings controlling the dialogue generation.

        pronunciation_dictionary_locators : typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]]
            A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request.

        seed : typing.Optional[int]
            If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.

        apply_text_normalization : typing.Optional[BodyTextToDialogueMultiVoiceStreamingV1TextToDialogueStreamPostApplyTextNormalization]
            This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration. You can pass in options such as `chunk_size` to customize the request and response.

        Returns
        -------
        typing.AsyncIterator[bytes]
            Streaming audio data

        Examples
        --------
        import asyncio

        from elevenlabs import AsyncElevenLabs, DialogueInput

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            async for chunk in client.text_to_dialogue.stream(
                inputs=[
                    DialogueInput(
                        text="Knock knock",
                        voice_id="JBFqnCBsd6RMkjVDRZzb",
                    ),
                    DialogueInput(
                        text="Who is there?",
                        voice_id="Aw4FAjKCGjjNkVhN1Xmq",
                    ),
                ],
            ):
                ...  # consume the streamed audio bytes


        asyncio.run(main())
        """
        async with self._raw_client.stream(
            inputs=inputs,
            output_format=output_format,
            model_id=model_id,
            language_code=language_code,
            settings=settings,
            pronunciation_dictionary_locators=pronunciation_dictionary_locators,
            seed=seed,
            apply_text_normalization=apply_text_normalization,
            request_options=request_options,
        ) as r:
            async for _chunk in r.data:
                yield _chunk

    async def stream_with_timestamps(
        self,
        *,
        inputs: typing.Sequence[DialogueInput],
        output_format: typing.Optional[TextToDialogueStreamWithTimestampsRequestOutputFormat] = None,
        model_id: typing.Optional[str] = OMIT,
        language_code: typing.Optional[str] = OMIT,
        settings: typing.Optional[ModelSettingsResponseModel] = OMIT,
        pronunciation_dictionary_locators: typing.Optional[
            typing.Sequence[PronunciationDictionaryVersionLocator]
        ] = OMIT,
        seed: typing.Optional[int] = OMIT,
        apply_text_normalization: typing.Optional[
            BodyTextToDialogueStreamWithTimestampsApplyTextNormalization
        ] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.AsyncIterator[StreamingAudioChunkWithTimestampsAndVoiceSegmentsResponseModel]:
        """
        Converts a list of text and voice ID pairs into speech (dialogue) and returns a stream of JSON blobs containing the audio as a base64-encoded string along with timestamps.

        Parameters
        ----------
        inputs : typing.Sequence[DialogueInput]
            A list of dialogue inputs, each containing text and a voice ID which will be converted into speech.

        output_format : typing.Optional[TextToDialogueStreamWithTimestampsRequestOutputFormat]
            Output format of the generated audio, formatted as codec_sample_rate_bitrate; an MP3 at a 22.05 kHz sample rate and a 32 kbps bitrate is represented as mp3_22050_32. MP3 at 192 kbps requires a subscription to Creator tier or above, and PCM at a 44.1 kHz sample rate requires a subscription to Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs.

        model_id : typing.Optional[str]
            Identifier of the model that will be used; you can query the available models using GET /v1/models. The model needs to support text to speech, which you can check using its can_do_text_to_speech property.

        language_code : typing.Optional[str]
            Language code (ISO 639-1) used to enforce a language for the model and text normalization. If the model does not support the provided language code, an error will be returned.

        settings : typing.Optional[ModelSettingsResponseModel]
            Settings controlling the dialogue generation.

        pronunciation_dictionary_locators : typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]]
            A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request.

        seed : typing.Optional[int]
            If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.

        apply_text_normalization : typing.Optional[BodyTextToDialogueStreamWithTimestampsApplyTextNormalization]
            This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Yields
        ------
        typing.AsyncIterator[StreamingAudioChunkWithTimestampsAndVoiceSegmentsResponseModel]
            Stream of audio chunks with character timestamps and voice segments

        Examples
        --------
        import asyncio

        from elevenlabs import AsyncElevenLabs, DialogueInput

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            response = client.text_to_dialogue.stream_with_timestamps(
                output_format="mp3_22050_32",
                inputs=[
                    DialogueInput(
                        text="Hello, how are you?",
                        voice_id="bYTqZQo3Jz7LQtmGTgwi",
                    ),
                    DialogueInput(
                        text="I'm doing well, thank you!",
                        voice_id="6lCwbsX1yVjD49QmpkTR",
                    ),
                ],
            )
            async for chunk in response:
                print(chunk)


        asyncio.run(main())
        """
        async with self._raw_client.stream_with_timestamps(
            inputs=inputs,
            output_format=output_format,
            model_id=model_id,
            language_code=language_code,
            settings=settings,
            pronunciation_dictionary_locators=pronunciation_dictionary_locators,
            seed=seed,
            apply_text_normalization=apply_text_normalization,
            request_options=request_options,
        ) as r:
            async for _chunk in r.data:
                yield _chunk

    async def convert_with_timestamps(
        self,
        *,
        inputs: typing.Sequence[DialogueInput],
        output_format: typing.Optional[TextToDialogueConvertWithTimestampsRequestOutputFormat] = None,
        model_id: typing.Optional[str] = OMIT,
        language_code: typing.Optional[str] = OMIT,
        settings: typing.Optional[ModelSettingsResponseModel] = OMIT,
        pronunciation_dictionary_locators: typing.Optional[
            typing.Sequence[PronunciationDictionaryVersionLocator]
        ] = OMIT,
        seed: typing.Optional[int] = OMIT,
        apply_text_normalization: typing.Optional[
            BodyTextToDialogueFullWithTimestampsApplyTextNormalization
        ] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> AudioWithTimestampsAndVoiceSegmentsResponseModel:
        """
        Generate dialogue from text with precise character-level timing information for audio-text synchronization.

        Parameters
        ----------
        inputs : typing.Sequence[DialogueInput]
            A list of dialogue inputs, each containing text and a voice ID which will be converted into speech.

        output_format : typing.Optional[TextToDialogueConvertWithTimestampsRequestOutputFormat]
            Output format of the generated audio, formatted as codec_sample_rate_bitrate; an MP3 at a 22.05 kHz sample rate and a 32 kbps bitrate is represented as mp3_22050_32. MP3 at 192 kbps requires a subscription to Creator tier or above, and PCM at a 44.1 kHz sample rate requires a subscription to Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs.

        model_id : typing.Optional[str]
            Identifier of the model that will be used; you can query the available models using GET /v1/models. The model needs to support text to speech, which you can check using its can_do_text_to_speech property.

        language_code : typing.Optional[str]
            Language code (ISO 639-1) used to enforce a language for the model and text normalization. If the model does not support the provided language code, an error will be returned.

        settings : typing.Optional[ModelSettingsResponseModel]
            Settings controlling the dialogue generation.

        pronunciation_dictionary_locators : typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]]
            A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request.

        seed : typing.Optional[int]
            If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.

        apply_text_normalization : typing.Optional[BodyTextToDialogueFullWithTimestampsApplyTextNormalization]
            This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AudioWithTimestampsAndVoiceSegmentsResponseModel
            Successful Response

        Examples
        --------
        import asyncio

        from elevenlabs import AsyncElevenLabs, DialogueInput

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.text_to_dialogue.convert_with_timestamps(
                output_format="mp3_22050_32",
                inputs=[
                    DialogueInput(
                        text="Hello, how are you?",
                        voice_id="bYTqZQo3Jz7LQtmGTgwi",
                    ),
                    DialogueInput(
                        text="I'm doing well, thank you!",
                        voice_id="6lCwbsX1yVjD49QmpkTR",
                    ),
                ],
            )


        asyncio.run(main())
        """
        _response = await self._raw_client.convert_with_timestamps(
            inputs=inputs,
            output_format=output_format,
            model_id=model_id,
            language_code=language_code,
            settings=settings,
            pronunciation_dictionary_locators=pronunciation_dictionary_locators,
            seed=seed,
            apply_text_normalization=apply_text_normalization,
            request_options=request_options,
        )
        return _response.data