# This file was auto-generated by Fern from our API Definition.

import contextlib
import json
import typing
from json.decoder import JSONDecodeError

from ..core.api_error import ApiError
from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
from ..core.http_response import AsyncHttpResponse, HttpResponse
from ..core.request_options import RequestOptions
from ..core.serialization import convert_and_respect_annotation_metadata
from ..core.unchecked_base_model import construct_type
from ..errors.unprocessable_entity_error import UnprocessableEntityError
from ..types.audio_with_timestamps_and_voice_segments_response_model import (
    AudioWithTimestampsAndVoiceSegmentsResponseModel,
)
from ..types.dialogue_input import DialogueInput
from ..types.http_validation_error import HttpValidationError
from ..types.model_settings_response_model import ModelSettingsResponseModel
from ..types.pronunciation_dictionary_version_locator import PronunciationDictionaryVersionLocator
from ..types.streaming_audio_chunk_with_timestamps_and_voice_segments_response_model import (
    StreamingAudioChunkWithTimestampsAndVoiceSegmentsResponseModel,
)
from .types.body_text_to_dialogue_full_with_timestamps_apply_text_normalization import (
    BodyTextToDialogueFullWithTimestampsApplyTextNormalization,
)
from .types.body_text_to_dialogue_multi_voice_streaming_v_1_text_to_dialogue_stream_post_apply_text_normalization import (
    BodyTextToDialogueMultiVoiceStreamingV1TextToDialogueStreamPostApplyTextNormalization,
)
from .types.body_text_to_dialogue_multi_voice_v_1_text_to_dialogue_post_apply_text_normalization import (
    BodyTextToDialogueMultiVoiceV1TextToDialoguePostApplyTextNormalization,
)
from .types.body_text_to_dialogue_stream_with_timestamps_apply_text_normalization import (
    BodyTextToDialogueStreamWithTimestampsApplyTextNormalization,
)
from .types.text_to_dialogue_convert_request_output_format import TextToDialogueConvertRequestOutputFormat
from .types.text_to_dialogue_convert_with_timestamps_request_output_format import (
    TextToDialogueConvertWithTimestampsRequestOutputFormat,
)
from .types.text_to_dialogue_stream_request_output_format import TextToDialogueStreamRequestOutputFormat
from .types.text_to_dialogue_stream_with_timestamps_request_output_format import (
    TextToDialogueStreamWithTimestampsRequestOutputFormat,
)

# Sentinel default for optional parameters; lets the client distinguish omitted arguments from an explicit None.
OMIT = typing.cast(typing.Any, ...)


class RawTextToDialogueClient:
    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._client_wrapper = client_wrapper

    @contextlib.contextmanager
    def convert(
        self,
        *,
        inputs: typing.Sequence[DialogueInput],
        output_format: typing.Optional[TextToDialogueConvertRequestOutputFormat] = None,
        model_id: typing.Optional[str] = OMIT,
        language_code: typing.Optional[str] = OMIT,
        settings: typing.Optional[ModelSettingsResponseModel] = OMIT,
        pronunciation_dictionary_locators: typing.Optional[
            typing.Sequence[PronunciationDictionaryVersionLocator]
        ] = OMIT,
        seed: typing.Optional[int] = OMIT,
        apply_text_normalization: typing.Optional[
            BodyTextToDialogueMultiVoiceV1TextToDialoguePostApplyTextNormalization
        ] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Iterator[HttpResponse[typing.Iterator[bytes]]]:
        """
        Converts a list of text and voice ID pairs into speech (dialogue) and returns audio.

        Parameters
        ----------
        inputs : typing.Sequence[DialogueInput]
            A list of dialogue inputs, each containing text and a voice ID which will be converted into speech.

        output_format : typing.Optional[TextToDialogueConvertRequestOutputFormat]
            Output format of the generated audio. Formatted as codec_sample_rate_bitrate; for example, an MP3 with a 22.05kHz sample rate at 32kbps is represented as mp3_22050_32. MP3 at 192kbps requires the Creator tier or above; PCM at a 44.1kHz sample rate requires the Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs.

        model_id : typing.Optional[str]
            Identifier of the model that will be used; you can query available models using GET /v1/models. The model needs to support text to speech, which you can check using the can_do_text_to_speech property.

        language_code : typing.Optional[str]
            Language code (ISO 639-1) used to enforce a language for the model and text normalization. If the model does not support the provided language code, an error will be returned.

        settings : typing.Optional[ModelSettingsResponseModel]
            Settings controlling the dialogue generation.

        pronunciation_dictionary_locators : typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]]
            A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request.

        seed : typing.Optional[int]
            If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.

        apply_text_normalization : typing.Optional[BodyTextToDialogueMultiVoiceV1TextToDialoguePostApplyTextNormalization]
            This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration, such as `chunk_size`, used to customize the request and response.

        Returns
        -------
        typing.Iterator[HttpResponse[typing.Iterator[bytes]]]
            The generated audio file
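
        Examples
        --------
        A minimal sketch, not generated from the API definition: it assumes a
        `SyncClientWrapper` instance is already available as `client_wrapper`,
        and that `DialogueInput` accepts `text` and `voice_id` fields (the
        voice IDs below are placeholders).

        client = RawTextToDialogueClient(client_wrapper=client_wrapper)
        with client.convert(
            inputs=[
                DialogueInput(text="Knock knock.", voice_id="VOICE_ID_A"),
                DialogueInput(text="Who is there?", voice_id="VOICE_ID_B"),
            ],
            output_format="mp3_22050_32",
        ) as response:
            with open("dialogue.mp3", "wb") as audio_file:
                for chunk in response.data:
                    audio_file.write(chunk)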
        """
        with self._client_wrapper.httpx_client.stream(
            "v1/text-to-dialogue",
            method="POST",
            params={
                "output_format": output_format,
            },
            json={
                "inputs": convert_and_respect_annotation_metadata(
                    object_=inputs, annotation=typing.Sequence[DialogueInput], direction="write"
                ),
                "model_id": model_id,
                "language_code": language_code,
                "settings": convert_and_respect_annotation_metadata(
                    object_=settings, annotation=ModelSettingsResponseModel, direction="write"
                ),
                "pronunciation_dictionary_locators": convert_and_respect_annotation_metadata(
                    object_=pronunciation_dictionary_locators,
                    annotation=typing.Sequence[PronunciationDictionaryVersionLocator],
                    direction="write",
                ),
                "seed": seed,
                "apply_text_normalization": apply_text_normalization,
            },
            headers={
                "content-type": "application/json",
            },
            request_options=request_options,
            omit=OMIT,
        ) as _response:

            def _stream() -> HttpResponse[typing.Iterator[bytes]]:
                try:
                    if 200 <= _response.status_code < 300:
                        _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
                        return HttpResponse(
                            response=_response, data=(_chunk for _chunk in _response.iter_bytes(chunk_size=_chunk_size))
                        )
                    _response.read()
                    if _response.status_code == 422:
                        raise UnprocessableEntityError(
                            headers=dict(_response.headers),
                            body=typing.cast(
                                HttpValidationError,
                                construct_type(
                                    type_=HttpValidationError,  # type: ignore
                                    object_=_response.json(),
                                ),
                            ),
                        )
                    _response_json = _response.json()
                except JSONDecodeError:
                    raise ApiError(
                        status_code=_response.status_code, headers=dict(_response.headers), body=_response.text
                    )
                raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)

            yield _stream()

    @contextlib.contextmanager
    def stream(
        self,
        *,
        inputs: typing.Sequence[DialogueInput],
        output_format: typing.Optional[TextToDialogueStreamRequestOutputFormat] = None,
        model_id: typing.Optional[str] = OMIT,
        language_code: typing.Optional[str] = OMIT,
        settings: typing.Optional[ModelSettingsResponseModel] = OMIT,
        pronunciation_dictionary_locators: typing.Optional[
            typing.Sequence[PronunciationDictionaryVersionLocator]
        ] = OMIT,
        seed: typing.Optional[int] = OMIT,
        apply_text_normalization: typing.Optional[
            BodyTextToDialogueMultiVoiceStreamingV1TextToDialogueStreamPostApplyTextNormalization
        ] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Iterator[HttpResponse[typing.Iterator[bytes]]]:
        """
        Converts a list of text and voice ID pairs into speech (dialogue) and returns an audio stream.

        Parameters
        ----------
        inputs : typing.Sequence[DialogueInput]
            A list of dialogue inputs, each containing text and a voice ID which will be converted into speech.

        output_format : typing.Optional[TextToDialogueStreamRequestOutputFormat]
            Output format of the generated audio. Formatted as codec_sample_rate_bitrate; for example, an MP3 with a 22.05kHz sample rate at 32kbps is represented as mp3_22050_32. MP3 at 192kbps requires the Creator tier or above; PCM at a 44.1kHz sample rate requires the Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs.

        model_id : typing.Optional[str]
            Identifier of the model that will be used; you can query available models using GET /v1/models. The model needs to support text to speech, which you can check using the can_do_text_to_speech property.

        language_code : typing.Optional[str]
            Language code (ISO 639-1) used to enforce a language for the model and text normalization. If the model does not support the provided language code, an error will be returned.

        settings : typing.Optional[ModelSettingsResponseModel]
            Settings controlling the dialogue generation.

        pronunciation_dictionary_locators : typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]]
            A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request.

        seed : typing.Optional[int]
            If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.

        apply_text_normalization : typing.Optional[BodyTextToDialogueMultiVoiceStreamingV1TextToDialogueStreamPostApplyTextNormalization]
            This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration, such as `chunk_size`, used to customize the request and response.

        Returns
        -------
        typing.Iterator[HttpResponse[typing.Iterator[bytes]]]
            Streaming audio data
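
        Examples
        --------
        A minimal sketch, not generated from the API definition: it assumes a
        `SyncClientWrapper` instance is already available as `client_wrapper`
        and uses placeholder voice IDs.

        client = RawTextToDialogueClient(client_wrapper=client_wrapper)
        with client.stream(
            inputs=[
                DialogueInput(text="Hello!", voice_id="VOICE_ID_A"),
                DialogueInput(text="Hi there!", voice_id="VOICE_ID_B"),
            ],
            request_options={"chunk_size": 4096},
        ) as response:
            for chunk in response.data:
                ...  # forward each audio chunk to a player or socket as it arrives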
        """
        with self._client_wrapper.httpx_client.stream(
            "v1/text-to-dialogue/stream",
            method="POST",
            params={
                "output_format": output_format,
            },
            json={
                "inputs": convert_and_respect_annotation_metadata(
                    object_=inputs, annotation=typing.Sequence[DialogueInput], direction="write"
                ),
                "model_id": model_id,
                "language_code": language_code,
                "settings": convert_and_respect_annotation_metadata(
                    object_=settings, annotation=ModelSettingsResponseModel, direction="write"
                ),
                "pronunciation_dictionary_locators": convert_and_respect_annotation_metadata(
                    object_=pronunciation_dictionary_locators,
                    annotation=typing.Sequence[PronunciationDictionaryVersionLocator],
                    direction="write",
                ),
                "seed": seed,
                "apply_text_normalization": apply_text_normalization,
            },
            headers={
                "content-type": "application/json",
            },
            request_options=request_options,
            omit=OMIT,
        ) as _response:

            def _stream() -> HttpResponse[typing.Iterator[bytes]]:
                try:
                    if 200 <= _response.status_code < 300:
                        _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
                        return HttpResponse(
                            response=_response, data=(_chunk for _chunk in _response.iter_bytes(chunk_size=_chunk_size))
                        )
                    _response.read()
                    if _response.status_code == 422:
                        raise UnprocessableEntityError(
                            headers=dict(_response.headers),
                            body=typing.cast(
                                HttpValidationError,
                                construct_type(
                                    type_=HttpValidationError,  # type: ignore
                                    object_=_response.json(),
                                ),
                            ),
                        )
                    _response_json = _response.json()
                except JSONDecodeError:
                    raise ApiError(
                        status_code=_response.status_code, headers=dict(_response.headers), body=_response.text
                    )
                raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)

            yield _stream()

    @contextlib.contextmanager
    def stream_with_timestamps(
        self,
        *,
        inputs: typing.Sequence[DialogueInput],
        output_format: typing.Optional[TextToDialogueStreamWithTimestampsRequestOutputFormat] = None,
        model_id: typing.Optional[str] = OMIT,
        language_code: typing.Optional[str] = OMIT,
        settings: typing.Optional[ModelSettingsResponseModel] = OMIT,
        pronunciation_dictionary_locators: typing.Optional[
            typing.Sequence[PronunciationDictionaryVersionLocator]
        ] = OMIT,
        seed: typing.Optional[int] = OMIT,
        apply_text_normalization: typing.Optional[BodyTextToDialogueStreamWithTimestampsApplyTextNormalization] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Iterator[HttpResponse[typing.Iterator[StreamingAudioChunkWithTimestampsAndVoiceSegmentsResponseModel]]]:
        """
        Converts a list of text and voice ID pairs into speech (dialogue) and returns a stream of JSON blobs, each containing base64-encoded audio and timestamps.

        Parameters
        ----------
        inputs : typing.Sequence[DialogueInput]
            A list of dialogue inputs, each containing text and a voice ID which will be converted into speech.

        output_format : typing.Optional[TextToDialogueStreamWithTimestampsRequestOutputFormat]
            Output format of the generated audio. Formatted as codec_sample_rate_bitrate; for example, an MP3 with a 22.05kHz sample rate at 32kbps is represented as mp3_22050_32. MP3 at 192kbps requires the Creator tier or above; PCM at a 44.1kHz sample rate requires the Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs.

        model_id : typing.Optional[str]
            Identifier of the model that will be used; you can query available models using GET /v1/models. The model needs to support text to speech, which you can check using the can_do_text_to_speech property.

        language_code : typing.Optional[str]
            Language code (ISO 639-1) used to enforce a language for the model and text normalization. If the model does not support the provided language code, an error will be returned.

        settings : typing.Optional[ModelSettingsResponseModel]
            Settings controlling the dialogue generation.

        pronunciation_dictionary_locators : typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]]
            A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request.

        seed : typing.Optional[int]
            If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.

        apply_text_normalization : typing.Optional[BodyTextToDialogueStreamWithTimestampsApplyTextNormalization]
            This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Yields
        ------
        typing.Iterator[HttpResponse[typing.Iterator[StreamingAudioChunkWithTimestampsAndVoiceSegmentsResponseModel]]]
            Stream of audio chunks with timestamps and voice segments
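
        Examples
        --------
        A minimal sketch, not generated from the API definition: it assumes a
        `SyncClientWrapper` instance is already available as `client_wrapper`
        and uses a placeholder voice ID.

        client = RawTextToDialogueClient(client_wrapper=client_wrapper)
        with client.stream_with_timestamps(
            inputs=[DialogueInput(text="Hello!", voice_id="VOICE_ID_A")],
        ) as response:
            for chunk in response.data:
                ...  # each chunk carries base64-encoded audio plus timing and voice-segment data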
        """
        with self._client_wrapper.httpx_client.stream(
            "v1/text-to-dialogue/stream/with-timestamps",
            method="POST",
            params={
                "output_format": output_format,
            },
            json={
                "inputs": convert_and_respect_annotation_metadata(
                    object_=inputs, annotation=typing.Sequence[DialogueInput], direction="write"
                ),
                "model_id": model_id,
                "language_code": language_code,
                "settings": convert_and_respect_annotation_metadata(
                    object_=settings, annotation=ModelSettingsResponseModel, direction="write"
                ),
                "pronunciation_dictionary_locators": convert_and_respect_annotation_metadata(
                    object_=pronunciation_dictionary_locators,
                    annotation=typing.Sequence[PronunciationDictionaryVersionLocator],
                    direction="write",
                ),
                "seed": seed,
                "apply_text_normalization": apply_text_normalization,
            },
            headers={
                "content-type": "application/json",
            },
            request_options=request_options,
            omit=OMIT,
        ) as _response:

            def _stream() -> HttpResponse[
                typing.Iterator[StreamingAudioChunkWithTimestampsAndVoiceSegmentsResponseModel]
            ]:
                try:
                    if 200 <= _response.status_code < 300:

                        def _iter():
                            for _text in _response.iter_lines():
                                try:
                                    if len(_text) == 0:
                                        continue
                                    yield typing.cast(
                                        StreamingAudioChunkWithTimestampsAndVoiceSegmentsResponseModel,
                                        construct_type(
                                            type_=StreamingAudioChunkWithTimestampsAndVoiceSegmentsResponseModel,  # type: ignore
                                            object_=json.loads(_text),
                                        ),
                                    )
                                except Exception:
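                                    # Skip any line that fails JSON parsing or model construction.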
                                    pass
                            return

                        return HttpResponse(response=_response, data=_iter())
                    _response.read()
                    if _response.status_code == 422:
                        raise UnprocessableEntityError(
                            headers=dict(_response.headers),
                            body=typing.cast(
                                HttpValidationError,
                                construct_type(
                                    type_=HttpValidationError,  # type: ignore
                                    object_=_response.json(),
                                ),
                            ),
                        )
                    _response_json = _response.json()
                except JSONDecodeError:
                    raise ApiError(
                        status_code=_response.status_code, headers=dict(_response.headers), body=_response.text
                    )
                raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)

            yield _stream()

    def convert_with_timestamps(
        self,
        *,
        inputs: typing.Sequence[DialogueInput],
        output_format: typing.Optional[TextToDialogueConvertWithTimestampsRequestOutputFormat] = None,
        model_id: typing.Optional[str] = OMIT,
        language_code: typing.Optional[str] = OMIT,
        settings: typing.Optional[ModelSettingsResponseModel] = OMIT,
        pronunciation_dictionary_locators: typing.Optional[
            typing.Sequence[PronunciationDictionaryVersionLocator]
        ] = OMIT,
        seed: typing.Optional[int] = OMIT,
        apply_text_normalization: typing.Optional[BodyTextToDialogueFullWithTimestampsApplyTextNormalization] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> HttpResponse[AudioWithTimestampsAndVoiceSegmentsResponseModel]:
        """
        Generate dialogue from text with precise character-level timing information for audio-text synchronization.

        Parameters
        ----------
        inputs : typing.Sequence[DialogueInput]
            A list of dialogue inputs, each containing text and a voice ID which will be converted into speech.

        output_format : typing.Optional[TextToDialogueConvertWithTimestampsRequestOutputFormat]
            Output format of the generated audio. Formatted as codec_sample_rate_bitrate; for example, an MP3 with a 22.05kHz sample rate at 32kbps is represented as mp3_22050_32. MP3 at 192kbps requires the Creator tier or above; PCM at a 44.1kHz sample rate requires the Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs.

        model_id : typing.Optional[str]
            Identifier of the model that will be used; you can query available models using GET /v1/models. The model needs to support text to speech, which you can check using the can_do_text_to_speech property.

        language_code : typing.Optional[str]
            Language code (ISO 639-1) used to enforce a language for the model and text normalization. If the model does not support the provided language code, an error will be returned.

        settings : typing.Optional[ModelSettingsResponseModel]
            Settings controlling the dialogue generation.

        pronunciation_dictionary_locators : typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]]
            A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request.

        seed : typing.Optional[int]
            If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.

        apply_text_normalization : typing.Optional[BodyTextToDialogueFullWithTimestampsApplyTextNormalization]
            This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        HttpResponse[AudioWithTimestampsAndVoiceSegmentsResponseModel]
            Successful Response
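
        Examples
        --------
        A minimal sketch, not generated from the API definition: it assumes a
        `SyncClientWrapper` instance is already available as `client_wrapper`
        and uses a placeholder voice ID.

        client = RawTextToDialogueClient(client_wrapper=client_wrapper)
        response = client.convert_with_timestamps(
            inputs=[DialogueInput(text="Hello!", voice_id="VOICE_ID_A")],
        )
        result = response.data  # AudioWithTimestampsAndVoiceSegmentsResponseModel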
        """
        _response = self._client_wrapper.httpx_client.request(
            "v1/text-to-dialogue/with-timestamps",
            method="POST",
            params={
                "output_format": output_format,
            },
            json={
                "inputs": convert_and_respect_annotation_metadata(
                    object_=inputs, annotation=typing.Sequence[DialogueInput], direction="write"
                ),
                "model_id": model_id,
                "language_code": language_code,
                "settings": convert_and_respect_annotation_metadata(
                    object_=settings, annotation=ModelSettingsResponseModel, direction="write"
                ),
                "pronunciation_dictionary_locators": convert_and_respect_annotation_metadata(
                    object_=pronunciation_dictionary_locators,
                    annotation=typing.Sequence[PronunciationDictionaryVersionLocator],
                    direction="write",
                ),
                "seed": seed,
                "apply_text_normalization": apply_text_normalization,
            },
            headers={
                "content-type": "application/json",
            },
            request_options=request_options,
            omit=OMIT,
        )
        try:
            if 200 <= _response.status_code < 300:
                _data = typing.cast(
                    AudioWithTimestampsAndVoiceSegmentsResponseModel,
                    construct_type(
                        type_=AudioWithTimestampsAndVoiceSegmentsResponseModel,  # type: ignore
                        object_=_response.json(),
                    ),
                )
                return HttpResponse(response=_response, data=_data)
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    headers=dict(_response.headers),
                    body=typing.cast(
                        HttpValidationError,
                        construct_type(
                            type_=HttpValidationError,  # type: ignore
                            object_=_response.json(),
                        ),
                    ),
                )
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)


class AsyncRawTextToDialogueClient:
    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._client_wrapper = client_wrapper

    @contextlib.asynccontextmanager
    async def convert(
        self,
        *,
        inputs: typing.Sequence[DialogueInput],
        output_format: typing.Optional[TextToDialogueConvertRequestOutputFormat] = None,
        model_id: typing.Optional[str] = OMIT,
        language_code: typing.Optional[str] = OMIT,
        settings: typing.Optional[ModelSettingsResponseModel] = OMIT,
        pronunciation_dictionary_locators: typing.Optional[
            typing.Sequence[PronunciationDictionaryVersionLocator]
        ] = OMIT,
        seed: typing.Optional[int] = OMIT,
        apply_text_normalization: typing.Optional[
            BodyTextToDialogueMultiVoiceV1TextToDialoguePostApplyTextNormalization
        ] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[bytes]]]:
        """
        Converts a list of text and voice ID pairs into speech (dialogue) and returns audio.

        Parameters
        ----------
        inputs : typing.Sequence[DialogueInput]
            A list of dialogue inputs, each containing text and a voice ID which will be converted into speech.

        output_format : typing.Optional[TextToDialogueConvertRequestOutputFormat]
            Output format of the generated audio. Formatted as codec_sample_rate_bitrate; for example, an MP3 with a 22.05kHz sample rate at 32kbps is represented as mp3_22050_32. MP3 at 192kbps requires the Creator tier or above; PCM at a 44.1kHz sample rate requires the Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs.

        model_id : typing.Optional[str]
            Identifier of the model that will be used; you can query available models using GET /v1/models. The model needs to support text to speech, which you can check using the can_do_text_to_speech property.

        language_code : typing.Optional[str]
            Language code (ISO 639-1) used to enforce a language for the model and text normalization. If the model does not support the provided language code, an error will be returned.

        settings : typing.Optional[ModelSettingsResponseModel]
            Settings controlling the dialogue generation.

        pronunciation_dictionary_locators : typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]]
            A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request.

        seed : typing.Optional[int]
            If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.

        apply_text_normalization : typing.Optional[BodyTextToDialogueMultiVoiceV1TextToDialoguePostApplyTextNormalization]
            This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration, such as `chunk_size`, used to customize the request and response.

        Returns
        -------
        typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[bytes]]]
            The generated audio file
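
        Examples
        --------
        A minimal sketch, not generated from the API definition: it assumes an
        `AsyncClientWrapper` instance is already available as `client_wrapper`
        and uses placeholder voice IDs.

        import asyncio

        async def main() -> None:
            client = AsyncRawTextToDialogueClient(client_wrapper=client_wrapper)
            async with client.convert(
                inputs=[
                    DialogueInput(text="Knock knock.", voice_id="VOICE_ID_A"),
                    DialogueInput(text="Who is there?", voice_id="VOICE_ID_B"),
                ],
                output_format="mp3_22050_32",
            ) as response:
                with open("dialogue.mp3", "wb") as audio_file:
                    async for chunk in response.data:
                        audio_file.write(chunk)

        asyncio.run(main())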
        """
        async with self._client_wrapper.httpx_client.stream(
            "v1/text-to-dialogue",
            method="POST",
            params={
                "output_format": output_format,
            },
            json={
                "inputs": convert_and_respect_annotation_metadata(
                    object_=inputs, annotation=typing.Sequence[DialogueInput], direction="write"
                ),
                "model_id": model_id,
                "language_code": language_code,
                "settings": convert_and_respect_annotation_metadata(
                    object_=settings, annotation=ModelSettingsResponseModel, direction="write"
                ),
                "pronunciation_dictionary_locators": convert_and_respect_annotation_metadata(
                    object_=pronunciation_dictionary_locators,
                    annotation=typing.Sequence[PronunciationDictionaryVersionLocator],
                    direction="write",
                ),
                "seed": seed,
                "apply_text_normalization": apply_text_normalization,
            },
            headers={
                "content-type": "application/json",
            },
            request_options=request_options,
            omit=OMIT,
        ) as _response:

            async def _stream() -> AsyncHttpResponse[typing.AsyncIterator[bytes]]:
                try:
                    if 200 <= _response.status_code < 300:
                        _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
                        return AsyncHttpResponse(
                            response=_response,
                            data=(_chunk async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size)),
                        )
                    await _response.aread()
                    if _response.status_code == 422:
                        raise UnprocessableEntityError(
                            headers=dict(_response.headers),
                            body=typing.cast(
                                HttpValidationError,
                                construct_type(
                                    type_=HttpValidationError,  # type: ignore
                                    object_=_response.json(),
                                ),
                            ),
                        )
                    _response_json = _response.json()
                except JSONDecodeError:
                    raise ApiError(
                        status_code=_response.status_code, headers=dict(_response.headers), body=_response.text
                    )
                raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)

            yield await _stream()

    @contextlib.asynccontextmanager
    async def stream(
        self,
        *,
        inputs: typing.Sequence[DialogueInput],
        output_format: typing.Optional[TextToDialogueStreamRequestOutputFormat] = None,
        model_id: typing.Optional[str] = OMIT,
        language_code: typing.Optional[str] = OMIT,
        settings: typing.Optional[ModelSettingsResponseModel] = OMIT,
        pronunciation_dictionary_locators: typing.Optional[
            typing.Sequence[PronunciationDictionaryVersionLocator]
        ] = OMIT,
        seed: typing.Optional[int] = OMIT,
        apply_text_normalization: typing.Optional[
            BodyTextToDialogueMultiVoiceStreamingV1TextToDialogueStreamPostApplyTextNormalization
        ] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[bytes]]]:
        """
        Converts a list of text and voice ID pairs into speech (dialogue) and returns an audio stream.

        Parameters
        ----------
        inputs : typing.Sequence[DialogueInput]
            A list of dialogue inputs, each containing text and a voice ID which will be converted into speech.

        output_format : typing.Optional[TextToDialogueStreamRequestOutputFormat]
            Output format of the generated audio. Formatted as codec_sample_rate_bitrate; for example, an MP3 with a 22.05kHz sample rate at 32kbps is represented as mp3_22050_32. MP3 at 192kbps requires the Creator tier or above; PCM at a 44.1kHz sample rate requires the Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs.

        model_id : typing.Optional[str]
            Identifier of the model that will be used; you can query available models using GET /v1/models. The model needs to support text to speech, which you can check using the can_do_text_to_speech property.

        language_code : typing.Optional[str]
            Language code (ISO 639-1) used to enforce a language for the model and text normalization. If the model does not support the provided language code, an error will be returned.

        settings : typing.Optional[ModelSettingsResponseModel]
            Settings controlling the dialogue generation.

        pronunciation_dictionary_locators : typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]]
            A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request.

        seed : typing.Optional[int]
            If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.

        apply_text_normalization : typing.Optional[BodyTextToDialogueMultiVoiceStreamingV1TextToDialogueStreamPostApplyTextNormalization]
            This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration, such as `chunk_size`, used to customize the request and response.

        Returns
        -------
        typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[bytes]]]
            Streaming audio data
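
        Examples
        --------
        A minimal sketch, not generated from the API definition: it assumes an
        `AsyncClientWrapper` instance is already available as `client_wrapper`
        and uses placeholder voice IDs.

        import asyncio

        async def main() -> None:
            client = AsyncRawTextToDialogueClient(client_wrapper=client_wrapper)
            async with client.stream(
                inputs=[
                    DialogueInput(text="Hello!", voice_id="VOICE_ID_A"),
                    DialogueInput(text="Hi there!", voice_id="VOICE_ID_B"),
                ],
                request_options={"chunk_size": 4096},
            ) as response:
                async for chunk in response.data:
                    ...  # forward each audio chunk to a player or websocket

        asyncio.run(main())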
        """
        async with self._client_wrapper.httpx_client.stream(
            "v1/text-to-dialogue/stream",
            method="POST",
            params={
                "output_format": output_format,
            },
            json={
                "inputs": convert_and_respect_annotation_metadata(
                    object_=inputs, annotation=typing.Sequence[DialogueInput], direction="write"
                ),
                "model_id": model_id,
                "language_code": language_code,
                "settings": convert_and_respect_annotation_metadata(
                    object_=settings, annotation=ModelSettingsResponseModel, direction="write"
                ),
                "pronunciation_dictionary_locators": convert_and_respect_annotation_metadata(
                    object_=pronunciation_dictionary_locators,
                    annotation=typing.Sequence[PronunciationDictionaryVersionLocator],
                    direction="write",
                ),
                "seed": seed,
                "apply_text_normalization": apply_text_normalization,
            },
            headers={
                "content-type": "application/json",
            },
            request_options=request_options,
            omit=OMIT,
        ) as _response:

            async def _stream() -> AsyncHttpResponse[typing.AsyncIterator[bytes]]:
                try:
                    if 200 <= _response.status_code < 300:
                        _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
                        return AsyncHttpResponse(
                            response=_response,
                            data=(_chunk async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size)),
                        )
                    await _response.aread()
                    if _response.status_code == 422:
                        raise UnprocessableEntityError(
                            headers=dict(_response.headers),
                            body=typing.cast(
                                HttpValidationError,
                                construct_type(
                                    type_=HttpValidationError,  # type: ignore
                                    object_=_response.json(),
                                ),
                            ),
                        )
                    _response_json = _response.json()
                except JSONDecodeError:
                    raise ApiError(
                        status_code=_response.status_code, headers=dict(_response.headers), body=_response.text
                    )
                raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)

            yield await _stream()

    @contextlib.asynccontextmanager
    async def stream_with_timestamps(
        self,
        *,
        inputs: typing.Sequence[DialogueInput],
        output_format: typing.Optional[TextToDialogueStreamWithTimestampsRequestOutputFormat] = None,
        model_id: typing.Optional[str] = OMIT,
        language_code: typing.Optional[str] = OMIT,
        settings: typing.Optional[ModelSettingsResponseModel] = OMIT,
        pronunciation_dictionary_locators: typing.Optional[
            typing.Sequence[PronunciationDictionaryVersionLocator]
        ] = OMIT,
        seed: typing.Optional[int] = OMIT,
        apply_text_normalization: typing.Optional[BodyTextToDialogueStreamWithTimestampsApplyTextNormalization] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.AsyncIterator[
        AsyncHttpResponse[typing.AsyncIterator[StreamingAudioChunkWithTimestampsAndVoiceSegmentsResponseModel]]
    ]:
        """
        Converts a list of text and voice ID pairs into speech (dialogue) and returns a stream of JSON blobs, each containing base64-encoded audio and timestamps.

        Parameters
        ----------
        inputs : typing.Sequence[DialogueInput]
            A list of dialogue inputs, each containing text and a voice ID which will be converted into speech.

        output_format : typing.Optional[TextToDialogueStreamWithTimestampsRequestOutputFormat]
            Output format of the generated audio. Formatted as codec_sample_rate_bitrate; for example, an MP3 with a 22.05kHz sample rate at 32kbps is represented as mp3_22050_32. MP3 at 192kbps requires the Creator tier or above; PCM at a 44.1kHz sample rate requires the Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs.

        model_id : typing.Optional[str]
            Identifier of the model that will be used; you can query available models using GET /v1/models. The model needs to support text to speech, which you can check using the can_do_text_to_speech property.

        language_code : typing.Optional[str]
            Language code (ISO 639-1) used to enforce a language for the model and text normalization. If the model does not support the provided language code, an error will be returned.

        settings : typing.Optional[ModelSettingsResponseModel]
            Settings controlling the dialogue generation.

        pronunciation_dictionary_locators : typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]]
            A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request.

        seed : typing.Optional[int]
            If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.

        apply_text_normalization : typing.Optional[BodyTextToDialogueStreamWithTimestampsApplyTextNormalization]
            This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Yields
        ------
        typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[StreamingAudioChunkWithTimestampsAndVoiceSegmentsResponseModel]]]
            Stream of audio chunks with timestamps and voice segments
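
        Examples
        --------
        A minimal sketch, not generated from the API definition: it assumes an
        `AsyncClientWrapper` instance is already available as `client_wrapper`
        and uses a placeholder voice ID.

        import asyncio

        async def main() -> None:
            client = AsyncRawTextToDialogueClient(client_wrapper=client_wrapper)
            async with client.stream_with_timestamps(
                inputs=[DialogueInput(text="Hello!", voice_id="VOICE_ID_A")],
            ) as response:
                async for chunk in response.data:
                    ...  # each chunk carries base64-encoded audio plus timing and voice-segment data

        asyncio.run(main())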
        """
        async with self._client_wrapper.httpx_client.stream(
            "v1/text-to-dialogue/stream/with-timestamps",
            method="POST",
            params={
                "output_format": output_format,
            },
            json={
                "inputs": convert_and_respect_annotation_metadata(
                    object_=inputs, annotation=typing.Sequence[DialogueInput], direction="write"
                ),
                "model_id": model_id,
                "language_code": language_code,
                "settings": convert_and_respect_annotation_metadata(
                    object_=settings, annotation=ModelSettingsResponseModel, direction="write"
                ),
                "pronunciation_dictionary_locators": convert_and_respect_annotation_metadata(
                    object_=pronunciation_dictionary_locators,
                    annotation=typing.Sequence[PronunciationDictionaryVersionLocator],
                    direction="write",
                ),
                "seed": seed,
                "apply_text_normalization": apply_text_normalization,
            },
            headers={
                "content-type": "application/json",
            },
            request_options=request_options,
            omit=OMIT,
        ) as _response:

            async def _stream() -> AsyncHttpResponse[
                typing.AsyncIterator[StreamingAudioChunkWithTimestampsAndVoiceSegmentsResponseModel]
            ]:
                try:
                    if 200 <= _response.status_code < 300:

                        async def _iter():
                            async for _text in _response.aiter_lines():
                                try:
                                    if len(_text) == 0:
                                        continue
                                    yield typing.cast(
                                        StreamingAudioChunkWithTimestampsAndVoiceSegmentsResponseModel,
                                        construct_type(
                                            type_=StreamingAudioChunkWithTimestampsAndVoiceSegmentsResponseModel,  # type: ignore
                                            object_=json.loads(_text),
                                        ),
                                    )
                                except Exception:
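                                    # Skip any line that fails JSON parsing or model construction.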
                                    pass
                            return

                        return AsyncHttpResponse(response=_response, data=_iter())
                    await _response.aread()
                    if _response.status_code == 422:
                        raise UnprocessableEntityError(
                            headers=dict(_response.headers),
                            body=typing.cast(
                                HttpValidationError,
                                construct_type(
                                    type_=HttpValidationError,  # type: ignore
                                    object_=_response.json(),
                                ),
                            ),
                        )
                    _response_json = _response.json()
                except JSONDecodeError:
                    raise ApiError(
                        status_code=_response.status_code, headers=dict(_response.headers), body=_response.text
                    )
                raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)

            yield await _stream()

    async def convert_with_timestamps(
        self,
        *,
        inputs: typing.Sequence[DialogueInput],
        output_format: typing.Optional[TextToDialogueConvertWithTimestampsRequestOutputFormat] = None,
        model_id: typing.Optional[str] = OMIT,
        language_code: typing.Optional[str] = OMIT,
        settings: typing.Optional[ModelSettingsResponseModel] = OMIT,
        pronunciation_dictionary_locators: typing.Optional[
            typing.Sequence[PronunciationDictionaryVersionLocator]
        ] = OMIT,
        seed: typing.Optional[int] = OMIT,
        apply_text_normalization: typing.Optional[BodyTextToDialogueFullWithTimestampsApplyTextNormalization] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> AsyncHttpResponse[AudioWithTimestampsAndVoiceSegmentsResponseModel]:
        """
        Generate dialogue from text with precise character-level timing information for audio-text synchronization.

        Parameters
        ----------
        inputs : typing.Sequence[DialogueInput]
            A list of dialogue inputs, each containing text and a voice ID which will be converted into speech.

        output_format : typing.Optional[TextToDialogueConvertWithTimestampsRequestOutputFormat]
            Output format of the generated audio. Formatted as codec_sample_rate_bitrate; for example, an MP3 with a 22.05kHz sample rate at 32kbps is represented as mp3_22050_32. MP3 at 192kbps requires the Creator tier or above; PCM at a 44.1kHz sample rate requires the Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs.

        model_id : typing.Optional[str]
            Identifier of the model that will be used; you can query available models using GET /v1/models. The model needs to support text to speech, which you can check using the can_do_text_to_speech property.

        language_code : typing.Optional[str]
            Language code (ISO 639-1) used to enforce a language for the model and text normalization. If the model does not support the provided language code, an error will be returned.

        settings : typing.Optional[ModelSettingsResponseModel]
            Settings controlling the dialogue generation.

        pronunciation_dictionary_locators : typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]]
            A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request.

        seed : typing.Optional[int]
            If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.

        apply_text_normalization : typing.Optional[BodyTextToDialogueFullWithTimestampsApplyTextNormalization]
            This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsyncHttpResponse[AudioWithTimestampsAndVoiceSegmentsResponseModel]
            Successful Response
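
        Examples
        --------
        A minimal sketch, not generated from the API definition: it assumes an
        `AsyncClientWrapper` instance is already available as `client_wrapper`
        and uses a placeholder voice ID.

        import asyncio

        async def main() -> None:
            client = AsyncRawTextToDialogueClient(client_wrapper=client_wrapper)
            response = await client.convert_with_timestamps(
                inputs=[DialogueInput(text="Hello!", voice_id="VOICE_ID_A")],
            )
            result = response.data  # AudioWithTimestampsAndVoiceSegmentsResponseModel

        asyncio.run(main())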
        """
        _response = await self._client_wrapper.httpx_client.request(
            "v1/text-to-dialogue/with-timestamps",
            method="POST",
            params={
                "output_format": output_format,
            },
            json={
                "inputs": convert_and_respect_annotation_metadata(
                    object_=inputs, annotation=typing.Sequence[DialogueInput], direction="write"
                ),
                "model_id": model_id,
                "language_code": language_code,
                "settings": convert_and_respect_annotation_metadata(
                    object_=settings, annotation=ModelSettingsResponseModel, direction="write"
                ),
                "pronunciation_dictionary_locators": convert_and_respect_annotation_metadata(
                    object_=pronunciation_dictionary_locators,
                    annotation=typing.Sequence[PronunciationDictionaryVersionLocator],
                    direction="write",
                ),
                "seed": seed,
                "apply_text_normalization": apply_text_normalization,
            },
            headers={
                "content-type": "application/json",
            },
            request_options=request_options,
            omit=OMIT,
        )
        try:
            if 200 <= _response.status_code < 300:
                _data = typing.cast(
                    AudioWithTimestampsAndVoiceSegmentsResponseModel,
                    construct_type(
                        type_=AudioWithTimestampsAndVoiceSegmentsResponseModel,  # type: ignore
                        object_=_response.json(),
                    ),
                )
                return AsyncHttpResponse(response=_response, data=_data)
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    headers=dict(_response.headers),
                    body=typing.cast(
                        HttpValidationError,
                        construct_type(
                            type_=HttpValidationError,  # type: ignore
                            object_=_response.json(),
                        ),
                    ),
                )
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
