# This file was auto-generated by Fern from our API Definition.

import typing

from .. import core
from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
from ..core.request_options import RequestOptions
from .raw_client import AsyncRawSpeechToSpeechClient, RawSpeechToSpeechClient
from .types.speech_to_speech_convert_request_file_format import SpeechToSpeechConvertRequestFileFormat
from .types.speech_to_speech_convert_request_output_format import SpeechToSpeechConvertRequestOutputFormat
from .types.speech_to_speech_stream_request_file_format import SpeechToSpeechStreamRequestFileFormat
from .types.speech_to_speech_stream_request_output_format import SpeechToSpeechStreamRequestOutputFormat

# Sentinel default for optional parameters: it distinguishes "not provided"
# (the field is omitted from the request) from an explicit None.
OMIT = typing.cast(typing.Any, ...)


class SpeechToSpeechClient:
    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._raw_client = RawSpeechToSpeechClient(client_wrapper=client_wrapper)

    @property
    def with_raw_response(self) -> RawSpeechToSpeechClient:
        """
        Retrieves a variant of this client whose methods return raw responses.

        Returns
        -------
        RawSpeechToSpeechClient
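
        Examples
        --------
        A minimal sketch, inferred from how this client consumes the raw
        client internally: the raw method is used as a context manager and
        exposes the streamed bytes on `.data`.

        with client.speech_to_speech.with_raw_response.convert(
            voice_id="JBFqnCBsd6RMkjVDRZzb",
            audio=open("input.wav", "rb"),  # placeholder path
        ) as response:
            for chunk in response.data:
                ...  # each chunk is raw audio bytes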
        """
        return self._raw_client

    def convert(
        self,
        voice_id: str,
        *,
        audio: core.File,
        enable_logging: typing.Optional[bool] = None,
        optimize_streaming_latency: typing.Optional[int] = None,
        output_format: typing.Optional[SpeechToSpeechConvertRequestOutputFormat] = None,
        model_id: typing.Optional[str] = OMIT,
        voice_settings: typing.Optional[str] = OMIT,
        seed: typing.Optional[int] = OMIT,
        remove_background_noise: typing.Optional[bool] = OMIT,
        file_format: typing.Optional[SpeechToSpeechConvertRequestFileFormat] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Iterator[bytes]:
        """
        Transform audio from one voice to another. Maintain full control over emotion, timing and delivery.

        Parameters
        ----------
        voice_id : str
            ID of the voice to be used. Use the [Get voices](/docs/api-reference/voices/search) endpoint to list all the available voices.

        audio : core.File
            See core.File for more documentation

        enable_logging : typing.Optional[bool]
            When enable_logging is set to false, zero retention mode will be used for the request. This means history features, including request stitching, are unavailable for this request. Zero retention mode may only be used by enterprise customers.

        optimize_streaming_latency : typing.Optional[int]
            You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values:
            0 - default mode (no latency optimizations)
            1 - normal latency optimizations (about 50% of possible latency improvement of option 3)
            2 - strong latency optimizations (about 75% of possible latency improvement of option 3)
            3 - max latency optimizations
            4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce e.g. numbers and dates).

            Defaults to None.

        output_format : typing.Optional[SpeechToSpeechConvertRequestOutputFormat]
            Output format of the generated audio. Formatted as codec_sample_rate_bitrate. For example, an MP3 with a 22.05 kHz sample rate at 32 kbps is represented as mp3_22050_32. MP3 at 192 kbps requires a subscription to the Creator tier or above. PCM at a 44.1 kHz sample rate requires a subscription to the Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs.

        model_id : typing.Optional[str]
            Identifier of the model to be used. You can query available models using GET /v1/models. The model must support speech-to-speech; you can check this via the can_do_voice_conversion property.

        voice_settings : typing.Optional[str]
            Voice settings that override the stored settings for the given voice. They are applied only to the given request and must be sent as a JSON-encoded string.

        seed : typing.Optional[int]
            If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.

        remove_background_noise : typing.Optional[bool]
            If set, removes the background noise from your audio input using our audio isolation model. Only applies to Voice Changer.

        file_format : typing.Optional[SpeechToSpeechConvertRequestFileFormat]
            The format of input audio. Options are 'pcm_s16le_16' or 'other'. For `pcm_s16le_16`, the input audio must be 16-bit PCM at a 16 kHz sample rate, single channel (mono), and little-endian byte order. Latency will be lower than when passing an encoded waveform.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration. You can pass in configuration such as `chunk_size`, among other options, to customize the request and response.

        Returns
        -------
        typing.Iterator[bytes]
            The generated audio file

        Examples
        --------
        from elevenlabs import ElevenLabs

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        audio_stream = client.speech_to_speech.convert(
            voice_id="JBFqnCBsd6RMkjVDRZzb",
            audio=open("input.wav", "rb"),  # placeholder path; any binary file-like object works
            output_format="mp3_44100_128",
            model_id="eleven_multilingual_sts_v2",
        )
        with open("output.mp3", "wb") as f:
            for chunk in audio_stream:
                f.write(chunk)
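
        A minimal sketch of overriding voice settings for a single request.
        Per the parameter docs, `voice_settings` must be a JSON-encoded
        string; the field names below are illustrative assumptions to check
        against your voice's settings model:

        import json

        for chunk in client.speech_to_speech.convert(
            voice_id="JBFqnCBsd6RMkjVDRZzb",
            audio=open("input.wav", "rb"),  # placeholder path
            voice_settings=json.dumps({"stability": 0.5, "similarity_boost": 0.75}),
        ):
            ...  # consume the stream to execute the request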
        """
        with self._raw_client.convert(
            voice_id,
            audio=audio,
            enable_logging=enable_logging,
            optimize_streaming_latency=optimize_streaming_latency,
            output_format=output_format,
            model_id=model_id,
            voice_settings=voice_settings,
            seed=seed,
            remove_background_noise=remove_background_noise,
            file_format=file_format,
            request_options=request_options,
        ) as r:
            yield from r.data

    def stream(
        self,
        voice_id: str,
        *,
        audio: core.File,
        enable_logging: typing.Optional[bool] = None,
        optimize_streaming_latency: typing.Optional[int] = None,
        output_format: typing.Optional[SpeechToSpeechStreamRequestOutputFormat] = None,
        model_id: typing.Optional[str] = OMIT,
        voice_settings: typing.Optional[str] = OMIT,
        seed: typing.Optional[int] = OMIT,
        remove_background_noise: typing.Optional[bool] = OMIT,
        file_format: typing.Optional[SpeechToSpeechStreamRequestFileFormat] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Iterator[bytes]:
        """
        Stream audio from one voice to another. Maintain full control over emotion, timing and delivery.

        Parameters
        ----------
        voice_id : str
            ID of the voice to be used. Use the [Get voices](/docs/api-reference/voices/search) endpoint to list all the available voices.

        audio : core.File
            See core.File for more documentation

        enable_logging : typing.Optional[bool]
            When enable_logging is set to false, zero retention mode will be used for the request. This means history features, including request stitching, are unavailable for this request. Zero retention mode may only be used by enterprise customers.

        optimize_streaming_latency : typing.Optional[int]
            You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values:
            0 - default mode (no latency optimizations)
            1 - normal latency optimizations (about 50% of possible latency improvement of option 3)
            2 - strong latency optimizations (about 75% of possible latency improvement of option 3)
            3 - max latency optimizations
            4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce e.g. numbers and dates).

            Defaults to None.

        output_format : typing.Optional[SpeechToSpeechStreamRequestOutputFormat]
            Output format of the generated audio. Formatted as codec_sample_rate_bitrate. For example, an MP3 with a 22.05 kHz sample rate at 32 kbps is represented as mp3_22050_32. MP3 at 192 kbps requires a subscription to the Creator tier or above. PCM at a 44.1 kHz sample rate requires a subscription to the Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs.

        model_id : typing.Optional[str]
            Identifier of the model to be used. You can query available models using GET /v1/models. The model must support speech-to-speech; you can check this via the can_do_voice_conversion property.

        voice_settings : typing.Optional[str]
            Voice settings that override the stored settings for the given voice. They are applied only to the given request and must be sent as a JSON-encoded string.

        seed : typing.Optional[int]
            If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.

        remove_background_noise : typing.Optional[bool]
            If set, removes the background noise from your audio input using our audio isolation model. Only applies to Voice Changer.

        file_format : typing.Optional[SpeechToSpeechStreamRequestFileFormat]
            The format of input audio. Options are 'pcm_s16le_16' or 'other'. For `pcm_s16le_16`, the input audio must be 16-bit PCM at a 16 kHz sample rate, single channel (mono), and little-endian byte order. Latency will be lower than when passing an encoded waveform.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration. You can pass in configuration such as `chunk_size`, among other options, to customize the request and response.

        Returns
        -------
        typing.Iterator[bytes]
            Streaming audio data

        Examples
        --------
        from elevenlabs import ElevenLabs

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        audio_stream = client.speech_to_speech.stream(
            voice_id="JBFqnCBsd6RMkjVDRZzb",
            audio=open("input.wav", "rb"),  # placeholder path; any binary file-like object works
            output_format="mp3_44100_128",
            model_id="eleven_multilingual_sts_v2",
        )
        for chunk in audio_stream:
            ...  # play or forward each chunk of audio bytes
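
        A minimal sketch of a lower-latency call with raw PCM input, assuming
        `raw_pcm` already holds 16-bit, 16 kHz, mono, little-endian audio
        bytes:

        import io

        for chunk in client.speech_to_speech.stream(
            voice_id="JBFqnCBsd6RMkjVDRZzb",
            audio=io.BytesIO(raw_pcm),
            file_format="pcm_s16le_16",
        ):
            ...  # forward each chunk to a player or socket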
        """
        with self._raw_client.stream(
            voice_id,
            audio=audio,
            enable_logging=enable_logging,
            optimize_streaming_latency=optimize_streaming_latency,
            output_format=output_format,
            model_id=model_id,
            voice_settings=voice_settings,
            seed=seed,
            remove_background_noise=remove_background_noise,
            file_format=file_format,
            request_options=request_options,
        ) as r:
            yield from r.data


class AsyncSpeechToSpeechClient:
    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._raw_client = AsyncRawSpeechToSpeechClient(client_wrapper=client_wrapper)

    @property
    def with_raw_response(self) -> AsyncRawSpeechToSpeechClient:
        """
        Retrieves a variant of this client whose methods return raw responses.

        Returns
        -------
        AsyncRawSpeechToSpeechClient
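
        Examples
        --------
        A minimal sketch, inferred from how this client consumes the raw
        client internally: the raw method is used as an async context manager
        and exposes the streamed bytes on `.data`.

        async with client.speech_to_speech.with_raw_response.convert(
            voice_id="JBFqnCBsd6RMkjVDRZzb",
            audio=open("input.wav", "rb"),  # placeholder path
        ) as response:
            async for chunk in response.data:
                ...  # each chunk is raw audio bytes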
        """
        return self._raw_client

    async def convert(
        self,
        voice_id: str,
        *,
        audio: core.File,
        enable_logging: typing.Optional[bool] = None,
        optimize_streaming_latency: typing.Optional[int] = None,
        output_format: typing.Optional[SpeechToSpeechConvertRequestOutputFormat] = None,
        model_id: typing.Optional[str] = OMIT,
        voice_settings: typing.Optional[str] = OMIT,
        seed: typing.Optional[int] = OMIT,
        remove_background_noise: typing.Optional[bool] = OMIT,
        file_format: typing.Optional[SpeechToSpeechConvertRequestFileFormat] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.AsyncIterator[bytes]:
        """
        Transform audio from one voice to another. Maintain full control over emotion, timing and delivery.

        Parameters
        ----------
        voice_id : str
            ID of the voice to be used. Use the [Get voices](/docs/api-reference/voices/search) endpoint to list all the available voices.

        audio : core.File
            See core.File for more documentation

        enable_logging : typing.Optional[bool]
            When enable_logging is set to false, zero retention mode will be used for the request. This means history features, including request stitching, are unavailable for this request. Zero retention mode may only be used by enterprise customers.

        optimize_streaming_latency : typing.Optional[int]
            You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values:
            0 - default mode (no latency optimizations)
            1 - normal latency optimizations (about 50% of possible latency improvement of option 3)
            2 - strong latency optimizations (about 75% of possible latency improvement of option 3)
            3 - max latency optimizations
            4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce e.g. numbers and dates).

            Defaults to None.

        output_format : typing.Optional[SpeechToSpeechConvertRequestOutputFormat]
            Output format of the generated audio. Formatted as codec_sample_rate_bitrate. For example, an MP3 with a 22.05 kHz sample rate at 32 kbps is represented as mp3_22050_32. MP3 at 192 kbps requires a subscription to the Creator tier or above. PCM at a 44.1 kHz sample rate requires a subscription to the Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs.

        model_id : typing.Optional[str]
            Identifier of the model to be used. You can query available models using GET /v1/models. The model must support speech-to-speech; you can check this via the can_do_voice_conversion property.

        voice_settings : typing.Optional[str]
            Voice settings that override the stored settings for the given voice. They are applied only to the given request and must be sent as a JSON-encoded string.

        seed : typing.Optional[int]
            If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.

        remove_background_noise : typing.Optional[bool]
            If set, removes the background noise from your audio input using our audio isolation model. Only applies to Voice Changer.

        file_format : typing.Optional[SpeechToSpeechConvertRequestFileFormat]
            The format of input audio. Options are 'pcm_s16le_16' or 'other'. For `pcm_s16le_16`, the input audio must be 16-bit PCM at a 16 kHz sample rate, single channel (mono), and little-endian byte order. Latency will be lower than when passing an encoded waveform.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration. You can pass in configuration such as `chunk_size`, among other options, to customize the request and response.

        Returns
        -------
        typing.AsyncIterator[bytes]
            The generated audio file

        Examples
        --------
        import asyncio

        from elevenlabs import AsyncElevenLabs

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            audio_stream = client.speech_to_speech.convert(
                voice_id="JBFqnCBsd6RMkjVDRZzb",
                audio=open("input.wav", "rb"),  # placeholder path; any binary file-like object works
                output_format="mp3_44100_128",
                model_id="eleven_multilingual_sts_v2",
            )
            # convert is an async generator, so iterate rather than await the call
            async for chunk in audio_stream:
                ...  # process each chunk of audio bytes


        asyncio.run(main())
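
        A minimal sketch of writing the converted audio to disk while tuning
        the response chunk size. `chunk_size` is the option named under
        `request_options` above; passing it as a plain mapping and the value
        1024 are illustrative assumptions:

        async def save() -> None:
            with open("output.mp3", "wb") as f:
                async for chunk in client.speech_to_speech.convert(
                    voice_id="JBFqnCBsd6RMkjVDRZzb",
                    audio=open("input.wav", "rb"),  # placeholder path
                    request_options={"chunk_size": 1024},
                ):
                    f.write(chunk)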
        """
        async with self._raw_client.convert(
            voice_id,
            audio=audio,
            enable_logging=enable_logging,
            optimize_streaming_latency=optimize_streaming_latency,
            output_format=output_format,
            model_id=model_id,
            voice_settings=voice_settings,
            seed=seed,
            remove_background_noise=remove_background_noise,
            file_format=file_format,
            request_options=request_options,
        ) as r:
            async for _chunk in r.data:
                yield _chunk

    async def stream(
        self,
        voice_id: str,
        *,
        audio: core.File,
        enable_logging: typing.Optional[bool] = None,
        optimize_streaming_latency: typing.Optional[int] = None,
        output_format: typing.Optional[SpeechToSpeechStreamRequestOutputFormat] = None,
        model_id: typing.Optional[str] = OMIT,
        voice_settings: typing.Optional[str] = OMIT,
        seed: typing.Optional[int] = OMIT,
        remove_background_noise: typing.Optional[bool] = OMIT,
        file_format: typing.Optional[SpeechToSpeechStreamRequestFileFormat] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.AsyncIterator[bytes]:
        """
        Stream audio from one voice to another. Maintain full control over emotion, timing and delivery.

        Parameters
        ----------
        voice_id : str
            ID of the voice to be used. Use the [Get voices](/docs/api-reference/voices/search) endpoint to list all the available voices.

        audio : core.File
            See core.File for more documentation

        enable_logging : typing.Optional[bool]
            When enable_logging is set to false, zero retention mode will be used for the request. This means history features, including request stitching, are unavailable for this request. Zero retention mode may only be used by enterprise customers.

        optimize_streaming_latency : typing.Optional[int]
            You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values:
            0 - default mode (no latency optimizations)
            1 - normal latency optimizations (about 50% of possible latency improvement of option 3)
            2 - strong latency optimizations (about 75% of possible latency improvement of option 3)
            3 - max latency optimizations
            4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce e.g. numbers and dates).

            Defaults to None.

        output_format : typing.Optional[SpeechToSpeechStreamRequestOutputFormat]
            Output format of the generated audio. Formatted as codec_sample_rate_bitrate. For example, an MP3 with a 22.05 kHz sample rate at 32 kbps is represented as mp3_22050_32. MP3 at 192 kbps requires a subscription to the Creator tier or above. PCM at a 44.1 kHz sample rate requires a subscription to the Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs.

        model_id : typing.Optional[str]
            Identifier of the model to be used. You can query available models using GET /v1/models. The model must support speech-to-speech; you can check this via the can_do_voice_conversion property.

        voice_settings : typing.Optional[str]
            Voice settings that override the stored settings for the given voice. They are applied only to the given request and must be sent as a JSON-encoded string.

        seed : typing.Optional[int]
            If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.

        remove_background_noise : typing.Optional[bool]
            If set, removes the background noise from your audio input using our audio isolation model. Only applies to Voice Changer.

        file_format : typing.Optional[SpeechToSpeechStreamRequestFileFormat]
            The format of input audio. Options are 'pcm_s16le_16' or 'other'. For `pcm_s16le_16`, the input audio must be 16-bit PCM at a 16 kHz sample rate, single channel (mono), and little-endian byte order. Latency will be lower than when passing an encoded waveform.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration. You can pass in configuration such as `chunk_size`, among other options, to customize the request and response.

        Returns
        -------
        typing.AsyncIterator[bytes]
            Streaming audio data

        Examples
        --------
        import asyncio

        from elevenlabs import AsyncElevenLabs

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            audio_stream = client.speech_to_speech.stream(
                voice_id="JBFqnCBsd6RMkjVDRZzb",
                audio=open("input.wav", "rb"),  # placeholder path; any binary file-like object works
                output_format="mp3_44100_128",
                model_id="eleven_multilingual_sts_v2",
            )
            # stream is an async generator, so iterate rather than await the call
            async for chunk in audio_stream:
                ...  # play or forward each chunk of audio bytes


        asyncio.run(main())
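
        A minimal sketch of requesting best-effort deterministic output via
        `seed`; determinism is not guaranteed, per the parameter docs, and the
        seed value here is arbitrary:

        async def deterministic() -> None:
            async for chunk in client.speech_to_speech.stream(
                voice_id="JBFqnCBsd6RMkjVDRZzb",
                audio=open("input.wav", "rb"),  # placeholder path
                seed=42,  # any integer in [0, 4294967295]
            ):
                ...  # repeated identical requests should, best effort, match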
        """
        async with self._raw_client.stream(
            voice_id,
            audio=audio,
            enable_logging=enable_logging,
            optimize_streaming_latency=optimize_streaming_latency,
            output_format=output_format,
            model_id=model_id,
            voice_settings=voice_settings,
            seed=seed,
            remove_background_noise=remove_background_noise,
            file_format=file_format,
            request_options=request_options,
        ) as r:
            async for _chunk in r.data:
                yield _chunk
