# This file was auto-generated by Fern from our API Definition.

import contextlib
import typing
from json.decoder import JSONDecodeError

from .. import core
from ..core.api_error import ApiError
from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
from ..core.http_response import AsyncHttpResponse, HttpResponse
from ..core.request_options import RequestOptions
from ..core.serialization import convert_and_respect_annotation_metadata
from ..core.unchecked_base_model import construct_type
from ..errors.unprocessable_entity_error import UnprocessableEntityError
from ..types.http_validation_error import HttpValidationError
from ..types.music_prompt import MusicPrompt
from .types.music_compose_detailed_request_output_format import MusicComposeDetailedRequestOutputFormat
from .types.music_compose_request_output_format import MusicComposeRequestOutputFormat
from .types.music_separate_stems_request_output_format import MusicSeparateStemsRequestOutputFormat
from .types.music_separate_stems_request_stem_variation_id import MusicSeparateStemsRequestStemVariationId
from .types.music_stream_request_output_format import MusicStreamRequestOutputFormat

# this is used as the default value for optional parameters
OMIT = typing.cast(typing.Any, ...)


class RawMusicClient:
    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._client_wrapper = client_wrapper

    @contextlib.contextmanager
    def compose(
        self,
        *,
        output_format: typing.Optional[MusicComposeRequestOutputFormat] = None,
        prompt: typing.Optional[str] = OMIT,
        composition_plan: typing.Optional[MusicPrompt] = OMIT,
        music_length_ms: typing.Optional[int] = OMIT,
        model_id: typing.Optional[typing.Literal["music_v1"]] = OMIT,
        force_instrumental: typing.Optional[bool] = OMIT,
        respect_sections_durations: typing.Optional[bool] = OMIT,
        store_for_inpainting: typing.Optional[bool] = OMIT,
        sign_with_c_2_pa: typing.Optional[bool] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Iterator[HttpResponse[typing.Iterator[bytes]]]:
        """
        Compose a song from a prompt or a composition plan.

        Parameters
        ----------
        output_format : typing.Optional[MusicComposeRequestOutputFormat]
            Output format of the generated audio. Formatted as codec_sample_rate_bitrate. For example, an MP3 with a 22.05kHz sample rate at 32kbps is represented as mp3_22050_32. MP3 with 192kbps bitrate requires you to be subscribed to Creator tier or above. PCM with 44.1kHz sample rate requires you to be subscribed to Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs.

        prompt : typing.Optional[str]
            A simple text prompt to generate a song from. Cannot be used in conjunction with `composition_plan`.

        composition_plan : typing.Optional[MusicPrompt]
            A detailed composition plan to guide music generation. Cannot be used in conjunction with `prompt`.

        music_length_ms : typing.Optional[int]
            The length of the song to generate, in milliseconds. Used only in conjunction with `prompt`. Must be between 3000ms and 300000ms. If not provided, the model chooses a length based on the prompt.

        model_id : typing.Optional[typing.Literal["music_v1"]]
            The model to use for the generation.

        force_instrumental : typing.Optional[bool]
            If true, guarantees that the generated song will be instrumental. If false, the song may or may not be instrumental depending on the `prompt`. Can only be used with `prompt`.

        respect_sections_durations : typing.Optional[bool]
            Controls how strictly section durations in the `composition_plan` are enforced. Only used with `composition_plan`. When set to true, the model precisely respects each section's `duration_ms` from the plan. When set to false, the model may adjust individual section durations, which generally leads to better generation quality and improved latency, while always preserving the total song duration from the plan.

        store_for_inpainting : typing.Optional[bool]
            Whether to store the generated song for inpainting. Only available to enterprise clients with access to the inpainting API.

        sign_with_c_2_pa : typing.Optional[bool]
            Whether to sign the generated song with C2PA. Applicable only for mp3 files.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration. You can pass in configuration such as `chunk_size`, among other options, to customize the request and response.

        Returns
        -------
        typing.Iterator[HttpResponse[typing.Iterator[bytes]]]
            The generated audio file in the format specified
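
        Examples
        --------
        A minimal usage sketch, assuming `raw_client` is an already-constructed
        `RawMusicClient` (the variable name is illustrative)::

            # raw_client: a RawMusicClient built from your configured SyncClientWrapper
            with raw_client.compose(prompt="an upbeat synthwave track") as response:
                with open("song.mp3", "wb") as audio_file:
                    for chunk in response.data:
                        audio_file.write(chunk)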
        """
        with self._client_wrapper.httpx_client.stream(
            "v1/music",
            method="POST",
            params={
                "output_format": output_format,
            },
            json={
                "prompt": prompt,
                "composition_plan": convert_and_respect_annotation_metadata(
                    object_=composition_plan, annotation=MusicPrompt, direction="write"
                ),
                "music_length_ms": music_length_ms,
                "model_id": model_id,
                "force_instrumental": force_instrumental,
                "respect_sections_durations": respect_sections_durations,
                "store_for_inpainting": store_for_inpainting,
                "sign_with_c2pa": sign_with_c_2_pa,
            },
            headers={
                "content-type": "application/json",
            },
            request_options=request_options,
            omit=OMIT,
        ) as _response:

            def _stream() -> HttpResponse[typing.Iterator[bytes]]:
                try:
                    if 200 <= _response.status_code < 300:
                        _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
                        return HttpResponse(
                            response=_response, data=(_chunk for _chunk in _response.iter_bytes(chunk_size=_chunk_size))
                        )
                    _response.read()
                    if _response.status_code == 422:
                        raise UnprocessableEntityError(
                            headers=dict(_response.headers),
                            body=typing.cast(
                                HttpValidationError,
                                construct_type(
                                    type_=HttpValidationError,  # type: ignore
                                    object_=_response.json(),
                                ),
                            ),
                        )
                    _response_json = _response.json()
                except JSONDecodeError:
                    raise ApiError(
                        status_code=_response.status_code, headers=dict(_response.headers), body=_response.text
                    )
                raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)

            yield _stream()

    @contextlib.contextmanager
    def compose_detailed(
        self,
        *,
        output_format: typing.Optional[MusicComposeDetailedRequestOutputFormat] = None,
        prompt: typing.Optional[str] = OMIT,
        composition_plan: typing.Optional[MusicPrompt] = OMIT,
        music_length_ms: typing.Optional[int] = OMIT,
        model_id: typing.Optional[typing.Literal["music_v1"]] = OMIT,
        force_instrumental: typing.Optional[bool] = OMIT,
        store_for_inpainting: typing.Optional[bool] = OMIT,
        with_timestamps: typing.Optional[bool] = OMIT,
        sign_with_c_2_pa: typing.Optional[bool] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Iterator[HttpResponse[typing.Iterator[bytes]]]:
        """
        Compose a song from a prompt or a composition plan, returning JSON metadata alongside the generated audio.

        Parameters
        ----------
        output_format : typing.Optional[MusicComposeDetailedRequestOutputFormat]
            Output format of the generated audio. Formatted as codec_sample_rate_bitrate. For example, an MP3 with a 22.05kHz sample rate at 32kbps is represented as mp3_22050_32. MP3 with 192kbps bitrate requires you to be subscribed to Creator tier or above. PCM with 44.1kHz sample rate requires you to be subscribed to Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs.

        prompt : typing.Optional[str]
            A simple text prompt to generate a song from. Cannot be used in conjunction with `composition_plan`.

        composition_plan : typing.Optional[MusicPrompt]
            A detailed composition plan to guide music generation. Cannot be used in conjunction with `prompt`.

        music_length_ms : typing.Optional[int]
            The length of the song to generate, in milliseconds. Used only in conjunction with `prompt`. Must be between 3000ms and 300000ms. If not provided, the model chooses a length based on the prompt.

        model_id : typing.Optional[typing.Literal["music_v1"]]
            The model to use for the generation.

        force_instrumental : typing.Optional[bool]
            If true, guarantees that the generated song will be instrumental. If false, the song may or may not be instrumental depending on the `prompt`. Can only be used with `prompt`.

        store_for_inpainting : typing.Optional[bool]
            Whether to store the generated song for inpainting. Only available to enterprise clients with access to the inpainting API.

        with_timestamps : typing.Optional[bool]
            Whether to return the timestamps of the words in the generated song.

        sign_with_c_2_pa : typing.Optional[bool]
            Whether to sign the generated song with C2PA. Applicable only for mp3 files.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration. You can pass in configuration such as `chunk_size`, among other options, to customize the request and response.

        Returns
        -------
        typing.Iterator[HttpResponse[typing.Iterator[bytes]]]
            Multipart/mixed response with JSON metadata and binary audio file
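
        Examples
        --------
        A minimal usage sketch, assuming `raw_client` is an already-constructed
        `RawMusicClient`; the streamed bytes form a multipart/mixed payload that the
        caller is responsible for parsing::

            # raw_client: a RawMusicClient built from your configured SyncClientWrapper
            with raw_client.compose_detailed(prompt="a calm piano piece", with_timestamps=True) as response:
                with open("detailed_response.multipart", "wb") as out:
                    for chunk in response.data:
                        out.write(chunk)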
        """
        with self._client_wrapper.httpx_client.stream(
            "v1/music/detailed",
            method="POST",
            params={
                "output_format": output_format,
            },
            json={
                "prompt": prompt,
                "composition_plan": convert_and_respect_annotation_metadata(
                    object_=composition_plan, annotation=MusicPrompt, direction="write"
                ),
                "music_length_ms": music_length_ms,
                "model_id": model_id,
                "force_instrumental": force_instrumental,
                "store_for_inpainting": store_for_inpainting,
                "with_timestamps": with_timestamps,
                "sign_with_c2pa": sign_with_c_2_pa,
            },
            headers={
                "content-type": "application/json",
            },
            request_options=request_options,
            omit=OMIT,
        ) as _response:

            def _stream() -> HttpResponse[typing.Iterator[bytes]]:
                try:
                    if 200 <= _response.status_code < 300:
                        _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
                        return HttpResponse(
                            response=_response, data=(_chunk for _chunk in _response.iter_bytes(chunk_size=_chunk_size))
                        )
                    _response.read()
                    if _response.status_code == 422:
                        raise UnprocessableEntityError(
                            headers=dict(_response.headers),
                            body=typing.cast(
                                HttpValidationError,
                                construct_type(
                                    type_=HttpValidationError,  # type: ignore
                                    object_=_response.json(),
                                ),
                            ),
                        )
                    _response_json = _response.json()
                except JSONDecodeError:
                    raise ApiError(
                        status_code=_response.status_code, headers=dict(_response.headers), body=_response.text
                    )
                raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)

            yield _stream()

    @contextlib.contextmanager
    def stream(
        self,
        *,
        output_format: typing.Optional[MusicStreamRequestOutputFormat] = None,
        prompt: typing.Optional[str] = OMIT,
        composition_plan: typing.Optional[MusicPrompt] = OMIT,
        music_length_ms: typing.Optional[int] = OMIT,
        model_id: typing.Optional[typing.Literal["music_v1"]] = OMIT,
        force_instrumental: typing.Optional[bool] = OMIT,
        store_for_inpainting: typing.Optional[bool] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Iterator[HttpResponse[typing.Iterator[bytes]]]:
        """
        Stream a composed song from a prompt or a composition plan.

        Parameters
        ----------
        output_format : typing.Optional[MusicStreamRequestOutputFormat]
            Output format of the generated audio. Formatted as codec_sample_rate_bitrate. For example, an MP3 with a 22.05kHz sample rate at 32kbps is represented as mp3_22050_32. MP3 with 192kbps bitrate requires you to be subscribed to Creator tier or above. PCM with 44.1kHz sample rate requires you to be subscribed to Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs.

        prompt : typing.Optional[str]
            A simple text prompt to generate a song from. Cannot be used in conjunction with `composition_plan`.

        composition_plan : typing.Optional[MusicPrompt]
            A detailed composition plan to guide music generation. Cannot be used in conjunction with `prompt`.

        music_length_ms : typing.Optional[int]
            The length of the song to generate, in milliseconds. Used only in conjunction with `prompt`. Must be between 3000ms and 300000ms. If not provided, the model chooses a length based on the prompt.

        model_id : typing.Optional[typing.Literal["music_v1"]]
            The model to use for the generation.

        force_instrumental : typing.Optional[bool]
            If true, guarantees that the generated song will be instrumental. If false, the song may or may not be instrumental depending on the `prompt`. Can only be used with `prompt`.

        store_for_inpainting : typing.Optional[bool]
            Whether to store the generated song for inpainting. Only available to enterprise clients with access to the inpainting API.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration. You can pass in configuration such as `chunk_size`, among other options, to customize the request and response.

        Returns
        -------
        typing.Iterator[HttpResponse[typing.Iterator[bytes]]]
            Streaming audio data in the format specified
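
        Examples
        --------
        A minimal usage sketch, assuming `raw_client` is an already-constructed
        `RawMusicClient`; chunks arrive as the audio is generated::

            # raw_client: a RawMusicClient built from your configured SyncClientWrapper
            with raw_client.stream(prompt="lofi beats for studying") as response:
                for chunk in response.data:
                    handle_audio_chunk(chunk)  # hypothetical consumer, e.g. feed a player or buffer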
        """
        with self._client_wrapper.httpx_client.stream(
            "v1/music/stream",
            method="POST",
            params={
                "output_format": output_format,
            },
            json={
                "prompt": prompt,
                "composition_plan": convert_and_respect_annotation_metadata(
                    object_=composition_plan, annotation=MusicPrompt, direction="write"
                ),
                "music_length_ms": music_length_ms,
                "model_id": model_id,
                "force_instrumental": force_instrumental,
                "store_for_inpainting": store_for_inpainting,
            },
            headers={
                "content-type": "application/json",
            },
            request_options=request_options,
            omit=OMIT,
        ) as _response:

            def _stream() -> HttpResponse[typing.Iterator[bytes]]:
                try:
                    if 200 <= _response.status_code < 300:
                        _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
                        return HttpResponse(
                            response=_response, data=(_chunk for _chunk in _response.iter_bytes(chunk_size=_chunk_size))
                        )
                    _response.read()
                    if _response.status_code == 422:
                        raise UnprocessableEntityError(
                            headers=dict(_response.headers),
                            body=typing.cast(
                                HttpValidationError,
                                construct_type(
                                    type_=HttpValidationError,  # type: ignore
                                    object_=_response.json(),
                                ),
                            ),
                        )
                    _response_json = _response.json()
                except JSONDecodeError:
                    raise ApiError(
                        status_code=_response.status_code, headers=dict(_response.headers), body=_response.text
                    )
                raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)

            yield _stream()

    @contextlib.contextmanager
    def separate_stems(
        self,
        *,
        file: core.File,
        output_format: typing.Optional[MusicSeparateStemsRequestOutputFormat] = None,
        stem_variation_id: typing.Optional[MusicSeparateStemsRequestStemVariationId] = OMIT,
        sign_with_c_2_pa: typing.Optional[bool] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Iterator[HttpResponse[typing.Iterator[bytes]]]:
        """
        Separate an audio file into individual stems. This endpoint might have high latency, depending on the length of the audio file.

        Parameters
        ----------
        file : core.File
            See `core.File` for more documentation.

        output_format : typing.Optional[MusicSeparateStemsRequestOutputFormat]
            Output format of the generated audio. Formatted as codec_sample_rate_bitrate. For example, an MP3 with a 22.05kHz sample rate at 32kbps is represented as mp3_22050_32. MP3 with 192kbps bitrate requires you to be subscribed to Creator tier or above. PCM with 44.1kHz sample rate requires you to be subscribed to Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs.

        stem_variation_id : typing.Optional[MusicSeparateStemsRequestStemVariationId]
            The ID of the stem variation to use.

        sign_with_c_2_pa : typing.Optional[bool]
            Whether to sign the generated song with C2PA. Applicable only for mp3 files.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration. You can pass in configuration such as `chunk_size`, among other options, to customize the request and response.

        Returns
        -------
        typing.Iterator[HttpResponse[typing.Iterator[bytes]]]
            ZIP archive containing separated audio stems. Each stem is provided as a separate audio file in the requested output format.
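
        Examples
        --------
        A minimal usage sketch, assuming `raw_client` is an already-constructed
        `RawMusicClient` and `mix.mp3` is a local audio file (both illustrative)::

            # raw_client: a RawMusicClient built from your configured SyncClientWrapper
            with open("mix.mp3", "rb") as audio_file:
                with raw_client.separate_stems(file=audio_file) as response:
                    with open("stems.zip", "wb") as archive:
                        for chunk in response.data:
                            archive.write(chunk)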
        """
        with self._client_wrapper.httpx_client.stream(
            "v1/music/stem-separation",
            method="POST",
            params={
                "output_format": output_format,
            },
            data={
                "stem_variation_id": stem_variation_id,
                "sign_with_c2pa": sign_with_c_2_pa,
            },
            files={
                "file": file,
            },
            request_options=request_options,
            omit=OMIT,
            force_multipart=True,
        ) as _response:

            def _stream() -> HttpResponse[typing.Iterator[bytes]]:
                try:
                    if 200 <= _response.status_code < 300:
                        _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
                        return HttpResponse(
                            response=_response, data=(_chunk for _chunk in _response.iter_bytes(chunk_size=_chunk_size))
                        )
                    _response.read()
                    if _response.status_code == 422:
                        raise UnprocessableEntityError(
                            headers=dict(_response.headers),
                            body=typing.cast(
                                HttpValidationError,
                                construct_type(
                                    type_=HttpValidationError,  # type: ignore
                                    object_=_response.json(),
                                ),
                            ),
                        )
                    _response_json = _response.json()
                except JSONDecodeError:
                    raise ApiError(
                        status_code=_response.status_code, headers=dict(_response.headers), body=_response.text
                    )
                raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)

            yield _stream()


class AsyncRawMusicClient:
    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._client_wrapper = client_wrapper

    @contextlib.asynccontextmanager
    async def compose(
        self,
        *,
        output_format: typing.Optional[MusicComposeRequestOutputFormat] = None,
        prompt: typing.Optional[str] = OMIT,
        composition_plan: typing.Optional[MusicPrompt] = OMIT,
        music_length_ms: typing.Optional[int] = OMIT,
        model_id: typing.Optional[typing.Literal["music_v1"]] = OMIT,
        force_instrumental: typing.Optional[bool] = OMIT,
        respect_sections_durations: typing.Optional[bool] = OMIT,
        store_for_inpainting: typing.Optional[bool] = OMIT,
        sign_with_c_2_pa: typing.Optional[bool] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[bytes]]]:
        """
        Compose a song from a prompt or a composition plan.

        Parameters
        ----------
        output_format : typing.Optional[MusicComposeRequestOutputFormat]
            Output format of the generated audio. Formatted as codec_sample_rate_bitrate. For example, an MP3 with a 22.05kHz sample rate at 32kbps is represented as mp3_22050_32. MP3 with 192kbps bitrate requires you to be subscribed to Creator tier or above. PCM with 44.1kHz sample rate requires you to be subscribed to Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs.

        prompt : typing.Optional[str]
            A simple text prompt to generate a song from. Cannot be used in conjunction with `composition_plan`.

        composition_plan : typing.Optional[MusicPrompt]
            A detailed composition plan to guide music generation. Cannot be used in conjunction with `prompt`.

        music_length_ms : typing.Optional[int]
            The length of the song to generate, in milliseconds. Used only in conjunction with `prompt`. Must be between 3000ms and 300000ms. If not provided, the model chooses a length based on the prompt.

        model_id : typing.Optional[typing.Literal["music_v1"]]
            The model to use for the generation.

        force_instrumental : typing.Optional[bool]
            If true, guarantees that the generated song will be instrumental. If false, the song may or may not be instrumental depending on the `prompt`. Can only be used with `prompt`.

        respect_sections_durations : typing.Optional[bool]
            Controls how strictly section durations in the `composition_plan` are enforced. Only used with `composition_plan`. When set to true, the model precisely respects each section's `duration_ms` from the plan. When set to false, the model may adjust individual section durations, which generally leads to better generation quality and improved latency, while always preserving the total song duration from the plan.

        store_for_inpainting : typing.Optional[bool]
            Whether to store the generated song for inpainting. Only available to enterprise clients with access to the inpainting API.

        sign_with_c_2_pa : typing.Optional[bool]
            Whether to sign the generated song with C2PA. Applicable only for mp3 files.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration. You can pass in configuration such as `chunk_size`, among other options, to customize the request and response.

        Returns
        -------
        typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[bytes]]]
            The generated audio file in the format specified
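
        Examples
        --------
        A minimal usage sketch, assuming `raw_client` is an already-constructed
        `AsyncRawMusicClient` and the call runs inside an event loop::

            # raw_client: an AsyncRawMusicClient built from your configured AsyncClientWrapper
            async with raw_client.compose(prompt="an upbeat synthwave track") as response:
                with open("song.mp3", "wb") as audio_file:
                    async for chunk in response.data:
                        audio_file.write(chunk)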
        """
        async with self._client_wrapper.httpx_client.stream(
            "v1/music",
            method="POST",
            params={
                "output_format": output_format,
            },
            json={
                "prompt": prompt,
                "composition_plan": convert_and_respect_annotation_metadata(
                    object_=composition_plan, annotation=MusicPrompt, direction="write"
                ),
                "music_length_ms": music_length_ms,
                "model_id": model_id,
                "force_instrumental": force_instrumental,
                "respect_sections_durations": respect_sections_durations,
                "store_for_inpainting": store_for_inpainting,
                "sign_with_c2pa": sign_with_c_2_pa,
            },
            headers={
                "content-type": "application/json",
            },
            request_options=request_options,
            omit=OMIT,
        ) as _response:

            async def _stream() -> AsyncHttpResponse[typing.AsyncIterator[bytes]]:
                try:
                    if 200 <= _response.status_code < 300:
                        _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
                        return AsyncHttpResponse(
                            response=_response,
                            data=(_chunk async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size)),
                        )
                    await _response.aread()
                    if _response.status_code == 422:
                        raise UnprocessableEntityError(
                            headers=dict(_response.headers),
                            body=typing.cast(
                                HttpValidationError,
                                construct_type(
                                    type_=HttpValidationError,  # type: ignore
                                    object_=_response.json(),
                                ),
                            ),
                        )
                    _response_json = _response.json()
                except JSONDecodeError:
                    raise ApiError(
                        status_code=_response.status_code, headers=dict(_response.headers), body=_response.text
                    )
                raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)

            yield await _stream()

    @contextlib.asynccontextmanager
    async def compose_detailed(
        self,
        *,
        output_format: typing.Optional[MusicComposeDetailedRequestOutputFormat] = None,
        prompt: typing.Optional[str] = OMIT,
        composition_plan: typing.Optional[MusicPrompt] = OMIT,
        music_length_ms: typing.Optional[int] = OMIT,
        model_id: typing.Optional[typing.Literal["music_v1"]] = OMIT,
        force_instrumental: typing.Optional[bool] = OMIT,
        store_for_inpainting: typing.Optional[bool] = OMIT,
        with_timestamps: typing.Optional[bool] = OMIT,
        sign_with_c_2_pa: typing.Optional[bool] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[bytes]]]:
        """
        Compose a song from a prompt or a composition plan, returning JSON metadata alongside the generated audio.

        Parameters
        ----------
        output_format : typing.Optional[MusicComposeDetailedRequestOutputFormat]
            Output format of the generated audio. Formatted as codec_sample_rate_bitrate. For example, an MP3 with a 22.05kHz sample rate at 32kbps is represented as mp3_22050_32. MP3 with 192kbps bitrate requires you to be subscribed to Creator tier or above. PCM with 44.1kHz sample rate requires you to be subscribed to Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs.

        prompt : typing.Optional[str]
            A simple text prompt to generate a song from. Cannot be used in conjunction with `composition_plan`.

        composition_plan : typing.Optional[MusicPrompt]
            A detailed composition plan to guide music generation. Cannot be used in conjunction with `prompt`.

        music_length_ms : typing.Optional[int]
            The length of the song to generate, in milliseconds. Used only in conjunction with `prompt`. Must be between 3000ms and 300000ms. If not provided, the model chooses a length based on the prompt.

        model_id : typing.Optional[typing.Literal["music_v1"]]
            The model to use for the generation.

        force_instrumental : typing.Optional[bool]
            If true, guarantees that the generated song will be instrumental. If false, the song may or may not be instrumental depending on the `prompt`. Can only be used with `prompt`.

        store_for_inpainting : typing.Optional[bool]
            Whether to store the generated song for inpainting. Only available to enterprise clients with access to the inpainting API.

        with_timestamps : typing.Optional[bool]
            Whether to return the timestamps of the words in the generated song.

        sign_with_c_2_pa : typing.Optional[bool]
            Whether to sign the generated song with C2PA. Applicable only for mp3 files.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration. You can pass in configuration such as `chunk_size`, among other options, to customize the request and response.

        Returns
        -------
        typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[bytes]]]
            Multipart/mixed response with JSON metadata and binary audio file
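
        Examples
        --------
        A minimal usage sketch, assuming `raw_client` is an already-constructed
        `AsyncRawMusicClient`; the streamed bytes form a multipart/mixed payload that
        the caller is responsible for parsing::

            # raw_client: an AsyncRawMusicClient built from your configured AsyncClientWrapper
            async with raw_client.compose_detailed(prompt="a calm piano piece", with_timestamps=True) as response:
                with open("detailed_response.multipart", "wb") as out:
                    async for chunk in response.data:
                        out.write(chunk)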
        """
        async with self._client_wrapper.httpx_client.stream(
            "v1/music/detailed",
            method="POST",
            params={
                "output_format": output_format,
            },
            json={
                "prompt": prompt,
                "composition_plan": convert_and_respect_annotation_metadata(
                    object_=composition_plan, annotation=MusicPrompt, direction="write"
                ),
                "music_length_ms": music_length_ms,
                "model_id": model_id,
                "force_instrumental": force_instrumental,
                "store_for_inpainting": store_for_inpainting,
                "with_timestamps": with_timestamps,
                "sign_with_c2pa": sign_with_c_2_pa,
            },
            headers={
                "content-type": "application/json",
            },
            request_options=request_options,
            omit=OMIT,
        ) as _response:

            async def _stream() -> AsyncHttpResponse[typing.AsyncIterator[bytes]]:
                try:
                    if 200 <= _response.status_code < 300:
                        _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
                        return AsyncHttpResponse(
                            response=_response,
                            data=(_chunk async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size)),
                        )
                    await _response.aread()
                    if _response.status_code == 422:
                        raise UnprocessableEntityError(
                            headers=dict(_response.headers),
                            body=typing.cast(
                                HttpValidationError,
                                construct_type(
                                    type_=HttpValidationError,  # type: ignore
                                    object_=_response.json(),
                                ),
                            ),
                        )
                    _response_json = _response.json()
                except JSONDecodeError:
                    raise ApiError(
                        status_code=_response.status_code, headers=dict(_response.headers), body=_response.text
                    )
                raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)

            yield await _stream()

    @contextlib.asynccontextmanager
    async def stream(
        self,
        *,
        output_format: typing.Optional[MusicStreamRequestOutputFormat] = None,
        prompt: typing.Optional[str] = OMIT,
        composition_plan: typing.Optional[MusicPrompt] = OMIT,
        music_length_ms: typing.Optional[int] = OMIT,
        model_id: typing.Optional[typing.Literal["music_v1"]] = OMIT,
        force_instrumental: typing.Optional[bool] = OMIT,
        store_for_inpainting: typing.Optional[bool] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[bytes]]]:
        """
        Stream a composed song from a prompt or a composition plan.

        Parameters
        ----------
        output_format : typing.Optional[MusicStreamRequestOutputFormat]
            Output format of the generated audio. Formatted as codec_sample_rate_bitrate. For example, an MP3 with a 22.05kHz sample rate at 32kbps is represented as mp3_22050_32. MP3 with 192kbps bitrate requires you to be subscribed to Creator tier or above. PCM with 44.1kHz sample rate requires you to be subscribed to Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs.

        prompt : typing.Optional[str]
            A simple text prompt to generate a song from. Cannot be used in conjunction with `composition_plan`.

        composition_plan : typing.Optional[MusicPrompt]
            A detailed composition plan to guide music generation. Cannot be used in conjunction with `prompt`.

        music_length_ms : typing.Optional[int]
            The length of the song to generate, in milliseconds. Used only in conjunction with `prompt`. Must be between 3000ms and 300000ms. If not provided, the model chooses a length based on the prompt.

        model_id : typing.Optional[typing.Literal["music_v1"]]
            The model to use for the generation.

        force_instrumental : typing.Optional[bool]
            If true, guarantees that the generated song will be instrumental. If false, the song may or may not be instrumental depending on the `prompt`. Can only be used with `prompt`.

        store_for_inpainting : typing.Optional[bool]
            Whether to store the generated song for inpainting. Only available to enterprise clients with access to the inpainting API.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration. You can pass in configuration such as `chunk_size`, among other options, to customize the request and response.

        Returns
        -------
        typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[bytes]]]
            Streaming audio data in the format specified
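
        Examples
        --------
        A minimal usage sketch, assuming `raw_client` is an already-constructed
        `AsyncRawMusicClient`; chunks arrive as the audio is generated::

            # raw_client: an AsyncRawMusicClient built from your configured AsyncClientWrapper
            async with raw_client.stream(prompt="lofi beats for studying") as response:
                async for chunk in response.data:
                    handle_audio_chunk(chunk)  # hypothetical consumer, e.g. feed a player or buffer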
        """
        async with self._client_wrapper.httpx_client.stream(
            "v1/music/stream",
            method="POST",
            params={
                "output_format": output_format,
            },
            json={
                "prompt": prompt,
                "composition_plan": convert_and_respect_annotation_metadata(
                    object_=composition_plan, annotation=MusicPrompt, direction="write"
                ),
                "music_length_ms": music_length_ms,
                "model_id": model_id,
                "force_instrumental": force_instrumental,
                "store_for_inpainting": store_for_inpainting,
            },
            headers={
                "content-type": "application/json",
            },
            request_options=request_options,
            omit=OMIT,
        ) as _response:

            async def _stream() -> AsyncHttpResponse[typing.AsyncIterator[bytes]]:
                try:
                    if 200 <= _response.status_code < 300:
                        _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
                        return AsyncHttpResponse(
                            response=_response,
                            data=(_chunk async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size)),
                        )
                    await _response.aread()
                    if _response.status_code == 422:
                        raise UnprocessableEntityError(
                            headers=dict(_response.headers),
                            body=typing.cast(
                                HttpValidationError,
                                construct_type(
                                    type_=HttpValidationError,  # type: ignore
                                    object_=_response.json(),
                                ),
                            ),
                        )
                    _response_json = _response.json()
                except JSONDecodeError:
                    raise ApiError(
                        status_code=_response.status_code, headers=dict(_response.headers), body=_response.text
                    )
                raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)

            yield await _stream()

    @contextlib.asynccontextmanager
    async def separate_stems(
        self,
        *,
        file: core.File,
        output_format: typing.Optional[MusicSeparateStemsRequestOutputFormat] = None,
        stem_variation_id: typing.Optional[MusicSeparateStemsRequestStemVariationId] = OMIT,
        sign_with_c_2_pa: typing.Optional[bool] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[bytes]]]:
        """
        Separate an audio file into individual stems. This endpoint might have high latency, depending on the length of the audio file.

        Parameters
        ----------
        file : core.File
            See `core.File` for more documentation.

        output_format : typing.Optional[MusicSeparateStemsRequestOutputFormat]
            Output format of the generated audio. Formatted as codec_sample_rate_bitrate. For example, an MP3 with a 22.05kHz sample rate at 32kbps is represented as mp3_22050_32. MP3 with 192kbps bitrate requires you to be subscribed to Creator tier or above. PCM with 44.1kHz sample rate requires you to be subscribed to Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs.

        stem_variation_id : typing.Optional[MusicSeparateStemsRequestStemVariationId]
            The ID of the stem variation to use.

        sign_with_c_2_pa : typing.Optional[bool]
            Whether to sign the generated song with C2PA. Applicable only for mp3 files.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration. You can pass in configuration such as `chunk_size`, among other options, to customize the request and response.

        Returns
        -------
        typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[bytes]]]
            ZIP archive containing separated audio stems. Each stem is provided as a separate audio file in the requested output format.
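
        Examples
        --------
        A minimal usage sketch, assuming `raw_client` is an already-constructed
        `AsyncRawMusicClient` and `mix.mp3` is a local audio file (both illustrative)::

            # raw_client: an AsyncRawMusicClient built from your configured AsyncClientWrapper
            with open("mix.mp3", "rb") as audio_file:
                async with raw_client.separate_stems(file=audio_file) as response:
                    with open("stems.zip", "wb") as archive:
                        async for chunk in response.data:
                            archive.write(chunk)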
        """
        async with self._client_wrapper.httpx_client.stream(
            "v1/music/stem-separation",
            method="POST",
            params={
                "output_format": output_format,
            },
            data={
                "stem_variation_id": stem_variation_id,
                "sign_with_c2pa": sign_with_c_2_pa,
            },
            files={
                "file": file,
            },
            request_options=request_options,
            omit=OMIT,
            force_multipart=True,
        ) as _response:

            async def _stream() -> AsyncHttpResponse[typing.AsyncIterator[bytes]]:
                try:
                    if 200 <= _response.status_code < 300:
                        _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
                        return AsyncHttpResponse(
                            response=_response,
                            data=(_chunk async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size)),
                        )
                    await _response.aread()
                    if _response.status_code == 422:
                        raise UnprocessableEntityError(
                            headers=dict(_response.headers),
                            body=typing.cast(
                                HttpValidationError,
                                construct_type(
                                    type_=HttpValidationError,  # type: ignore
                                    object_=_response.json(),
                                ),
                            ),
                        )
                    _response_json = _response.json()
                except JSONDecodeError:
                    raise ApiError(
                        status_code=_response.status_code, headers=dict(_response.headers), body=_response.text
                    )
                raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)

            yield await _stream()
