# This file was auto-generated by Fern from our API Definition.

import typing

from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
from ...core.request_options import RequestOptions
from ...types.batch_call_detailed_response import BatchCallDetailedResponse
from ...types.batch_call_response import BatchCallResponse
from ...types.outbound_call_recipient import OutboundCallRecipient
from ...types.workspace_batch_calls_response import WorkspaceBatchCallsResponse
from .raw_client import AsyncRawBatchCallsClient, RawBatchCallsClient

# this is used as the default value for optional parameters
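# (the Ellipsis sentinel lets the SDK distinguish "parameter not provided" from an explicit None)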
OMIT = typing.cast(typing.Any, ...)


class BatchCallsClient:
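    """Client for batch outbound calling: create, list, inspect, cancel, and retry batch calls."""
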
    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._raw_client = RawBatchCallsClient(client_wrapper=client_wrapper)

    @property
    def with_raw_response(self) -> RawBatchCallsClient:
        """
        Retrieves a raw implementation of this client that returns raw responses.

        Returns
        -------
        RawBatchCallsClient
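
        Examples
        --------
        from elevenlabs import ElevenLabs

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        # A minimal sketch: the raw client returns a response wrapper whose parsed
        # payload is available via ``.data`` (as used throughout this module).
        raw_response = client.conversational_ai.batch_calls.with_raw_response.list(limit=1)
        batches = raw_response.data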
        """
        return self._raw_client

    def create(
        self,
        *,
        call_name: str,
        agent_id: str,
        recipients: typing.Sequence[OutboundCallRecipient],
        scheduled_time_unix: typing.Optional[int] = OMIT,
        agent_phone_number_id: typing.Optional[str] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> BatchCallResponse:
        """
        Submit a batch call request to schedule calls for multiple recipients.

        Parameters
        ----------
        call_name : str
            Name of the batch call.

        agent_id : str
            ID of the agent that will make the calls.

        recipients : typing.Sequence[OutboundCallRecipient]
            Recipients to be called as part of this batch.

        scheduled_time_unix : typing.Optional[int]
            Unix timestamp at which the batch of calls should be scheduled.

        agent_phone_number_id : typing.Optional[str]
            ID of the phone number the agent should place the calls from.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        BatchCallResponse
            Successful Response

        Examples
        --------
        from elevenlabs import ElevenLabs, OutboundCallRecipient

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        client.conversational_ai.batch_calls.create(
            call_name="call_name",
            agent_id="agent_id",
            recipients=[OutboundCallRecipient()],
        )
        """
        _response = self._raw_client.create(
            call_name=call_name,
            agent_id=agent_id,
            recipients=recipients,
            scheduled_time_unix=scheduled_time_unix,
            agent_phone_number_id=agent_phone_number_id,
            request_options=request_options,
        )
        return _response.data

    def list(
        self,
        *,
        limit: typing.Optional[int] = None,
        last_doc: typing.Optional[str] = None,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> WorkspaceBatchCallsResponse:
        """
        Get all batch calls for the current workspace.

        Parameters
        ----------
        limit : typing.Optional[int]
            Maximum number of batch calls to return.

        last_doc : typing.Optional[str]
            Cursor from a previous page of results, used for pagination.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        WorkspaceBatchCallsResponse
            Successful Response

        Examples
        --------
        from elevenlabs import ElevenLabs

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        client.conversational_ai.batch_calls.list(
            limit=1,
            last_doc="last_doc",
        )
        """
        _response = self._raw_client.list(limit=limit, last_doc=last_doc, request_options=request_options)
        return _response.data

    def get(
        self, batch_id: str, *, request_options: typing.Optional[RequestOptions] = None
    ) -> BatchCallDetailedResponse:
        """
        Get detailed information about a batch call including all recipients.

        Parameters
        ----------
        batch_id : str
            ID of the batch call.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        BatchCallDetailedResponse
            Successful Response

        Examples
        --------
        from elevenlabs import ElevenLabs

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        client.conversational_ai.batch_calls.get(
            batch_id="batch_id",
        )
        """
        _response = self._raw_client.get(batch_id, request_options=request_options)
        return _response.data

    def cancel(self, batch_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> BatchCallResponse:
        """
        Cancel a running batch call and set all recipients to cancelled status.

        Parameters
        ----------
        batch_id : str
            ID of the batch call.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        BatchCallResponse
            Successful Response

        Examples
        --------
        from elevenlabs import ElevenLabs

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        client.conversational_ai.batch_calls.cancel(
            batch_id="batch_id",
        )
        """
        _response = self._raw_client.cancel(batch_id, request_options=request_options)
        return _response.data

    def retry(self, batch_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> BatchCallResponse:
        """
        Retry a batch call, calling failed and no-response recipients again.

        Parameters
        ----------
        batch_id : str
            ID of the batch call.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        BatchCallResponse
            Successful Response

        Examples
        --------
        from elevenlabs import ElevenLabs

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        client.conversational_ai.batch_calls.retry(
            batch_id="batch_id",
        )
        """
        _response = self._raw_client.retry(batch_id, request_options=request_options)
        return _response.data


class AsyncBatchCallsClient:
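    """Asynchronous client for batch outbound calling: create, list, inspect, cancel, and retry batch calls."""
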
    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._raw_client = AsyncRawBatchCallsClient(client_wrapper=client_wrapper)

    @property
    def with_raw_response(self) -> AsyncRawBatchCallsClient:
        """
        Retrieves a raw implementation of this client that returns raw responses.

        Returns
        -------
        AsyncRawBatchCallsClient
        """
        return self._raw_client

    async def create(
        self,
        *,
        call_name: str,
        agent_id: str,
        recipients: typing.Sequence[OutboundCallRecipient],
        scheduled_time_unix: typing.Optional[int] = OMIT,
        agent_phone_number_id: typing.Optional[str] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> BatchCallResponse:
        """
        Submit a batch call request to schedule calls for multiple recipients.

        Parameters
        ----------
        call_name : str
            Name of the batch call.

        agent_id : str
            ID of the agent that will make the calls.

        recipients : typing.Sequence[OutboundCallRecipient]
            Recipients to be called as part of this batch.

        scheduled_time_unix : typing.Optional[int]
            Unix timestamp at which the batch of calls should be scheduled.

        agent_phone_number_id : typing.Optional[str]
            ID of the phone number the agent should place the calls from.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        BatchCallResponse
            Successful Response

        Examples
        --------
        import asyncio

        from elevenlabs import AsyncElevenLabs, OutboundCallRecipient

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.conversational_ai.batch_calls.create(
                call_name="call_name",
                agent_id="agent_id",
                recipients=[OutboundCallRecipient()],
            )


        asyncio.run(main())
        """
        _response = await self._raw_client.create(
            call_name=call_name,
            agent_id=agent_id,
            recipients=recipients,
            scheduled_time_unix=scheduled_time_unix,
            agent_phone_number_id=agent_phone_number_id,
            request_options=request_options,
        )
        return _response.data

    async def list(
        self,
        *,
        limit: typing.Optional[int] = None,
        last_doc: typing.Optional[str] = None,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> WorkspaceBatchCallsResponse:
        """
        Get all batch calls for the current workspace.

        Parameters
        ----------
        limit : typing.Optional[int]
            Maximum number of batch calls to return.

        last_doc : typing.Optional[str]
            Cursor from a previous page of results, used for pagination.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        WorkspaceBatchCallsResponse
            Successful Response

        Examples
        --------
        import asyncio

        from elevenlabs import AsyncElevenLabs

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.conversational_ai.batch_calls.list(
                limit=1,
                last_doc="last_doc",
            )


        asyncio.run(main())
        """
        _response = await self._raw_client.list(limit=limit, last_doc=last_doc, request_options=request_options)
        return _response.data

    async def get(
        self, batch_id: str, *, request_options: typing.Optional[RequestOptions] = None
    ) -> BatchCallDetailedResponse:
        """
        Get detailed information about a batch call including all recipients.

        Parameters
        ----------
        batch_id : str
            ID of the batch call.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        BatchCallDetailedResponse
            Successful Response

        Examples
        --------
        import asyncio

        from elevenlabs import AsyncElevenLabs

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.conversational_ai.batch_calls.get(
                batch_id="batch_id",
            )


        asyncio.run(main())
        """
        _response = await self._raw_client.get(batch_id, request_options=request_options)
        return _response.data

    async def cancel(
        self, batch_id: str, *, request_options: typing.Optional[RequestOptions] = None
    ) -> BatchCallResponse:
        """
        Cancel a running batch call and set all recipients to cancelled status.

        Parameters
        ----------
        batch_id : str
            ID of the batch call.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        BatchCallResponse
            Successful Response

        Examples
        --------
        import asyncio

        from elevenlabs import AsyncElevenLabs

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.conversational_ai.batch_calls.cancel(
                batch_id="batch_id",
            )


        asyncio.run(main())
        """
        _response = await self._raw_client.cancel(batch_id, request_options=request_options)
        return _response.data

    async def retry(
        self, batch_id: str, *, request_options: typing.Optional[RequestOptions] = None
    ) -> BatchCallResponse:
        """
        Retry a batch call, calling failed and no-response recipients again.

        Parameters
        ----------
        batch_id : str
            ID of the batch call.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        BatchCallResponse
            Successful Response

        Examples
        --------
        import asyncio

        from elevenlabs import AsyncElevenLabs

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.conversational_ai.batch_calls.retry(
                batch_id="batch_id",
            )


        asyncio.run(main())
        """
        _response = await self._raw_client.retry(batch_id, request_options=request_options)
        return _response.data
