# This file was auto-generated by Fern from our API Definition.

import typing

from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
from ...core.request_options import RequestOptions
from ...types.conv_ai_webhooks import ConvAiWebhooks
from ...types.conversation_initiation_client_data_webhook import ConversationInitiationClientDataWebhook
from ...types.get_conv_ai_settings_response_model import GetConvAiSettingsResponseModel
from ...types.livekit_stack_type import LivekitStackType
from .raw_client import AsyncRawSettingsClient, RawSettingsClient

# Sentinel used as the default value for optional parameters so that omitted arguments
# can be distinguished from an explicit None.
OMIT = typing.cast(typing.Any, ...)


class SettingsClient:
    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._raw_client = RawSettingsClient(client_wrapper=client_wrapper)

    @property
    def with_raw_response(self) -> RawSettingsClient:
        """
        Retrieves a raw implementation of this client that returns raw responses.

        Returns
        -------
        RawSettingsClient
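
        Examples
        --------
        from elevenlabs import ElevenLabs

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        # Illustrative: the raw client's methods return a response wrapper whose
        # `.data` attribute holds the parsed model (as the methods below do internally).
        response = client.conversational_ai.settings.with_raw_response.get()
        settings = response.data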
        """
        return self._raw_client

    def get(self, *, request_options: typing.Optional[RequestOptions] = None) -> GetConvAiSettingsResponseModel:
        """
        Retrieve Convai settings for the workspace

        Parameters
        ----------
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        GetConvAiSettingsResponseModel
            Successful Response

        Examples
        --------
        from elevenlabs import ElevenLabs

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        client.conversational_ai.settings.get()
        """
        _response = self._raw_client.get(request_options=request_options)
        return _response.data

    def update(
        self,
        *,
        conversation_initiation_client_data_webhook: typing.Optional[ConversationInitiationClientDataWebhook] = OMIT,
        webhooks: typing.Optional[ConvAiWebhooks] = OMIT,
        can_use_mcp_servers: typing.Optional[bool] = OMIT,
        rag_retention_period_days: typing.Optional[int] = OMIT,
        default_livekit_stack: typing.Optional[LivekitStackType] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> GetConvAiSettingsResponseModel:
        """
        Update Convai settings for the workspace

        Parameters
        ----------
        conversation_initiation_client_data_webhook : typing.Optional[ConversationInitiationClientDataWebhook]
            Webhook used to fetch conversation initiation client data for the workspace

        webhooks : typing.Optional[ConvAiWebhooks]
            Workspace-level webhook settings for Conversational AI

        can_use_mcp_servers : typing.Optional[bool]
            Whether the workspace can use MCP servers

        rag_retention_period_days : typing.Optional[int]
            Retention period, in days, for RAG data in the workspace

        default_livekit_stack : typing.Optional[LivekitStackType]
            Default LiveKit stack to use

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        GetConvAiSettingsResponseModel
            Successful Response

        Examples
        --------
        from elevenlabs import ElevenLabs

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        client.conversational_ai.settings.update()
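
        # Any of the settings fields may be passed; the values below are illustrative.
        client.conversational_ai.settings.update(
            can_use_mcp_servers=True,
            rag_retention_period_days=10,
        )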
        """
        _response = self._raw_client.update(
            conversation_initiation_client_data_webhook=conversation_initiation_client_data_webhook,
            webhooks=webhooks,
            can_use_mcp_servers=can_use_mcp_servers,
            rag_retention_period_days=rag_retention_period_days,
            default_livekit_stack=default_livekit_stack,
            request_options=request_options,
        )
        return _response.data


class AsyncSettingsClient:
    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._raw_client = AsyncRawSettingsClient(client_wrapper=client_wrapper)

    @property
    def with_raw_response(self) -> AsyncRawSettingsClient:
        """
        Retrieves a raw implementation of this client that returns raw responses.

        Returns
        -------
        AsyncRawSettingsClient
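
        Examples
        --------
        import asyncio

        from elevenlabs import AsyncElevenLabs

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            # Illustrative: the raw client's methods return a response wrapper whose
            # `.data` attribute holds the parsed model (as the methods below do internally).
            response = await client.conversational_ai.settings.with_raw_response.get()
            settings = response.data


        asyncio.run(main())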
        """
        return self._raw_client

    async def get(self, *, request_options: typing.Optional[RequestOptions] = None) -> GetConvAiSettingsResponseModel:
        """
        Retrieve Convai settings for the workspace

        Parameters
        ----------
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        GetConvAiSettingsResponseModel
            Successful Response

        Examples
        --------
        import asyncio

        from elevenlabs import AsyncElevenLabs

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.conversational_ai.settings.get()


        asyncio.run(main())
        """
        _response = await self._raw_client.get(request_options=request_options)
        return _response.data

    async def update(
        self,
        *,
        conversation_initiation_client_data_webhook: typing.Optional[ConversationInitiationClientDataWebhook] = OMIT,
        webhooks: typing.Optional[ConvAiWebhooks] = OMIT,
        can_use_mcp_servers: typing.Optional[bool] = OMIT,
        rag_retention_period_days: typing.Optional[int] = OMIT,
        default_livekit_stack: typing.Optional[LivekitStackType] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> GetConvAiSettingsResponseModel:
        """
        Update Convai settings for the workspace

        Parameters
        ----------
        conversation_initiation_client_data_webhook : typing.Optional[ConversationInitiationClientDataWebhook]
            Webhook used to fetch conversation initiation client data for the workspace

        webhooks : typing.Optional[ConvAiWebhooks]
            Workspace-level webhook settings for Conversational AI

        can_use_mcp_servers : typing.Optional[bool]
            Whether the workspace can use MCP servers

        rag_retention_period_days : typing.Optional[int]
            Retention period, in days, for RAG data in the workspace

        default_livekit_stack : typing.Optional[LivekitStackType]
            Default LiveKit stack to use

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        GetConvAiSettingsResponseModel
            Successful Response

        Examples
        --------
        import asyncio

        from elevenlabs import AsyncElevenLabs

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.conversational_ai.settings.update()
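
            # Any of the settings fields may be passed; the values below are illustrative.
            await client.conversational_ai.settings.update(
                can_use_mcp_servers=True,
                rag_retention_period_days=10,
            )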


        asyncio.run(main())
        """
        _response = await self._raw_client.update(
            conversation_initiation_client_data_webhook=conversation_initiation_client_data_webhook,
            webhooks=webhooks,
            can_use_mcp_servers=can_use_mcp_servers,
            rag_retention_period_days=rag_retention_period_days,
            default_livekit_stack=default_livekit_stack,
            request_options=request_options,
        )
        return _response.data
