# This file was auto-generated by Fern from our API Definition.

from __future__ import annotations

import typing

from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
from ...core.request_options import RequestOptions
from ...types.agent_failure_response_example import AgentFailureResponseExample
from ...types.agent_successful_response_example import AgentSuccessfulResponseExample
from ...types.conversation_history_transcript_common_model_input import ConversationHistoryTranscriptCommonModelInput
from ...types.create_unit_test_response_model import CreateUnitTestResponseModel
from ...types.get_tests_page_response_model import GetTestsPageResponseModel
from ...types.get_tests_summaries_by_ids_response_model import GetTestsSummariesByIdsResponseModel
from ...types.get_unit_test_response_model import GetUnitTestResponseModel
from ...types.test_from_conversation_metadata_input import TestFromConversationMetadataInput
from ...types.unit_test_common_model_type import UnitTestCommonModelType
from ...types.unit_test_tool_call_evaluation_model_input import UnitTestToolCallEvaluationModelInput
from .raw_client import AsyncRawTestsClient, RawTestsClient
from .types.create_unit_test_request_dynamic_variables_value import CreateUnitTestRequestDynamicVariablesValue
from .types.update_unit_test_request_dynamic_variables_value import UpdateUnitTestRequestDynamicVariablesValue

if typing.TYPE_CHECKING:
    from .invocations.client import AsyncInvocationsClient, InvocationsClient
# this is used as the default value for optional parameters
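# (an Ellipsis sentinel rather than None, so that an explicit None can still be sent as a value)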
OMIT = typing.cast(typing.Any, ...)


class TestsClient:
    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._raw_client = RawTestsClient(client_wrapper=client_wrapper)
        self._client_wrapper = client_wrapper
        self._invocations: typing.Optional[InvocationsClient] = None

    @property
    def with_raw_response(self) -> RawTestsClient:
        """
        Retrieves a raw implementation of this client that returns raw responses.

        Returns
        -------
        RawTestsClient
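
        Examples
        --------
        # A minimal sketch: the raw client mirrors this client's methods, and the
        # parsed model is exposed on the returned response's .data attribute (the
        # same attribute this client reads internally).
        from elevenlabs import ElevenLabs

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        raw_response = client.conversational_ai.tests.with_raw_response.get(
            "TeaqRRdTcIfIu2i7BYfT",
        )
        test = raw_response.data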
        """
        return self._raw_client

    def create(
        self,
        *,
        chat_history: typing.Sequence[ConversationHistoryTranscriptCommonModelInput],
        success_condition: str,
        success_examples: typing.Sequence[AgentSuccessfulResponseExample],
        failure_examples: typing.Sequence[AgentFailureResponseExample],
        name: str,
        tool_call_parameters: typing.Optional[UnitTestToolCallEvaluationModelInput] = OMIT,
        dynamic_variables: typing.Optional[
            typing.Dict[str, typing.Optional[CreateUnitTestRequestDynamicVariablesValue]]
        ] = OMIT,
        type: typing.Optional[UnitTestCommonModelType] = OMIT,
        from_conversation_metadata: typing.Optional[TestFromConversationMetadataInput] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> CreateUnitTestResponseModel:
        """
        Creates a new agent response test.

        Parameters
        ----------
        chat_history : typing.Sequence[ConversationHistoryTranscriptCommonModelInput]

        success_condition : str
            A prompt used to evaluate whether the agent's response is successful; the evaluation should return True or False.

        success_examples : typing.Sequence[AgentSuccessfulResponseExample]
            Non-empty list of example responses that should be considered successful.

        failure_examples : typing.Sequence[AgentFailureResponseExample]
            Non-empty list of example responses that should be considered failures.

        name : str

        tool_call_parameters : typing.Optional[UnitTestToolCallEvaluationModelInput]
            How to evaluate the agent's tool call (if any). If empty, the tool call is not evaluated.

        dynamic_variables : typing.Optional[typing.Dict[str, typing.Optional[CreateUnitTestRequestDynamicVariablesValue]]]
            Dynamic variables to replace in the agent config during testing.

        type : typing.Optional[UnitTestCommonModelType]

        from_conversation_metadata : typing.Optional[TestFromConversationMetadataInput]
            Metadata of a conversation this test was created from (if applicable).

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        CreateUnitTestResponseModel
            Successful Response

        Examples
        --------
        from elevenlabs import (
            AgentFailureResponseExample,
            AgentSuccessfulResponseExample,
            ConversationHistoryTranscriptCommonModelInput,
            ElevenLabs,
        )

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        client.conversational_ai.tests.create(
            chat_history=[
                ConversationHistoryTranscriptCommonModelInput(
                    role="user",
                    time_in_call_secs=1,
                )
            ],
            success_condition="success_condition",
            success_examples=[
                AgentSuccessfulResponseExample(
                    response="response",
                )
            ],
            failure_examples=[
                AgentFailureResponseExample(
                    response="response",
                )
            ],
            name="name",
        )
        """
        _response = self._raw_client.create(
            chat_history=chat_history,
            success_condition=success_condition,
            success_examples=success_examples,
            failure_examples=failure_examples,
            name=name,
            tool_call_parameters=tool_call_parameters,
            dynamic_variables=dynamic_variables,
            type=type,
            from_conversation_metadata=from_conversation_metadata,
            request_options=request_options,
        )
        return _response.data

    def get(self, test_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> GetUnitTestResponseModel:
        """
        Gets an agent response test by ID.

        Parameters
        ----------
        test_id : str
            The id of a chat response test. This is returned on test creation.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        GetUnitTestResponseModel
            Successful Response

        Examples
        --------
        from elevenlabs import ElevenLabs

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        client.conversational_ai.tests.get(
            test_id="TeaqRRdTcIfIu2i7BYfT",
        )
        """
        _response = self._raw_client.get(test_id, request_options=request_options)
        return _response.data

    def update(
        self,
        test_id: str,
        *,
        chat_history: typing.Sequence[ConversationHistoryTranscriptCommonModelInput],
        success_condition: str,
        success_examples: typing.Sequence[AgentSuccessfulResponseExample],
        failure_examples: typing.Sequence[AgentFailureResponseExample],
        name: str,
        tool_call_parameters: typing.Optional[UnitTestToolCallEvaluationModelInput] = OMIT,
        dynamic_variables: typing.Optional[
            typing.Dict[str, typing.Optional[UpdateUnitTestRequestDynamicVariablesValue]]
        ] = OMIT,
        type: typing.Optional[UnitTestCommonModelType] = OMIT,
        from_conversation_metadata: typing.Optional[TestFromConversationMetadataInput] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> GetUnitTestResponseModel:
        """
        Updates an agent response test by ID.

        Parameters
        ----------
        test_id : str
            The id of a chat response test. This is returned on test creation.

        chat_history : typing.Sequence[ConversationHistoryTranscriptCommonModelInput]

        success_condition : str
            A prompt used to evaluate whether the agent's response is successful; the evaluation should return True or False.

        success_examples : typing.Sequence[AgentSuccessfulResponseExample]
            Non-empty list of example responses that should be considered successful.

        failure_examples : typing.Sequence[AgentFailureResponseExample]
            Non-empty list of example responses that should be considered failures.

        name : str

        tool_call_parameters : typing.Optional[UnitTestToolCallEvaluationModelInput]
            How to evaluate the agent's tool call (if any). If empty, the tool call is not evaluated.

        dynamic_variables : typing.Optional[typing.Dict[str, typing.Optional[UpdateUnitTestRequestDynamicVariablesValue]]]
            Dynamic variables to replace in the agent config during testing.

        type : typing.Optional[UnitTestCommonModelType]

        from_conversation_metadata : typing.Optional[TestFromConversationMetadataInput]
            Metadata of a conversation this test was created from (if applicable).

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        GetUnitTestResponseModel
            Successful Response

        Examples
        --------
        from elevenlabs import (
            AgentFailureResponseExample,
            AgentSuccessfulResponseExample,
            ConversationHistoryTranscriptCommonModelInput,
            ElevenLabs,
        )

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        client.conversational_ai.tests.update(
            test_id="TeaqRRdTcIfIu2i7BYfT",
            chat_history=[
                ConversationHistoryTranscriptCommonModelInput(
                    role="user",
                    time_in_call_secs=1,
                )
            ],
            success_condition="success_condition",
            success_examples=[
                AgentSuccessfulResponseExample(
                    response="response",
                )
            ],
            failure_examples=[
                AgentFailureResponseExample(
                    response="response",
                )
            ],
            name="name",
        )
        """
        _response = self._raw_client.update(
            test_id,
            chat_history=chat_history,
            success_condition=success_condition,
            success_examples=success_examples,
            failure_examples=failure_examples,
            name=name,
            tool_call_parameters=tool_call_parameters,
            dynamic_variables=dynamic_variables,
            type=type,
            from_conversation_metadata=from_conversation_metadata,
            request_options=request_options,
        )
        return _response.data

    def delete(self, test_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
        """
        Deletes an agent response test by ID.

        Parameters
        ----------
        test_id : str
            The id of a chat response test. This is returned on test creation.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.Any
            Successful Response

        Examples
        --------
        from elevenlabs import ElevenLabs

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        client.conversational_ai.tests.delete(
            test_id="TeaqRRdTcIfIu2i7BYfT",
        )
        """
        _response = self._raw_client.delete(test_id, request_options=request_options)
        return _response.data

    def summaries(
        self, *, test_ids: typing.Sequence[str], request_options: typing.Optional[RequestOptions] = None
    ) -> GetTestsSummariesByIdsResponseModel:
        """
        Gets multiple agent response tests by their IDs. Returns a dictionary mapping test IDs to test summaries.

        Parameters
        ----------
        test_ids : typing.Sequence[str]
            List of test IDs to fetch. No duplicates allowed.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        GetTestsSummariesByIdsResponseModel
            Successful Response

        Examples
        --------
        from elevenlabs import ElevenLabs

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        client.conversational_ai.tests.summaries(
            test_ids=["test_id_1", "test_id_2"],
        )
        """
        _response = self._raw_client.summaries(test_ids=test_ids, request_options=request_options)
        return _response.data

    def list(
        self,
        *,
        cursor: typing.Optional[str] = None,
        page_size: typing.Optional[int] = None,
        search: typing.Optional[str] = None,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> GetTestsPageResponseModel:
        """
        Lists all agent response tests with pagination support and optional search filtering.

        Parameters
        ----------
        cursor : typing.Optional[str]
            Used to fetch the next page; the cursor is returned in the response.

        page_size : typing.Optional[int]
            Maximum number of tests to return. Cannot exceed 100; defaults to 30.

        search : typing.Optional[str]
            Search query to filter tests by name.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        GetTestsPageResponseModel
            Successful Response

        Examples
        --------
        from elevenlabs import ElevenLabs

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        client.conversational_ai.tests.list(
            cursor="cursor",
            page_size=1,
            search="search",
        )
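
        # A hedged sketch of walking every page of tests. The response field names
        # ``tests``, ``has_more`` and ``next_cursor`` are assumptions about
        # GetTestsPageResponseModel, not confirmed by this module.
        cursor = None
        while True:
            page = client.conversational_ai.tests.list(cursor=cursor, page_size=30)
            for test in page.tests:
                print(test)
            if not page.has_more:
                break
            cursor = page.next_cursor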
        """
        _response = self._raw_client.list(
            cursor=cursor, page_size=page_size, search=search, request_options=request_options
        )
        return _response.data

    @property
    def invocations(self):
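        """
        Lazily constructs and caches the InvocationsClient. The import is deferred
        to call time, most likely to avoid a circular import at module load.
        """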
        if self._invocations is None:
            from .invocations.client import InvocationsClient  # noqa: E402

            self._invocations = InvocationsClient(client_wrapper=self._client_wrapper)
        return self._invocations


class AsyncTestsClient:
    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._raw_client = AsyncRawTestsClient(client_wrapper=client_wrapper)
        self._client_wrapper = client_wrapper
        self._invocations: typing.Optional[AsyncInvocationsClient] = None

    @property
    def with_raw_response(self) -> AsyncRawTestsClient:
        """
        Retrieves a raw implementation of this client that returns raw responses.

        Returns
        -------
        AsyncRawTestsClient
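
        Examples
        --------
        # A minimal sketch: the async raw client mirrors this client's methods, and
        # the parsed model is exposed on the awaited response's .data attribute (the
        # same attribute this client reads internally).
        import asyncio

        from elevenlabs import AsyncElevenLabs

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            raw_response = await client.conversational_ai.tests.with_raw_response.get(
                "TeaqRRdTcIfIu2i7BYfT",
            )
            test = raw_response.data


        asyncio.run(main())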
        """
        return self._raw_client

    async def create(
        self,
        *,
        chat_history: typing.Sequence[ConversationHistoryTranscriptCommonModelInput],
        success_condition: str,
        success_examples: typing.Sequence[AgentSuccessfulResponseExample],
        failure_examples: typing.Sequence[AgentFailureResponseExample],
        name: str,
        tool_call_parameters: typing.Optional[UnitTestToolCallEvaluationModelInput] = OMIT,
        dynamic_variables: typing.Optional[
            typing.Dict[str, typing.Optional[CreateUnitTestRequestDynamicVariablesValue]]
        ] = OMIT,
        type: typing.Optional[UnitTestCommonModelType] = OMIT,
        from_conversation_metadata: typing.Optional[TestFromConversationMetadataInput] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> CreateUnitTestResponseModel:
        """
        Creates a new agent response test.

        Parameters
        ----------
        chat_history : typing.Sequence[ConversationHistoryTranscriptCommonModelInput]

        success_condition : str
            A prompt used to evaluate whether the agent's response is successful; the evaluation should return True or False.

        success_examples : typing.Sequence[AgentSuccessfulResponseExample]
            Non-empty list of example responses that should be considered successful.

        failure_examples : typing.Sequence[AgentFailureResponseExample]
            Non-empty list of example responses that should be considered failures.

        name : str

        tool_call_parameters : typing.Optional[UnitTestToolCallEvaluationModelInput]
            How to evaluate the agent's tool call (if any). If empty, the tool call is not evaluated.

        dynamic_variables : typing.Optional[typing.Dict[str, typing.Optional[CreateUnitTestRequestDynamicVariablesValue]]]
            Dynamic variables to replace in the agent config during testing.

        type : typing.Optional[UnitTestCommonModelType]

        from_conversation_metadata : typing.Optional[TestFromConversationMetadataInput]
            Metadata of a conversation this test was created from (if applicable).

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        CreateUnitTestResponseModel
            Successful Response

        Examples
        --------
        import asyncio

        from elevenlabs import (
            AgentFailureResponseExample,
            AgentSuccessfulResponseExample,
            AsyncElevenLabs,
            ConversationHistoryTranscriptCommonModelInput,
        )

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.conversational_ai.tests.create(
                chat_history=[
                    ConversationHistoryTranscriptCommonModelInput(
                        role="user",
                        time_in_call_secs=1,
                    )
                ],
                success_condition="success_condition",
                success_examples=[
                    AgentSuccessfulResponseExample(
                        response="response",
                    )
                ],
                failure_examples=[
                    AgentFailureResponseExample(
                        response="response",
                    )
                ],
                name="name",
            )


        asyncio.run(main())
        """
        _response = await self._raw_client.create(
            chat_history=chat_history,
            success_condition=success_condition,
            success_examples=success_examples,
            failure_examples=failure_examples,
            name=name,
            tool_call_parameters=tool_call_parameters,
            dynamic_variables=dynamic_variables,
            type=type,
            from_conversation_metadata=from_conversation_metadata,
            request_options=request_options,
        )
        return _response.data

    async def get(
        self, test_id: str, *, request_options: typing.Optional[RequestOptions] = None
    ) -> GetUnitTestResponseModel:
        """
        Gets an agent response test by ID.

        Parameters
        ----------
        test_id : str
            The id of a chat response test. This is returned on test creation.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        GetUnitTestResponseModel
            Successful Response

        Examples
        --------
        import asyncio

        from elevenlabs import AsyncElevenLabs

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.conversational_ai.tests.get(
                test_id="TeaqRRdTcIfIu2i7BYfT",
            )


        asyncio.run(main())
        """
        _response = await self._raw_client.get(test_id, request_options=request_options)
        return _response.data

    async def update(
        self,
        test_id: str,
        *,
        chat_history: typing.Sequence[ConversationHistoryTranscriptCommonModelInput],
        success_condition: str,
        success_examples: typing.Sequence[AgentSuccessfulResponseExample],
        failure_examples: typing.Sequence[AgentFailureResponseExample],
        name: str,
        tool_call_parameters: typing.Optional[UnitTestToolCallEvaluationModelInput] = OMIT,
        dynamic_variables: typing.Optional[
            typing.Dict[str, typing.Optional[UpdateUnitTestRequestDynamicVariablesValue]]
        ] = OMIT,
        type: typing.Optional[UnitTestCommonModelType] = OMIT,
        from_conversation_metadata: typing.Optional[TestFromConversationMetadataInput] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> GetUnitTestResponseModel:
        """
        Updates an agent response test by ID.

        Parameters
        ----------
        test_id : str
            The id of a chat response test. This is returned on test creation.

        chat_history : typing.Sequence[ConversationHistoryTranscriptCommonModelInput]

        success_condition : str
            A prompt used to evaluate whether the agent's response is successful; the evaluation should return True or False.

        success_examples : typing.Sequence[AgentSuccessfulResponseExample]
            Non-empty list of example responses that should be considered successful.

        failure_examples : typing.Sequence[AgentFailureResponseExample]
            Non-empty list of example responses that should be considered failures.

        name : str

        tool_call_parameters : typing.Optional[UnitTestToolCallEvaluationModelInput]
            How to evaluate the agent's tool call (if any). If empty, the tool call is not evaluated.

        dynamic_variables : typing.Optional[typing.Dict[str, typing.Optional[UpdateUnitTestRequestDynamicVariablesValue]]]
            Dynamic variables to replace in the agent config during testing.

        type : typing.Optional[UnitTestCommonModelType]

        from_conversation_metadata : typing.Optional[TestFromConversationMetadataInput]
            Metadata of a conversation this test was created from (if applicable).

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        GetUnitTestResponseModel
            Successful Response

        Examples
        --------
        import asyncio

        from elevenlabs import (
            AgentFailureResponseExample,
            AgentSuccessfulResponseExample,
            AsyncElevenLabs,
            ConversationHistoryTranscriptCommonModelInput,
        )

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.conversational_ai.tests.update(
                test_id="TeaqRRdTcIfIu2i7BYfT",
                chat_history=[
                    ConversationHistoryTranscriptCommonModelInput(
                        role="user",
                        time_in_call_secs=1,
                    )
                ],
                success_condition="success_condition",
                success_examples=[
                    AgentSuccessfulResponseExample(
                        response="response",
                    )
                ],
                failure_examples=[
                    AgentFailureResponseExample(
                        response="response",
                    )
                ],
                name="name",
            )


        asyncio.run(main())
        """
        _response = await self._raw_client.update(
            test_id,
            chat_history=chat_history,
            success_condition=success_condition,
            success_examples=success_examples,
            failure_examples=failure_examples,
            name=name,
            tool_call_parameters=tool_call_parameters,
            dynamic_variables=dynamic_variables,
            type=type,
            from_conversation_metadata=from_conversation_metadata,
            request_options=request_options,
        )
        return _response.data

    async def delete(self, test_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
        """
        Deletes an agent response test by ID.

        Parameters
        ----------
        test_id : str
            The id of a chat response test. This is returned on test creation.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.Any
            Successful Response

        Examples
        --------
        import asyncio

        from elevenlabs import AsyncElevenLabs

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.conversational_ai.tests.delete(
                test_id="TeaqRRdTcIfIu2i7BYfT",
            )


        asyncio.run(main())
        """
        _response = await self._raw_client.delete(test_id, request_options=request_options)
        return _response.data

    async def summaries(
        self, *, test_ids: typing.Sequence[str], request_options: typing.Optional[RequestOptions] = None
    ) -> GetTestsSummariesByIdsResponseModel:
        """
        Gets multiple agent response tests by their IDs. Returns a dictionary mapping test IDs to test summaries.

        Parameters
        ----------
        test_ids : typing.Sequence[str]
            List of test IDs to fetch. No duplicates allowed.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        GetTestsSummariesByIdsResponseModel
            Successful Response

        Examples
        --------
        import asyncio

        from elevenlabs import AsyncElevenLabs

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.conversational_ai.tests.summaries(
                test_ids=["test_id_1", "test_id_2"],
            )


        asyncio.run(main())
        """
        _response = await self._raw_client.summaries(test_ids=test_ids, request_options=request_options)
        return _response.data

    async def list(
        self,
        *,
        cursor: typing.Optional[str] = None,
        page_size: typing.Optional[int] = None,
        search: typing.Optional[str] = None,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> GetTestsPageResponseModel:
        """
        Lists all agent response tests with pagination support and optional search filtering.

        Parameters
        ----------
        cursor : typing.Optional[str]
            Used to fetch the next page; the cursor is returned in the response.

        page_size : typing.Optional[int]
            Maximum number of tests to return. Cannot exceed 100; defaults to 30.

        search : typing.Optional[str]
            Search query to filter tests by name.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        GetTestsPageResponseModel
            Successful Response

        Examples
        --------
        import asyncio

        from elevenlabs import AsyncElevenLabs

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.conversational_ai.tests.list(
                cursor="cursor",
                page_size=1,
                search="search",
            )


        asyncio.run(main())
        """
        _response = await self._raw_client.list(
            cursor=cursor, page_size=page_size, search=search, request_options=request_options
        )
        return _response.data

    @property
    def invocations(self):
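        """
        Lazily constructs and caches the AsyncInvocationsClient. The import is
        deferred to call time, most likely to avoid a circular import at module load.
        """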
        if self._invocations is None:
            from .invocations.client import AsyncInvocationsClient  # noqa: E402

            self._invocations = AsyncInvocationsClient(client_wrapper=self._client_wrapper)
        return self._invocations
