# This file was auto-generated by Fern from our API Definition.

from __future__ import annotations

import typing

from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
from ...core.request_options import RequestOptions
from ...types.agent_failure_response_example import AgentFailureResponseExample
from ...types.agent_successful_response_example import AgentSuccessfulResponseExample
from ...types.conversation_history_transcript_common_model_input import (
    ConversationHistoryTranscriptCommonModelInput,
)
from ...types.create_unit_test_response_model import CreateUnitTestResponseModel
from ...types.get_tests_page_response_model import GetTestsPageResponseModel
from ...types.get_tests_summaries_by_ids_response_model import GetTestsSummariesByIdsResponseModel
from ...types.get_unit_test_response_model import GetUnitTestResponseModel
from ...types.test_from_conversation_metadata_input import TestFromConversationMetadataInput
from ...types.unit_test_common_model_type import UnitTestCommonModelType
from ...types.unit_test_tool_call_evaluation_model_input import UnitTestToolCallEvaluationModelInput
from .raw_client import AsyncRawTestsClient, RawTestsClient
from .types.create_unit_test_request_dynamic_variables_value import CreateUnitTestRequestDynamicVariablesValue
from .types.update_unit_test_request_dynamic_variables_value import UpdateUnitTestRequestDynamicVariablesValue

if typing.TYPE_CHECKING:
    from .invocations.client import AsyncInvocationsClient, InvocationsClient

# this is used as the default value for optional parameters
OMIT = typing.cast(typing.Any, ...)


class TestsClient:
    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._raw_client = RawTestsClient(client_wrapper=client_wrapper)
        self._client_wrapper = client_wrapper
        self._invocations: typing.Optional[InvocationsClient] = None

    @property
    def with_raw_response(self) -> RawTestsClient:
        """
        Retrieves a raw implementation of this client that returns raw responses.

        Returns
        -------
        RawTestsClient
        """
        return self._raw_client

    def create(
        self,
        *,
        chat_history: typing.Sequence[ConversationHistoryTranscriptCommonModelInput],
        success_condition: str,
        success_examples: typing.Sequence[AgentSuccessfulResponseExample],
        failure_examples: typing.Sequence[AgentFailureResponseExample],
        name: str,
        tool_call_parameters: typing.Optional[UnitTestToolCallEvaluationModelInput] = OMIT,
        dynamic_variables: typing.Optional[
            typing.Dict[str, typing.Optional[CreateUnitTestRequestDynamicVariablesValue]]
        ] = OMIT,
        type: typing.Optional[UnitTestCommonModelType] = OMIT,
        from_conversation_metadata: typing.Optional[TestFromConversationMetadataInput] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> CreateUnitTestResponseModel:
        """
        Creates a new agent response test.

        Parameters
        ----------
        chat_history : typing.Sequence[ConversationHistoryTranscriptCommonModelInput]

        success_condition : str
            A prompt that evaluates whether the agent's response is successful. Should return True or False.

        success_examples : typing.Sequence[AgentSuccessfulResponseExample]
            Non-empty list of example responses that should be considered successful

        failure_examples : typing.Sequence[AgentFailureResponseExample]
            Non-empty list of example responses that should be considered failures

        name : str

        tool_call_parameters : typing.Optional[UnitTestToolCallEvaluationModelInput]
            How to evaluate the agent's tool call (if any). If empty, the tool call is not evaluated.

        dynamic_variables : typing.Optional[typing.Dict[str, typing.Optional[CreateUnitTestRequestDynamicVariablesValue]]]
            Dynamic variables to replace in the agent config during testing

        type : typing.Optional[UnitTestCommonModelType]

        from_conversation_metadata : typing.Optional[TestFromConversationMetadataInput]
            Metadata of a conversation this test was created from (if applicable).

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        CreateUnitTestResponseModel
            Successful Response

        Examples
        --------
        from elevenlabs import (
            AgentFailureResponseExample,
            AgentSuccessfulResponseExample,
            ConversationHistoryTranscriptCommonModelInput,
            ElevenLabs,
        )

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        client.conversational_ai.tests.create(
            chat_history=[
                ConversationHistoryTranscriptCommonModelInput(
                    role="user",
                    time_in_call_secs=1,
                )
            ],
            success_condition="success_condition",
            success_examples=[
                AgentSuccessfulResponseExample(
                    response="response",
                )
            ],
            failure_examples=[
                AgentFailureResponseExample(
                    response="response",
                )
            ],
            name="name",
        )
        """
        _response = self._raw_client.create(
            chat_history=chat_history,
            success_condition=success_condition,
            success_examples=success_examples,
            failure_examples=failure_examples,
            name=name,
            tool_call_parameters=tool_call_parameters,
            dynamic_variables=dynamic_variables,
            type=type,
            from_conversation_metadata=from_conversation_metadata,
            request_options=request_options,
        )
        return _response.data

    def get(
        self, test_id: str, *, request_options: typing.Optional[RequestOptions] = None
    ) -> GetUnitTestResponseModel:
        """
        Gets an agent response test by ID.

        Parameters
        ----------
        test_id : str
            The id of a chat response test. This is returned on test creation.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        GetUnitTestResponseModel
            Successful Response

        Examples
        --------
        from elevenlabs import ElevenLabs

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        client.conversational_ai.tests.get(
            test_id="TeaqRRdTcIfIu2i7BYfT",
        )
        """
        _response = self._raw_client.get(test_id, request_options=request_options)
        return _response.data

    def update(
        self,
        test_id: str,
        *,
        chat_history: typing.Sequence[ConversationHistoryTranscriptCommonModelInput],
        success_condition: str,
        success_examples: typing.Sequence[AgentSuccessfulResponseExample],
        failure_examples: typing.Sequence[AgentFailureResponseExample],
        name: str,
        tool_call_parameters: typing.Optional[UnitTestToolCallEvaluationModelInput] = OMIT,
        dynamic_variables: typing.Optional[
            typing.Dict[str, typing.Optional[UpdateUnitTestRequestDynamicVariablesValue]]
        ] = OMIT,
        type: typing.Optional[UnitTestCommonModelType] = OMIT,
        from_conversation_metadata: typing.Optional[TestFromConversationMetadataInput] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> GetUnitTestResponseModel:
        """
        Updates an agent response test by ID.

        Parameters
        ----------
        test_id : str
            The id of a chat response test. This is returned on test creation.

        chat_history : typing.Sequence[ConversationHistoryTranscriptCommonModelInput]

        success_condition : str
            A prompt that evaluates whether the agent's response is successful. Should return True or False.

        success_examples : typing.Sequence[AgentSuccessfulResponseExample]
            Non-empty list of example responses that should be considered successful

        failure_examples : typing.Sequence[AgentFailureResponseExample]
            Non-empty list of example responses that should be considered failures

        name : str

        tool_call_parameters : typing.Optional[UnitTestToolCallEvaluationModelInput]
            How to evaluate the agent's tool call (if any). If empty, the tool call is not evaluated.

        dynamic_variables : typing.Optional[typing.Dict[str, typing.Optional[UpdateUnitTestRequestDynamicVariablesValue]]]
            Dynamic variables to replace in the agent config during testing

        type : typing.Optional[UnitTestCommonModelType]

        from_conversation_metadata : typing.Optional[TestFromConversationMetadataInput]
            Metadata of a conversation this test was created from (if applicable).

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        GetUnitTestResponseModel
            Successful Response

        Examples
        --------
        from elevenlabs import (
            AgentFailureResponseExample,
            AgentSuccessfulResponseExample,
            ConversationHistoryTranscriptCommonModelInput,
            ElevenLabs,
        )

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        client.conversational_ai.tests.update(
            test_id="TeaqRRdTcIfIu2i7BYfT",
            chat_history=[
                ConversationHistoryTranscriptCommonModelInput(
                    role="user",
                    time_in_call_secs=1,
                )
            ],
            success_condition="success_condition",
            success_examples=[
                AgentSuccessfulResponseExample(
                    response="response",
                )
            ],
            failure_examples=[
                AgentFailureResponseExample(
                    response="response",
                )
            ],
            name="name",
        )
        """
        _response = self._raw_client.update(
            test_id,
            chat_history=chat_history,
            success_condition=success_condition,
            success_examples=success_examples,
            failure_examples=failure_examples,
            name=name,
            tool_call_parameters=tool_call_parameters,
            dynamic_variables=dynamic_variables,
            type=type,
            from_conversation_metadata=from_conversation_metadata,
            request_options=request_options,
        )
        return _response.data

    def delete(self, test_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
        """
        Deletes an agent response test by ID.

        Parameters
        ----------
        test_id : str
            The id of a chat response test. This is returned on test creation.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.Any
            Successful Response

        Examples
        --------
        from elevenlabs import ElevenLabs

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        client.conversational_ai.tests.delete(
            test_id="TeaqRRdTcIfIu2i7BYfT",
        )
        """
        _response = self._raw_client.delete(test_id, request_options=request_options)
        return _response.data

    def summaries(
        self, *, test_ids: typing.Sequence[str], request_options: typing.Optional[RequestOptions] = None
    ) -> GetTestsSummariesByIdsResponseModel:
        """
        Gets multiple agent response tests by their IDs. Returns a dictionary mapping test IDs to test summaries.

        Parameters
        ----------
        test_ids : typing.Sequence[str]
            List of test IDs to fetch. No duplicates allowed.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        GetTestsSummariesByIdsResponseModel
            Successful Response

        Examples
        --------
        from elevenlabs import ElevenLabs

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        client.conversational_ai.tests.summaries(
            test_ids=["test_id_1", "test_id_2"],
        )
        """
        _response = self._raw_client.summaries(test_ids=test_ids, request_options=request_options)
        return _response.data

    def list(
        self,
        *,
        cursor: typing.Optional[str] = None,
        page_size: typing.Optional[int] = None,
        search: typing.Optional[str] = None,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> GetTestsPageResponseModel:
        """
        Lists all agent response tests with pagination support and optional search filtering.

        Parameters
        ----------
        cursor : typing.Optional[str]
            Used for fetching next page. Cursor is returned in the response.

        page_size : typing.Optional[int]
            How many Tests to return at maximum. Can not exceed 100, defaults to 30.

        search : typing.Optional[str]
            Search query to filter tests by name.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        GetTestsPageResponseModel
            Successful Response

        Examples
        --------
        from elevenlabs import ElevenLabs

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        client.conversational_ai.tests.list(
            cursor="cursor",
            page_size=1,
            search="search",
        )
        """
        _response = self._raw_client.list(
            cursor=cursor, page_size=page_size, search=search, request_options=request_options
        )
        return _response.data

    @property
    def invocations(self) -> InvocationsClient:
        if self._invocations is None:
            from .invocations.client import InvocationsClient

            self._invocations = InvocationsClient(client_wrapper=self._client_wrapper)
        return self._invocations


class AsyncTestsClient:
    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._raw_client = AsyncRawTestsClient(client_wrapper=client_wrapper)
        self._client_wrapper = client_wrapper
        self._invocations: typing.Optional[AsyncInvocationsClient] = None

    @property
    def with_raw_response(self) -> AsyncRawTestsClient:
        """
        Retrieves a raw implementation of this client that returns raw responses.

        Returns
        -------
        AsyncRawTestsClient
        """
        return self._raw_client

    async def create(
        self,
        *,
        chat_history: typing.Sequence[ConversationHistoryTranscriptCommonModelInput],
        success_condition: str,
        success_examples: typing.Sequence[AgentSuccessfulResponseExample],
        failure_examples: typing.Sequence[AgentFailureResponseExample],
        name: str,
        tool_call_parameters: typing.Optional[UnitTestToolCallEvaluationModelInput] = OMIT,
        dynamic_variables: typing.Optional[
            typing.Dict[str, typing.Optional[CreateUnitTestRequestDynamicVariablesValue]]
        ] = OMIT,
        type: typing.Optional[UnitTestCommonModelType] = OMIT,
        from_conversation_metadata: typing.Optional[TestFromConversationMetadataInput] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> CreateUnitTestResponseModel:
        """
        Creates a new agent response test.

        Parameters
        ----------
        chat_history : typing.Sequence[ConversationHistoryTranscriptCommonModelInput]

        success_condition : str
            A prompt that evaluates whether the agent's response is successful. Should return True or False.

        success_examples : typing.Sequence[AgentSuccessfulResponseExample]
            Non-empty list of example responses that should be considered successful

        failure_examples : typing.Sequence[AgentFailureResponseExample]
            Non-empty list of example responses that should be considered failures

        name : str

        tool_call_parameters : typing.Optional[UnitTestToolCallEvaluationModelInput]
            How to evaluate the agent's tool call (if any). If empty, the tool call is not evaluated.

        dynamic_variables : typing.Optional[typing.Dict[str, typing.Optional[CreateUnitTestRequestDynamicVariablesValue]]]
            Dynamic variables to replace in the agent config during testing

        type : typing.Optional[UnitTestCommonModelType]

        from_conversation_metadata : typing.Optional[TestFromConversationMetadataInput]
            Metadata of a conversation this test was created from (if applicable).

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        CreateUnitTestResponseModel
            Successful Response

        Examples
        --------
        import asyncio

        from elevenlabs import (
            AgentFailureResponseExample,
            AgentSuccessfulResponseExample,
            AsyncElevenLabs,
            ConversationHistoryTranscriptCommonModelInput,
        )

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.conversational_ai.tests.create(
                chat_history=[
                    ConversationHistoryTranscriptCommonModelInput(
                        role="user",
                        time_in_call_secs=1,
                    )
                ],
                success_condition="success_condition",
                success_examples=[
                    AgentSuccessfulResponseExample(
                        response="response",
                    )
                ],
                failure_examples=[
                    AgentFailureResponseExample(
                        response="response",
                    )
                ],
                name="name",
            )


        asyncio.run(main())
        """
        _response = await self._raw_client.create(
            chat_history=chat_history,
            success_condition=success_condition,
            success_examples=success_examples,
            failure_examples=failure_examples,
            name=name,
            tool_call_parameters=tool_call_parameters,
            dynamic_variables=dynamic_variables,
            type=type,
            from_conversation_metadata=from_conversation_metadata,
            request_options=request_options,
        )
        return _response.data

    async def get(
        self, test_id: str, *, request_options: typing.Optional[RequestOptions] = None
    ) -> GetUnitTestResponseModel:
        """
        Gets an agent response test by ID.

        Parameters
        ----------
        test_id : str
            The id of a chat response test. This is returned on test creation.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        GetUnitTestResponseModel
            Successful Response

        Examples
        --------
        import asyncio

        from elevenlabs import AsyncElevenLabs

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.conversational_ai.tests.get(
                test_id="TeaqRRdTcIfIu2i7BYfT",
            )


        asyncio.run(main())
        """
        _response = await self._raw_client.get(test_id, request_options=request_options)
        return _response.data

    async def update(
        self,
        test_id: str,
        *,
        chat_history: typing.Sequence[ConversationHistoryTranscriptCommonModelInput],
        success_condition: str,
        success_examples: typing.Sequence[AgentSuccessfulResponseExample],
        failure_examples: typing.Sequence[AgentFailureResponseExample],
        name: str,
        tool_call_parameters: typing.Optional[UnitTestToolCallEvaluationModelInput] = OMIT,
        dynamic_variables: typing.Optional[
            typing.Dict[str, typing.Optional[UpdateUnitTestRequestDynamicVariablesValue]]
        ] = OMIT,
        type: typing.Optional[UnitTestCommonModelType] = OMIT,
        from_conversation_metadata: typing.Optional[TestFromConversationMetadataInput] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> GetUnitTestResponseModel:
        """
        Updates an agent response test by ID.

        Parameters
        ----------
        test_id : str
            The id of a chat response test. This is returned on test creation.

        chat_history : typing.Sequence[ConversationHistoryTranscriptCommonModelInput]

        success_condition : str
            A prompt that evaluates whether the agent's response is successful. Should return True or False.

        success_examples : typing.Sequence[AgentSuccessfulResponseExample]
            Non-empty list of example responses that should be considered successful

        failure_examples : typing.Sequence[AgentFailureResponseExample]
            Non-empty list of example responses that should be considered failures

        name : str

        tool_call_parameters : typing.Optional[UnitTestToolCallEvaluationModelInput]
            How to evaluate the agent's tool call (if any). If empty, the tool call is not evaluated.

        dynamic_variables : typing.Optional[typing.Dict[str, typing.Optional[UpdateUnitTestRequestDynamicVariablesValue]]]
            Dynamic variables to replace in the agent config during testing

        type : typing.Optional[UnitTestCommonModelType]

        from_conversation_metadata : typing.Optional[TestFromConversationMetadataInput]
            Metadata of a conversation this test was created from (if applicable).

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        GetUnitTestResponseModel
            Successful Response

        Examples
        --------
        import asyncio

        from elevenlabs import (
            AgentFailureResponseExample,
            AgentSuccessfulResponseExample,
            AsyncElevenLabs,
            ConversationHistoryTranscriptCommonModelInput,
        )

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.conversational_ai.tests.update(
                test_id="TeaqRRdTcIfIu2i7BYfT",
                chat_history=[
                    ConversationHistoryTranscriptCommonModelInput(
                        role="user",
                        time_in_call_secs=1,
                    )
                ],
                success_condition="success_condition",
                success_examples=[
                    AgentSuccessfulResponseExample(
                        response="response",
                    )
                ],
                failure_examples=[
                    AgentFailureResponseExample(
                        response="response",
                    )
                ],
                name="name",
            )


        asyncio.run(main())
        """
        _response = await self._raw_client.update(
            test_id,
            chat_history=chat_history,
            success_condition=success_condition,
            success_examples=success_examples,
            failure_examples=failure_examples,
            name=name,
            tool_call_parameters=tool_call_parameters,
            dynamic_variables=dynamic_variables,
            type=type,
            from_conversation_metadata=from_conversation_metadata,
            request_options=request_options,
        )
        return _response.data

    async def delete(self, test_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any:
        """
        Deletes an agent response test by ID.

        Parameters
        ----------
        test_id : str
            The id of a chat response test. This is returned on test creation.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.Any
            Successful Response

        Examples
        --------
        import asyncio

        from elevenlabs import AsyncElevenLabs

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.conversational_ai.tests.delete(
                test_id="TeaqRRdTcIfIu2i7BYfT",
            )


        asyncio.run(main())
        """
        _response = await self._raw_client.delete(test_id, request_options=request_options)
        return _response.data

    async def summaries(
        self, *, test_ids: typing.Sequence[str], request_options: typing.Optional[RequestOptions] = None
    ) -> GetTestsSummariesByIdsResponseModel:
        """
        Gets multiple agent response tests by their IDs. Returns a dictionary mapping test IDs to test summaries.

        Parameters
        ----------
        test_ids : typing.Sequence[str]
            List of test IDs to fetch. No duplicates allowed.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        GetTestsSummariesByIdsResponseModel
            Successful Response

        Examples
        --------
        import asyncio

        from elevenlabs import AsyncElevenLabs

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.conversational_ai.tests.summaries(
                test_ids=["test_id_1", "test_id_2"],
            )


        asyncio.run(main())
        """
        _response = await self._raw_client.summaries(test_ids=test_ids, request_options=request_options)
        return _response.data

    async def list(
        self,
        *,
        cursor: typing.Optional[str] = None,
        page_size: typing.Optional[int] = None,
        search: typing.Optional[str] = None,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> GetTestsPageResponseModel:
        """
        Lists all agent response tests with pagination support and optional search filtering.

        Parameters
        ----------
        cursor : typing.Optional[str]
            Used for fetching next page. Cursor is returned in the response.

        page_size : typing.Optional[int]
            How many Tests to return at maximum. Can not exceed 100, defaults to 30.

        search : typing.Optional[str]
            Search query to filter tests by name.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        GetTestsPageResponseModel
            Successful Response

        Examples
        --------
        import asyncio

        from elevenlabs import AsyncElevenLabs

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.conversational_ai.tests.list(
                cursor="cursor",
                page_size=1,
                search="search",
            )


        asyncio.run(main())
        """
        _response = await self._raw_client.list(
            cursor=cursor, page_size=page_size, search=search, request_options=request_options
        )
        return _response.data

    @property
    def invocations(self) -> AsyncInvocationsClient:
        if self._invocations is None:
            from .invocations.client import AsyncInvocationsClient

            self._invocations = AsyncInvocationsClient(client_wrapper=self._client_wrapper)
        return self._invocations
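

# Minimal usage sketch for the `with_raw_response` escape hatch documented on the
# clients above. Assumptions: a valid API key and network access; the helper names
# below are illustrative only, not part of the generated API. As the delegating
# methods above show, the raw client returns a response wrapper whose parsed body
# is exposed as `.data`, which is useful when more than the parsed model is needed.
def _example_raw_response_usage() -> None:
    from elevenlabs import ElevenLabs

    client = ElevenLabs(
        api_key="YOUR_API_KEY",
    )
    # Same parameters as `tests.list`, but the full response wrapper comes back
    # instead of just the parsed `GetTestsPageResponseModel`.
    raw_response = client.conversational_ai.tests.with_raw_response.list(page_size=10)
    print(raw_response.data)


# Async analogue of the sketch above, mirroring the asyncio pattern used in the
# docstrings; the methods on `AsyncRawTestsClient` are coroutines and must be awaited.
async def _example_async_raw_response_usage() -> None:
    from elevenlabs import AsyncElevenLabs

    client = AsyncElevenLabs(
        api_key="YOUR_API_KEY",
    )
    raw_response = await client.conversational_ai.tests.with_raw_response.list(page_size=10)
    print(raw_response.data)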