import typing

from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
from ...core.request_options import RequestOptions
from ...types.llm_usage_calculator_response_model import LlmUsageCalculatorResponseModel
from .raw_client import AsyncRawLlmUsageClient, RawLlmUsageClient

# Sentinel default for optional parameters: the Ellipsis object cast to Any,
# so generated methods can tell "not provided" apart from an explicit None.
OMIT = typing.cast(typing.Any, ...)


class LlmUsageClient:
    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._raw_client = RawLlmUsageClient(client_wrapper=client_wrapper)

    @property
    def with_raw_response(self) -> RawLlmUsageClient:
        """
        Retrieves a raw implementation of this client that returns raw responses.

        Returns
        -------
        RawLlmUsageClient
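
        Examples
        --------
        # A hedged sketch: the raw client is assumed to expose the same
        # ``calculate`` signature and to return a response wrapper whose
        # ``.data`` attribute holds the parsed model (inferred from the
        # ``_response.data`` usage in ``calculate`` below).
        from elevenlabs import ElevenLabs

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        response = client.conversational_ai.llm_usage.with_raw_response.calculate(
            prompt_length=1,
            number_of_pages=1,
            rag_enabled=True,
        )
        print(response.data)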
        """
        return self._raw_client

    def calculate(
        self,
        *,
        prompt_length: int,
        number_of_pages: int,
        rag_enabled: bool,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> LlmUsageCalculatorResponseModel:
        """
        Returns a list of LLM models and the expected cost for using them based on the provided values.

        Parameters
        ----------
        prompt_length : int
            Length of the prompt in characters.

        number_of_pages : int
            Pages of content in PDF documents or URLs in the agent's knowledge base.

        rag_enabled : bool
            Whether RAG is enabled.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        LlmUsageCalculatorResponseModel
            Successful Response

        Examples
        --------
        from elevenlabs import ElevenLabs

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        client.conversational_ai.llm_usage.calculate(
            prompt_length=1,
            number_of_pages=1,
            rag_enabled=True,
        )
        """
        _response = self._raw_client.calculate(
            prompt_length=prompt_length,
            number_of_pages=number_of_pages,
            rag_enabled=rag_enabled,
            request_options=request_options,
        )
        return _response.data


class AsyncLlmUsageClient:
    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._raw_client = AsyncRawLlmUsageClient(client_wrapper=client_wrapper)

    @property
    def with_raw_response(self) -> AsyncRawLlmUsageClient:
        """
        Retrieves a raw implementation of this client that returns raw responses.

        Returns
        -------
        AsyncRawLlmUsageClient
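
        Examples
        --------
        # A hedged sketch: the async raw client is assumed to mirror the
        # async ``calculate`` below, returning a wrapper whose ``.data``
        # attribute holds the parsed model.
        import asyncio

        from elevenlabs import AsyncElevenLabs

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            response = await client.conversational_ai.llm_usage.with_raw_response.calculate(
                prompt_length=1,
                number_of_pages=1,
                rag_enabled=True,
            )
            print(response.data)


        asyncio.run(main())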
        """
        return self._raw_client

    async def calculate(
        self,
        *,
        prompt_length: int,
        number_of_pages: int,
        rag_enabled: bool,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> LlmUsageCalculatorResponseModel:
        """
        Returns a list of LLM models and the expected cost for using them based on the provided values.

        Parameters
        ----------
        prompt_length : int
            Length of the prompt in characters.

        number_of_pages : int
            Pages of content in PDF documents or URLs in the agent's knowledge base.

        rag_enabled : bool
            Whether RAG is enabled.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        LlmUsageCalculatorResponseModel
            Successful Response

        Examples
        --------
        import asyncio

        from elevenlabs import AsyncElevenLabs

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.conversational_ai.llm_usage.calculate(
                prompt_length=1,
                number_of_pages=1,
                rag_enabled=True,
            )


        asyncio.run(main())
        """
        _response = await self._raw_client.calculate(
            prompt_length=prompt_length,
            number_of_pages=number_of_pages,
            rag_enabled=rag_enabled,
            request_options=request_options,
        )
        return _response.data
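

# A minimal wiring sketch (hypothetical: the real wiring lives in the parent
# ``conversational_ai`` client, not in this module). The parent client is
# assumed to construct this class with its shared ``SyncClientWrapper`` so
# that ``client.conversational_ai.llm_usage.calculate(...)`` resolves here:
#
#     llm_usage = LlmUsageClient(client_wrapper=client_wrapper)
#     usage = llm_usage.calculate(
#         prompt_length=1, number_of_pages=1, rag_enabled=True
#     )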