# This file was auto-generated by Fern from our API Definition.

import typing

from ....core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
from ....core.request_options import RequestOptions
from ....types.llm_usage_calculator_response_model import LlmUsageCalculatorResponseModel
from .raw_client import AsyncRawLlmUsageClient, RawLlmUsageClient

# Sentinel default for optional parameters, used to distinguish
# "parameter omitted" from an explicit ``None``.
OMIT = typing.cast(typing.Any, ...)


class LlmUsageClient:
    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._raw_client = RawLlmUsageClient(client_wrapper=client_wrapper)

    @property
    def with_raw_response(self) -> RawLlmUsageClient:
        """
        Retrieves a raw implementation of this client whose methods return raw responses rather than parsed data.

        Returns
        -------
        RawLlmUsageClient
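
        Examples
        --------
        # A minimal sketch of raw-response access, assuming the raw client's
        # ``calculate`` mirrors this client's signature and exposes the parsed
        # body as ``.data`` (as the wrapper method below relies on).
        from elevenlabs import ElevenLabs

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        response = client.conversational_ai.agents.llm_usage.with_raw_response.calculate(
            agent_id="agent_id",
        )
        print(response.data)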
        """
        return self._raw_client

    def calculate(
        self,
        agent_id: str,
        *,
        prompt_length: typing.Optional[int] = OMIT,
        number_of_pages: typing.Optional[int] = OMIT,
        rag_enabled: typing.Optional[bool] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> LlmUsageCalculatorResponseModel:
        """
        Calculates the expected number of LLM tokens needed for the specified agent.

        Parameters
        ----------
        agent_id : str
            The ID of the agent to calculate LLM usage for.

        prompt_length : typing.Optional[int]
            Length of the prompt in characters.

        number_of_pages : typing.Optional[int]
            Number of pages of content in PDF documents or URLs in the agent's knowledge base.

        rag_enabled : typing.Optional[bool]
            Whether RAG (retrieval-augmented generation) is enabled for the agent.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        LlmUsageCalculatorResponseModel
            Successful Response

        Examples
        --------
        from elevenlabs import ElevenLabs

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        client.conversational_ai.agents.llm_usage.calculate(
            agent_id="agent_id",
        )
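
        # With the optional sizing inputs (illustrative values; all three
        # parameters may be omitted):
        client.conversational_ai.agents.llm_usage.calculate(
            agent_id="agent_id",
            prompt_length=800,
            number_of_pages=5,
            rag_enabled=True,
        )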
        """
        _response = self._raw_client.calculate(
            agent_id,
            prompt_length=prompt_length,
            number_of_pages=number_of_pages,
            rag_enabled=rag_enabled,
            request_options=request_options,
        )
        return _response.data


class AsyncLlmUsageClient:
    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._raw_client = AsyncRawLlmUsageClient(client_wrapper=client_wrapper)

    @property
    def with_raw_response(self) -> AsyncRawLlmUsageClient:
        """
        Retrieves a raw implementation of this client whose methods return raw responses rather than parsed data.

        Returns
        -------
        AsyncRawLlmUsageClient
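
        Examples
        --------
        # A minimal sketch of raw-response access, assuming the raw client's
        # ``calculate`` mirrors this client's signature and exposes the parsed
        # body as ``.data`` (as the wrapper method below relies on).
        import asyncio

        from elevenlabs import AsyncElevenLabs

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            response = await client.conversational_ai.agents.llm_usage.with_raw_response.calculate(
                agent_id="agent_id",
            )
            print(response.data)


        asyncio.run(main())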
        """
        return self._raw_client

    async def calculate(
        self,
        agent_id: str,
        *,
        prompt_length: typing.Optional[int] = OMIT,
        number_of_pages: typing.Optional[int] = OMIT,
        rag_enabled: typing.Optional[bool] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> LlmUsageCalculatorResponseModel:
        """
        Calculates the expected number of LLM tokens needed for the specified agent.

        Parameters
        ----------
        agent_id : str
            The ID of the agent to calculate LLM usage for.

        prompt_length : typing.Optional[int]
            Length of the prompt in characters.

        number_of_pages : typing.Optional[int]
            Number of pages of content in PDF documents or URLs in the agent's knowledge base.

        rag_enabled : typing.Optional[bool]
            Whether RAG (retrieval-augmented generation) is enabled for the agent.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        LlmUsageCalculatorResponseModel
            Successful Response

        Examples
        --------
        import asyncio

        from elevenlabs import AsyncElevenLabs

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.conversational_ai.agents.llm_usage.calculate(
                agent_id="agent_id",
            )
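            # With the optional sizing inputs (illustrative values; all three
            # parameters may be omitted):
            await client.conversational_ai.agents.llm_usage.calculate(
                agent_id="agent_id",
                prompt_length=800,
                number_of_pages=5,
                rag_enabled=True,
            )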


        asyncio.run(main())
        """
        _response = await self._raw_client.calculate(
            agent_id,
            prompt_length=prompt_length,
            number_of_pages=number_of_pages,
            rag_enabled=rag_enabled,
            request_options=request_options,
        )
        return _response.data
