# This file was auto-generated by Fern from our API Definition.

import typing

from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
from ...core.request_options import RequestOptions
from ...types.llm_usage_calculator_response_model import LlmUsageCalculatorResponseModel
from .raw_client import AsyncRawLlmUsageClient, RawLlmUsageClient

# OMIT is a sentinel (the Ellipsis object, cast to Any for type checkers) used
# as the default for optional parameters, so an explicit None can be
# distinguished from "not provided"
OMIT = typing.cast(typing.Any, ...)


class LlmUsageClient:
    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._raw_client = RawLlmUsageClient(client_wrapper=client_wrapper)

    @property
    def with_raw_response(self) -> RawLlmUsageClient:
        """
        Retrieves a raw implementation of this client, whose methods return the
        raw response wrapper instead of only the parsed model.

        Returns
        -------
        RawLlmUsageClient
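
        Examples
        --------
        A minimal sketch of assumed usage: the raw client's calculate()
        returns a response wrapper whose .data attribute holds the parsed
        LlmUsageCalculatorResponseModel, mirroring how calculate() below
        unwraps it.

        from elevenlabs import ElevenLabs

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        raw_response = client.conversational_ai.llm_usage.with_raw_response.calculate(
            prompt_length=1,
            number_of_pages=1,
            rag_enabled=True,
        )
        parsed = raw_response.data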
        """
        return self._raw_client

    def calculate(
        self,
        *,
        prompt_length: int,
        number_of_pages: int,
        rag_enabled: bool,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> LlmUsageCalculatorResponseModel:
        """
        Returns a list of LLM models and the expected cost for using them based on the provided values.

        Parameters
        ----------
        prompt_length : int
            Length of the prompt in characters.

        number_of_pages : int
            Number of pages of content in PDF documents or URLs in the agent's knowledge base.

        rag_enabled : bool
            Whether RAG is enabled.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        LlmUsageCalculatorResponseModel
            Successful Response

        Examples
        --------
        from elevenlabs import ElevenLabs

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        response = client.conversational_ai.llm_usage.calculate(
            prompt_length=1,
            number_of_pages=1,
            rag_enabled=True,
        )
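        # Printing the parsed response is a simple way to inspect the
        # per-model cost estimates (a sketch; the exact fields depend on
        # the generated LlmUsageCalculatorResponseModel).
        print(response)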
        """
        _response = self._raw_client.calculate(
            prompt_length=prompt_length,
            number_of_pages=number_of_pages,
            rag_enabled=rag_enabled,
            request_options=request_options,
        )
        return _response.data


class AsyncLlmUsageClient:
    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._raw_client = AsyncRawLlmUsageClient(client_wrapper=client_wrapper)

    @property
    def with_raw_response(self) -> AsyncRawLlmUsageClient:
        """
        Retrieves a raw implementation of this client, whose methods return the
        raw response wrapper instead of only the parsed model.

        Returns
        -------
        AsyncRawLlmUsageClient
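
        Examples
        --------
        A minimal sketch of assumed usage: the async raw client's
        calculate() coroutine returns a response wrapper whose .data
        attribute holds the parsed model, mirroring how calculate()
        below unwraps it.

        import asyncio

        from elevenlabs import AsyncElevenLabs

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            raw_response = await client.conversational_ai.llm_usage.with_raw_response.calculate(
                prompt_length=1,
                number_of_pages=1,
                rag_enabled=True,
            )
            parsed = raw_response.data


        asyncio.run(main())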
        """
        return self._raw_client

    async def calculate(
        self,
        *,
        prompt_length: int,
        number_of_pages: int,
        rag_enabled: bool,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> LlmUsageCalculatorResponseModel:
        """
        Returns a list of LLM models and the expected cost for using them based on the provided values.

        Parameters
        ----------
        prompt_length : int
            Length of the prompt in characters.

        number_of_pages : int
            Number of pages of content in PDF documents or URLs in the agent's knowledge base.

        rag_enabled : bool
            Whether RAG is enabled.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        LlmUsageCalculatorResponseModel
            Successful Response

        Examples
        --------
        import asyncio

        from elevenlabs import AsyncElevenLabs

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            response = await client.conversational_ai.llm_usage.calculate(
                prompt_length=1,
                number_of_pages=1,
                rag_enabled=True,
            )
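            # As above, printing the parsed response is a simple way to
            # inspect the per-model cost estimates (a sketch; the exact
            # fields depend on the generated model).
            print(response)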


        asyncio.run(main())
        """
        _response = await self._raw_client.calculate(
            prompt_length=prompt_length,
            number_of_pages=number_of_pages,
            rag_enabled=rag_enabled,
            request_options=request_options,
        )
        return _response.data
