From 4a52a71956a8d46fcb7294ac71734504bb09bcc2 Mon Sep 17 00:00:00 2001
From: S. Solomon Darnell
Date: Fri, 28 Mar 2025 21:52:21 -0500
Subject: two version of R2R are here

---
 .../litellm/llms/anthropic/cost_calculation.py     | 25 ++++++++++++++++++++++
 1 file changed, 25 insertions(+)
 create mode 100644 .venv/lib/python3.12/site-packages/litellm/llms/anthropic/cost_calculation.py

(limited to '.venv/lib/python3.12/site-packages/litellm/llms/anthropic/cost_calculation.py')

diff --git a/.venv/lib/python3.12/site-packages/litellm/llms/anthropic/cost_calculation.py b/.venv/lib/python3.12/site-packages/litellm/llms/anthropic/cost_calculation.py
new file mode 100644
index 00000000..0dbe19ca
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/litellm/llms/anthropic/cost_calculation.py
@@ -0,0 +1,25 @@
+"""
+Helper util for handling anthropic-specific cost calculation
+- e.g.: prompt caching
+"""
+
+from typing import Tuple
+
+from litellm.litellm_core_utils.llm_cost_calc.utils import generic_cost_per_token
+from litellm.types.utils import Usage
+
+
+def cost_per_token(model: str, usage: Usage) -> Tuple[float, float]:
+    """
+    Calculates the cost per token for a given model, prompt tokens, and completion tokens.
+
+    Input:
+        - model: str, the model name without provider prefix
+        - usage: LiteLLM Usage block, containing anthropic caching information
+
+    Returns:
+        Tuple[float, float] - prompt_cost_in_usd, completion_cost_in_usd
+    """
+    return generic_cost_per_token(
+        model=model, usage=usage, custom_llm_provider="anthropic"
+    )
--
cgit v1.2.3
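
For reference, a minimal usage sketch of the helper added by this patch (not part of the diff itself). It assumes litellm's Usage model accepts prompt_tokens / completion_tokens / total_tokens keyword arguments and that the chosen Anthropic model name exists in your installed litellm cost map; adjust both to your version of the library.

    # Hedged sketch, not from the patch: exercises the new cost_per_token helper.
    from litellm.types.utils import Usage
    from litellm.llms.anthropic.cost_calculation import cost_per_token

    # Build a Usage block describing one request/response pair.
    usage = Usage(
        prompt_tokens=1000,      # tokens sent to the model
        completion_tokens=200,   # tokens generated by the model
        total_tokens=1200,
    )

    # Model name is passed without the "anthropic/" provider prefix, per the docstring.
    prompt_cost, completion_cost = cost_per_token(
        model="claude-3-5-sonnet-20240620", usage=usage
    )
    print(f"prompt: ${prompt_cost:.6f}, completion: ${completion_cost:.6f}")

The helper itself is a thin wrapper: it forwards the Usage block to generic_cost_per_token with custom_llm_provider="anthropic", which is where provider-specific details such as prompt-caching token counts are priced.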