"""
Cost calculator for DeepSeek Chat models.

Handles prompt caching scenario.
"""

from typing import Tuple

from litellm.litellm_core_utils.llm_cost_calc.utils import generic_cost_per_token
from litellm.types.utils import Usage


def cost_per_token(model: str, usage: Usage) -> Tuple[float, float]:
    """
    Compute the (prompt_cost, completion_cost) pair for a DeepSeek model.

    Delegates to the shared generic calculator, tagging the request with
    the "deepseek" provider so provider-specific pricing (including the
    prompt-cache token rates) is applied. Mirrors the Anthropic
    cost-per-token flow.
    """
    # All pricing logic lives in the generic helper; this wrapper only
    # pins the provider identifier.
    prompt_cost, completion_cost = generic_cost_per_token(
        model=model, usage=usage, custom_llm_provider="deepseek"
    )
    return prompt_cost, completion_cost