author    S. Solomon Darnell    2025-03-28 21:52:21 -0500
committer S. Solomon Darnell    2025-03-28 21:52:21 -0500
commit    4a52a71956a8d46fcb7294ac71734504bb09bcc2 (patch)
tree      ee3dc5af3b6313e921cd920906356f5d4febc4ed /.venv/lib/python3.12/site-packages/litellm/llms/deepseek
parent    cc961e04ba734dd72309fb548a2f97d67d578813 (diff)
two versions of R2R are here (HEAD, master)
Diffstat (limited to '.venv/lib/python3.12/site-packages/litellm/llms/deepseek')
-rw-r--r--  .venv/lib/python3.12/site-packages/litellm/llms/deepseek/chat/transformation.py   55
-rw-r--r--  .venv/lib/python3.12/site-packages/litellm/llms/deepseek/cost_calculator.py       21
2 files changed, 76 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/litellm/llms/deepseek/chat/transformation.py b/.venv/lib/python3.12/site-packages/litellm/llms/deepseek/chat/transformation.py
new file mode 100644
index 00000000..180cf7dc
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/litellm/llms/deepseek/chat/transformation.py
@@ -0,0 +1,55 @@
+"""
+Translates from OpenAI's `/v1/chat/completions` to DeepSeek's `/v1/chat/completions`
+"""
+
+from typing import List, Optional, Tuple
+
+from litellm.litellm_core_utils.prompt_templates.common_utils import (
+ handle_messages_with_content_list_to_str_conversion,
+)
+from litellm.secret_managers.main import get_secret_str
+from litellm.types.llms.openai import AllMessageValues
+
+from ...openai.chat.gpt_transformation import OpenAIGPTConfig
+
+
+class DeepSeekChatConfig(OpenAIGPTConfig):
+
+ def _transform_messages(
+ self, messages: List[AllMessageValues], model: str
+ ) -> List[AllMessageValues]:
+ """
+ DeepSeek does not support content in list format.
+ """
+ messages = handle_messages_with_content_list_to_str_conversion(messages)
+ return super()._transform_messages(messages=messages, model=model)
+
+ def _get_openai_compatible_provider_info(
+ self, api_base: Optional[str], api_key: Optional[str]
+ ) -> Tuple[Optional[str], Optional[str]]:
+ api_base = (
+ api_base
+ or get_secret_str("DEEPSEEK_API_BASE")
+ or "https://api.deepseek.com/beta"
+ ) # type: ignore
+ dynamic_api_key = api_key or get_secret_str("DEEPSEEK_API_KEY")
+ return api_base, dynamic_api_key
+
+ def get_complete_url(
+ self,
+ api_base: Optional[str],
+ model: str,
+ optional_params: dict,
+ litellm_params: dict,
+ stream: Optional[bool] = None,
+ ) -> str:
+ """
+ If api_base is not provided, use the default DeepSeek /chat/completions endpoint.
+ """
+ if not api_base:
+ api_base = "https://api.deepseek.com/beta"
+
+ if not api_base.endswith("/chat/completions"):
+ api_base = f"{api_base}/chat/completions"
+
+ return api_base
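The transformation above layers DeepSeek onto the generic OpenAI config: list-form message content is flattened to strings, and the API base falls back from the explicit argument, to the DEEPSEEK_API_BASE environment variable, to the hard-coded beta endpoint, with the /chat/completions route appended exactly once. A minimal standalone sketch of that resolution chain follows; the helper name resolve_deepseek_endpoint is illustrative (not a litellm API), and os.getenv stands in for litellm's get_secret_str, which also consults configured secret managers.

import os
from typing import Optional, Tuple

def resolve_deepseek_endpoint(
    api_base: Optional[str] = None, api_key: Optional[str] = None
) -> Tuple[str, Optional[str]]:
    # Explicit argument wins, then the environment, then the beta default
    # (mirrors _get_openai_compatible_provider_info above).
    api_base = (
        api_base
        or os.getenv("DEEPSEEK_API_BASE")
        or "https://api.deepseek.com/beta"
    )
    api_key = api_key or os.getenv("DEEPSEEK_API_KEY")
    # Append the chat completions route exactly once (mirrors get_complete_url).
    if not api_base.endswith("/chat/completions"):
        api_base = f"{api_base}/chat/completions"
    return api_base, api_key

# With no arguments and no env vars set:
# ('https://api.deepseek.com/beta/chat/completions', None)
print(resolve_deepseek_endpoint())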
diff --git a/.venv/lib/python3.12/site-packages/litellm/llms/deepseek/cost_calculator.py b/.venv/lib/python3.12/site-packages/litellm/llms/deepseek/cost_calculator.py
new file mode 100644
index 00000000..0f4490cb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/litellm/llms/deepseek/cost_calculator.py
@@ -0,0 +1,21 @@
+"""
+Cost calculator for DeepSeek Chat models.
+
+Handles prompt caching scenario.
+"""
+
+from typing import Tuple
+
+from litellm.litellm_core_utils.llm_cost_calc.utils import generic_cost_per_token
+from litellm.types.utils import Usage
+
+
+def cost_per_token(model: str, usage: Usage) -> Tuple[float, float]:
+ """
+ Calculates the cost per token for a given model, prompt tokens, and completion tokens.
+
+ Follows the same logic as Anthropic's cost per token calculation.
+ """
+ return generic_cost_per_token(
+ model=model, usage=usage, custom_llm_provider="deepseek"
+ )
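cost_per_token itself just delegates to generic_cost_per_token with custom_llm_provider="deepseek", which looks up per-token prices in litellm's model cost map and applies any cached-prompt discount recorded on the Usage object. A minimal usage sketch, assuming a litellm install whose cost map includes "deepseek-chat":

from litellm.llms.deepseek.cost_calculator import cost_per_token
from litellm.types.utils import Usage

# 1,000 prompt tokens and 200 completion tokens; total_tokens is passed
# explicitly to keep the Usage object self-consistent.
usage = Usage(prompt_tokens=1000, completion_tokens=200, total_tokens=1200)
prompt_cost, completion_cost = cost_per_token(model="deepseek-chat", usage=usage)
print(f"prompt: ${prompt_cost:.6f}, completion: ${completion_cost:.6f}")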