about summary refs log tree commit diff
path: root/.venv/lib/python3.12/site-packages/litellm/llms/volcengine.py
diff options
context:
space:
mode:
Diffstat (limited to '.venv/lib/python3.12/site-packages/litellm/llms/volcengine.py')
-rw-r--r--.venv/lib/python3.12/site-packages/litellm/llms/volcengine.py64
1 file changed, 64 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/litellm/llms/volcengine.py b/.venv/lib/python3.12/site-packages/litellm/llms/volcengine.py
new file mode 100644
index 00000000..e4a78104
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/litellm/llms/volcengine.py
@@ -0,0 +1,64 @@
+from typing import Optional, Union
+
+from litellm.llms.openai_like.chat.transformation import OpenAILikeChatConfig
+
+
class VolcEngineConfig(OpenAILikeChatConfig):
    """Chat-completion config for VolcEngine's OpenAI-compatible endpoint.

    Holds the OpenAI-style request parameters supported by VolcEngine and
    reports which OpenAI params may be passed through. Parameter storage
    follows the litellm config convention: values set in ``__init__`` are
    written onto the *class* and read back via ``get_config()``.
    """

    # Sampling / penalty parameters are floats in the OpenAI-compatible API
    # (temperature 0-2, top_p 0-1, penalties -2-2), so they are annotated
    # Optional[float] rather than Optional[int].
    frequency_penalty: Optional[float] = None
    function_call: Optional[Union[str, dict]] = None
    functions: Optional[list] = None
    logit_bias: Optional[dict] = None
    max_tokens: Optional[int] = None
    n: Optional[int] = None
    presence_penalty: Optional[float] = None
    stop: Optional[Union[str, list]] = None
    temperature: Optional[float] = None
    top_p: Optional[float] = None
    response_format: Optional[dict] = None

    def __init__(
        self,
        frequency_penalty: Optional[float] = None,
        function_call: Optional[Union[str, dict]] = None,
        functions: Optional[list] = None,
        logit_bias: Optional[dict] = None,
        max_tokens: Optional[int] = None,
        n: Optional[int] = None,
        presence_penalty: Optional[float] = None,
        stop: Optional[Union[str, list]] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        response_format: Optional[dict] = None,
    ) -> None:
        # NOTE: per the litellm config convention, every non-None argument is
        # stored on the *class* (not the instance), so configured values are
        # shared across all instances and surfaced by get_config().
        locals_ = locals().copy()
        for key, value in locals_.items():
            if key != "self" and value is not None:
                setattr(self.__class__, key, value)

    @classmethod
    def get_config(cls):
        """Return the explicitly-set config attributes (delegates to base)."""
        return super().get_config()

    def get_supported_openai_params(self, model: str) -> list:
        """Return the OpenAI params VolcEngine accepts for any model.

        The ``model`` argument is accepted for interface compatibility but
        unused — the same list applies across all VolcEngine models.
        """
        # NOTE(review): "response_format" is declared as a config attribute
        # above but absent here — confirm whether VolcEngine supports it
        # before adding it to this list.
        return [
            "frequency_penalty",
            "logit_bias",
            "logprobs",
            "top_logprobs",
            "max_completion_tokens",
            "max_tokens",
            "n",
            "presence_penalty",
            "seed",
            "stop",
            "stream",
            "stream_options",
            "temperature",
            "top_p",
            "tools",
            "tool_choice",
            "function_call",
            "functions",
            "max_retries",
            "extra_headers",
        ]  # works across all models