about summary refs log tree commit diff
path: root/.venv/lib/python3.12/site-packages/litellm/llms/fireworks_ai/completion
diff options
context:
space:
mode:
author: S. Solomon Darnell 2025-03-28 21:52:21 -0500
committer: S. Solomon Darnell 2025-03-28 21:52:21 -0500
commit: 4a52a71956a8d46fcb7294ac71734504bb09bcc2 (patch)
tree: ee3dc5af3b6313e921cd920906356f5d4febc4ed /.venv/lib/python3.12/site-packages/litellm/llms/fireworks_ai/completion
parent: cc961e04ba734dd72309fb548a2f97d67d578813 (diff)
download: gn-ai-master.tar.gz
two version of R2R are here HEAD master
Diffstat (limited to '.venv/lib/python3.12/site-packages/litellm/llms/fireworks_ai/completion')
-rw-r--r--.venv/lib/python3.12/site-packages/litellm/llms/fireworks_ai/completion/transformation.py61
1 file changed, 61 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/litellm/llms/fireworks_ai/completion/transformation.py b/.venv/lib/python3.12/site-packages/litellm/llms/fireworks_ai/completion/transformation.py
new file mode 100644
index 00000000..607e709c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/litellm/llms/fireworks_ai/completion/transformation.py
@@ -0,0 +1,61 @@
+from typing import List, Union
+
+from litellm.types.llms.openai import AllMessageValues, OpenAITextCompletionUserMessage
+
+from ...base_llm.completion.transformation import BaseTextCompletionConfig
+from ...openai.completion.utils import _transform_prompt
+from ..common_utils import FireworksAIMixin
+
+
class FireworksAITextCompletionConfig(FireworksAIMixin, BaseTextCompletionConfig):
    """Text-completion request configuration for the Fireworks AI provider.

    Maps OpenAI-style optional parameters onto Fireworks AI's completion
    endpoint and normalizes bare model names into Fireworks' ``accounts/``
    namespace when building the request payload.
    """

    def get_supported_openai_params(self, model: str) -> list:
        """
        See how LiteLLM supports Provider-specific parameters - https://docs.litellm.ai/docs/completion/provider_specific_params#proxy-usage
        """
        # OpenAI parameters that Fireworks AI's text-completion endpoint accepts.
        supported = [
            "max_tokens",
            "logprobs",
            "echo",
            "temperature",
            "top_p",
            "top_k",
            "frequency_penalty",
            "presence_penalty",
            "n",
            "stop",
            "response_format",
            "stream",
            "user",
        ]
        return supported

    def map_openai_params(
        self,
        non_default_params: dict,
        optional_params: dict,
        model: str,
        drop_params: bool,
    ) -> dict:
        """Copy every supported OpenAI param into ``optional_params``.

        Unsupported params are silently skipped; ``drop_params`` is accepted
        for interface parity but never read, since skipping is the behavior
        either way. Mutates and returns ``optional_params``.
        """
        allowed = set(self.get_supported_openai_params(model))
        optional_params.update(
            {
                name: value
                for name, value in non_default_params.items()
                if name in allowed
            }
        )
        return optional_params

    def transform_text_completion_request(
        self,
        model: str,
        messages: Union[List[AllMessageValues], List[OpenAITextCompletionUserMessage]],
        optional_params: dict,
        headers: dict,
    ) -> dict:
        """Build the Fireworks AI text-completion request body.

        Flattens ``messages`` into a single prompt via ``_transform_prompt``
        and qualifies bare model names with the ``accounts/fireworks/models/``
        prefix. ``headers`` is accepted for interface parity but is not part
        of the returned payload.
        """
        # Bare model names are resolved into Fireworks' account namespace;
        # already-qualified ids (starting with "accounts/") pass through.
        qualified_model = (
            model
            if model.startswith("accounts/")
            else f"accounts/fireworks/models/{model}"
        )
        return {
            "model": qualified_model,
            "prompt": _transform_prompt(messages=messages),
            **optional_params,
        }