about summary refs log tree commit diff
path: root/.venv/lib/python3.12/site-packages/litellm/llms/bedrock/embed/cohere_transformation.py
diff options
context:
space:
mode:
authorS. Solomon Darnell2025-03-28 21:52:21 -0500
committerS. Solomon Darnell2025-03-28 21:52:21 -0500
commit4a52a71956a8d46fcb7294ac71734504bb09bcc2 (patch)
treeee3dc5af3b6313e921cd920906356f5d4febc4ed /.venv/lib/python3.12/site-packages/litellm/llms/bedrock/embed/cohere_transformation.py
parentcc961e04ba734dd72309fb548a2f97d67d578813 (diff)
downloadgn-ai-master.tar.gz
two version of R2R are here HEAD master
Diffstat (limited to '.venv/lib/python3.12/site-packages/litellm/llms/bedrock/embed/cohere_transformation.py')
-rw-r--r--.venv/lib/python3.12/site-packages/litellm/llms/bedrock/embed/cohere_transformation.py45
1 file changed, 45 insertions(+), 0 deletions(-)
diff --git a/.venv/lib/python3.12/site-packages/litellm/llms/bedrock/embed/cohere_transformation.py b/.venv/lib/python3.12/site-packages/litellm/llms/bedrock/embed/cohere_transformation.py
new file mode 100644
index 00000000..490cd71b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/litellm/llms/bedrock/embed/cohere_transformation.py
@@ -0,0 +1,45 @@
+"""
+Transformation logic from OpenAI /v1/embeddings format to Bedrock Cohere /invoke format. 
+
+Why separate file? Make it easy to see how transformation works
+"""
+
+from typing import List
+
+from litellm.llms.cohere.embed.transformation import CohereEmbeddingConfig
+from litellm.types.llms.bedrock import CohereEmbeddingRequest
+
+
class BedrockCohereEmbeddingConfig:
    """
    Maps OpenAI ``/v1/embeddings`` parameters onto the request body that
    Bedrock's Cohere ``/invoke`` endpoint accepts.

    Kept in its own class so the OpenAI -> Bedrock-Cohere translation is easy
    to follow in one place.
    """

    def __init__(self) -> None:
        pass

    def get_supported_openai_params(self) -> List[str]:
        """Return the OpenAI embedding params this provider can translate."""
        return ["encoding_format"]

    def map_openai_params(
        self, non_default_params: dict, optional_params: dict
    ) -> dict:
        """
        Translate supported OpenAI params into their Cohere equivalents,
        writing them into ``optional_params`` (mutated in place and returned).

        ``encoding_format`` becomes Cohere's ``embedding_types``.
        """
        if "encoding_format" in non_default_params:
            optional_params["embedding_types"] = non_default_params[
                "encoding_format"
            ]
        return optional_params

    def _is_v3_model(self, model: str) -> bool:
        # NOTE(review): matches any model id containing the character "3"
        # (e.g. "cohere.embed-english-v3") — presumably a version sniff;
        # confirm no non-v3 model names contain a "3".
        return model.count("3") > 0

    def _transform_request(
        self, model: str, input: List[str], inference_params: dict
    ) -> CohereEmbeddingRequest:
        """
        Build a Bedrock Cohere ``/invoke`` request body.

        Delegates to the generic Cohere transformation, then copies over only
        the keys declared on ``CohereEmbeddingRequest`` — presumably to drop
        fields the Bedrock endpoint does not accept (TODO confirm).
        """
        base_request = CohereEmbeddingConfig()._transform_request(
            model, input, inference_params
        )

        filtered_request = CohereEmbeddingRequest(
            input_type=base_request["input_type"],
        )
        for key in CohereEmbeddingRequest.__annotations__:
            if key in base_request:
                filtered_request[key] = base_request[key]  # type: ignore

        return filtered_request