path: root/.venv/lib/python3.12/site-packages/litellm/llms/cohere/rerank_v2/transformation.py
author    S. Solomon Darnell    2025-03-28 21:52:21 -0500
committer S. Solomon Darnell    2025-03-28 21:52:21 -0500
commit    4a52a71956a8d46fcb7294ac71734504bb09bcc2 (patch)
tree      ee3dc5af3b6313e921cd920906356f5d4febc4ed /.venv/lib/python3.12/site-packages/litellm/llms/cohere/rerank_v2/transformation.py
parent    cc961e04ba734dd72309fb548a2f97d67d578813 (diff)
download  gn-ai-master.tar.gz
two versions of R2R are here (HEAD, master)
Diffstat (limited to '.venv/lib/python3.12/site-packages/litellm/llms/cohere/rerank_v2/transformation.py')
-rw-r--r--  .venv/lib/python3.12/site-packages/litellm/llms/cohere/rerank_v2/transformation.py  80
1 files changed, 80 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/litellm/llms/cohere/rerank_v2/transformation.py b/.venv/lib/python3.12/site-packages/litellm/llms/cohere/rerank_v2/transformation.py
new file mode 100644
index 00000000..a93cb982
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/litellm/llms/cohere/rerank_v2/transformation.py
@@ -0,0 +1,80 @@
+from typing import Any, Dict, List, Optional, Union
+
+from litellm.llms.cohere.rerank.transformation import CohereRerankConfig
+from litellm.types.rerank import OptionalRerankParams, RerankRequest
+
+class CohereRerankV2Config(CohereRerankConfig):
+ """
+ Reference: https://docs.cohere.com/v2/reference/rerank
+ """
+
+ def __init__(self) -> None:
+ pass
+
+ def get_complete_url(self, api_base: Optional[str], model: str) -> str:
+ if api_base:
+ # Remove trailing slashes and ensure clean base URL
+ api_base = api_base.rstrip("/")
+ if not api_base.endswith("/v2/rerank"):
+ api_base = f"{api_base}/v2/rerank"
+ return api_base
+ return "https://api.cohere.ai/v2/rerank"
+
+ def get_supported_cohere_rerank_params(self, model: str) -> list:
+ return [
+ "query",
+ "documents",
+ "top_n",
+ "max_tokens_per_doc",
+ "rank_fields",
+ "return_documents",
+ ]
+
+ def map_cohere_rerank_params(
+ self,
+ non_default_params: Optional[dict],
+ model: str,
+ drop_params: bool,
+ query: str,
+ documents: List[Union[str, Dict[str, Any]]],
+ custom_llm_provider: Optional[str] = None,
+ top_n: Optional[int] = None,
+ rank_fields: Optional[List[str]] = None,
+ return_documents: Optional[bool] = True,
+ max_chunks_per_doc: Optional[int] = None,
+ max_tokens_per_doc: Optional[int] = None,
+ ) -> OptionalRerankParams:
+ """
+ Map Cohere rerank params
+
+ No mapping required - returns all supported params
+ """
+ return OptionalRerankParams(
+ query=query,
+ documents=documents,
+ top_n=top_n,
+ rank_fields=rank_fields,
+ return_documents=return_documents,
+ max_tokens_per_doc=max_tokens_per_doc,
+ )
+
+ def transform_rerank_request(
+ self,
+ model: str,
+ optional_rerank_params: OptionalRerankParams,
+ headers: dict,
+ ) -> dict:
+ if "query" not in optional_rerank_params:
+ raise ValueError("query is required for Cohere rerank")
+ if "documents" not in optional_rerank_params:
+ raise ValueError("documents is required for Cohere rerank")
+ rerank_request = RerankRequest(
+ model=model,
+ query=optional_rerank_params["query"],
+ documents=optional_rerank_params["documents"],
+ top_n=optional_rerank_params.get("top_n", None),
+ rank_fields=optional_rerank_params.get("rank_fields", None),
+ return_documents=optional_rerank_params.get("return_documents", None),
+ max_tokens_per_doc=optional_rerank_params.get("max_tokens_per_doc", None),
+ )
+ return rerank_request.model_dump(exclude_none=True) \ No newline at end of file
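
For orientation, below is a minimal, hypothetical sketch (not part of the commit) of how the class added by this diff could be exercised. It assumes the litellm checkout from this tree is importable and that CohereRerankV2Config, OptionalRerankParams, and transform_rerank_request behave exactly as shown above; the model name, query, and documents are placeholders.

    # Minimal sketch (not part of the commit): exercising CohereRerankV2Config
    # as defined in the diff above. Model name and inputs are placeholders.
    from litellm.llms.cohere.rerank_v2.transformation import CohereRerankV2Config

    config = CohereRerankV2Config()

    # Endpoint resolution: a custom base URL gets "/v2/rerank" appended once;
    # with no base URL the public Cohere endpoint is returned.
    assert config.get_complete_url("https://api.cohere.ai", "rerank-v3.5") == (
        "https://api.cohere.ai/v2/rerank"
    )
    assert config.get_complete_url(None, "rerank-v3.5") == "https://api.cohere.ai/v2/rerank"

    # Parameter mapping: the supported params are passed through unchanged.
    params = config.map_cohere_rerank_params(
        non_default_params=None,
        model="rerank-v3.5",
        drop_params=False,
        query="What is the capital of France?",
        documents=["Paris is the capital of France.", "Berlin is in Germany."],
        top_n=1,
    )

    # Request transformation: validates query/documents and builds the JSON body
    # that would be sent to /v2/rerank (None values are dropped).
    body = config.transform_rerank_request(
        model="rerank-v3.5",
        optional_rerank_params=params,
        headers={},
    )
    # body == {"model": "rerank-v3.5", "query": "...", "documents": [...],
    #          "top_n": 1, "return_documents": True}

Design-wise, the new class subclasses CohereRerankConfig from the v1 rerank transformation and overrides only what differs in the v2 API: requests are routed to /v2/rerank, and max_tokens_per_doc is advertised and forwarded in place of the v1-era max_chunks_per_doc (which is still accepted in the signature but not mapped).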