Diffstat (limited to '.venv/lib/python3.12/site-packages/litellm/llms/jina_ai')
-rw-r--r--  .venv/lib/python3.12/site-packages/litellm/llms/jina_ai/embedding/transformation.py    78
-rw-r--r--  .venv/lib/python3.12/site-packages/litellm/llms/jina_ai/rerank/handler.py                3
-rw-r--r--  .venv/lib/python3.12/site-packages/litellm/llms/jina_ai/rerank/transformation.py       143
3 files changed, 224 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/litellm/llms/jina_ai/embedding/transformation.py b/.venv/lib/python3.12/site-packages/litellm/llms/jina_ai/embedding/transformation.py
new file mode 100644
index 00000000..5263be90
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/litellm/llms/jina_ai/embedding/transformation.py
@@ -0,0 +1,78 @@
+"""
+Transformation logic from OpenAI /v1/embeddings format to Jina AI's `/v1/embeddings` format.
+
+Why a separate file? To make it easy to see how the transformation works.
+
+Docs - https://jina.ai/embeddings/
+"""
+
+import types
+from typing import List, Optional, Tuple
+
+from litellm import LlmProviders
+from litellm.secret_managers.main import get_secret_str
+
+
+class JinaAIEmbeddingConfig:
+ """
+ Reference: https://jina.ai/embeddings/
+ """
+
+ def __init__(
+ self,
+ ) -> None:
+ locals_ = locals().copy()
+ for key, value in locals_.items():
+ if key != "self" and value is not None:
+ setattr(self.__class__, key, value)
+
+ @classmethod
+ def get_config(cls):
+ return {
+ k: v
+ for k, v in cls.__dict__.items()
+ if not k.startswith("__")
+ and not isinstance(
+ v,
+ (
+ types.FunctionType,
+ types.BuiltinFunctionType,
+ classmethod,
+ staticmethod,
+ ),
+ )
+ and v is not None
+ }
+
+ def get_supported_openai_params(self) -> List[str]:
+ return ["dimensions"]
+
+ def map_openai_params(
+ self, non_default_params: dict, optional_params: dict
+ ) -> dict:
+ if "dimensions" in non_default_params:
+ optional_params["dimensions"] = non_default_params["dimensions"]
+ return optional_params
+
+ def _get_openai_compatible_provider_info(
+ self,
+ api_base: Optional[str],
+ api_key: Optional[str],
+ ) -> Tuple[str, Optional[str], Optional[str]]:
+ """
+ Returns:
+ Tuple[str, Optional[str], Optional[str]]:
+ - custom_llm_provider: str
+ - api_base: Optional[str]
+ - dynamic_api_key: Optional[str]
+ """
+ api_base = (
+ api_base or get_secret_str("JINA_AI_API_BASE") or "https://api.jina.ai/v1"
+ ) # type: ignore
+ dynamic_api_key = api_key or (
+ get_secret_str("JINA_AI_API_KEY")
+ or get_secret_str("JINA_AI_API_KEY")
+ or get_secret_str("JINA_AI_API_KEY")
+ or get_secret_str("JINA_AI_TOKEN")
+ )
+ return LlmProviders.JINA_AI.value, api_base, dynamic_api_key
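For orientation, here is a minimal usage sketch of the embedding config added above. The call sequence is an assumption for illustration; only the import path, the method names, and the fallback endpoint come from the file itself.

    # Hypothetical sketch: exercising JinaAIEmbeddingConfig directly.
    from litellm.llms.jina_ai.embedding.transformation import JinaAIEmbeddingConfig

    config = JinaAIEmbeddingConfig()

    # "dimensions" is the only OpenAI-compatible parameter advertised as supported.
    config.get_supported_openai_params()  # ["dimensions"]

    # map_openai_params copies "dimensions" through and ignores unsupported keys.
    optional_params = config.map_openai_params(
        non_default_params={"dimensions": 768, "encoding_format": "float"},
        optional_params={},
    )
    # optional_params == {"dimensions": 768}

    # Provider info resolution checks env vars first; with JINA_AI_API_BASE unset,
    # api_base falls back to the public endpoint.
    provider, api_base, api_key = config._get_openai_compatible_provider_info(
        api_base=None, api_key=None
    )
    # provider == "jina_ai"; api_base == "https://api.jina.ai/v1"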
diff --git a/.venv/lib/python3.12/site-packages/litellm/llms/jina_ai/rerank/handler.py b/.venv/lib/python3.12/site-packages/litellm/llms/jina_ai/rerank/handler.py
new file mode 100644
index 00000000..94076da4
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/litellm/llms/jina_ai/rerank/handler.py
@@ -0,0 +1,3 @@
+"""
+HTTP calling migrated to `llm_http_handler.py`
+"""
diff --git a/.venv/lib/python3.12/site-packages/litellm/llms/jina_ai/rerank/transformation.py b/.venv/lib/python3.12/site-packages/litellm/llms/jina_ai/rerank/transformation.py
new file mode 100644
index 00000000..8d0a9b14
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/litellm/llms/jina_ai/rerank/transformation.py
@@ -0,0 +1,143 @@
+"""
+Transformation logic from Cohere's /v1/rerank format to Jina AI's `/v1/rerank` format.
+
+Why a separate file? To make it easy to see how the transformation works.
+
+Docs - https://jina.ai/reranker
+"""
+
+import uuid
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+from httpx import URL, Response
+
+from litellm.llms.base_llm.chat.transformation import LiteLLMLoggingObj
+from litellm.llms.base_llm.rerank.transformation import BaseRerankConfig
+from litellm.types.rerank import (
+ OptionalRerankParams,
+ RerankBilledUnits,
+ RerankResponse,
+ RerankResponseMeta,
+ RerankTokens,
+)
+from litellm.types.utils import ModelInfo
+
+
+class JinaAIRerankConfig(BaseRerankConfig):
+ def get_supported_cohere_rerank_params(self, model: str) -> list:
+ return [
+ "query",
+ "top_n",
+ "documents",
+ "return_documents",
+ ]
+
+ def map_cohere_rerank_params(
+ self,
+ non_default_params: dict,
+ model: str,
+ drop_params: bool,
+ query: str,
+ documents: List[Union[str, Dict[str, Any]]],
+ custom_llm_provider: Optional[str] = None,
+ top_n: Optional[int] = None,
+ rank_fields: Optional[List[str]] = None,
+ return_documents: Optional[bool] = True,
+ max_chunks_per_doc: Optional[int] = None,
+ max_tokens_per_doc: Optional[int] = None,
+ ) -> OptionalRerankParams:
+ optional_params = {}
+ supported_params = self.get_supported_cohere_rerank_params(model)
+ for k, v in non_default_params.items():
+ if k in supported_params:
+ optional_params[k] = v
+ return OptionalRerankParams(
+ **optional_params,
+ )
+
+ def get_complete_url(self, api_base: Optional[str], model: str) -> str:
+ base_path = "/v1/rerank"
+
+ if api_base is None:
+ return "https://api.jina.ai/v1/rerank"
+ base = URL(api_base)
+ # Reconstruct URL with cleaned path
+ cleaned_base = str(base.copy_with(path=base_path))
+
+ return cleaned_base
+
+ def transform_rerank_request(
+ self, model: str, optional_rerank_params: OptionalRerankParams, headers: Dict
+ ) -> Dict:
+ return {"model": model, **optional_rerank_params}
+
+ def transform_rerank_response(
+ self,
+ model: str,
+ raw_response: Response,
+ model_response: RerankResponse,
+ logging_obj: LiteLLMLoggingObj,
+ api_key: Optional[str] = None,
+ request_data: Dict = {},
+ optional_params: Dict = {},
+ litellm_params: Dict = {},
+ ) -> RerankResponse:
+ if raw_response.status_code != 200:
+ raise Exception(raw_response.text)
+
+ logging_obj.post_call(original_response=raw_response.text)
+
+ _json_response = raw_response.json()
+
+ _billed_units = RerankBilledUnits(**_json_response.get("usage", {}))
+ _tokens = RerankTokens(**_json_response.get("usage", {}))
+ rerank_meta = RerankResponseMeta(billed_units=_billed_units, tokens=_tokens)
+
+ _results: Optional[List[dict]] = _json_response.get("results")
+
+ if _results is None:
+ raise ValueError(f"No results found in the response={_json_response}")
+
+ return RerankResponse(
+ id=_json_response.get("id") or str(uuid.uuid4()),
+ results=_results, # type: ignore
+ meta=rerank_meta,
+ ) # Return response
+
+ def validate_environment(
+ self, headers: Dict, model: str, api_key: Optional[str] = None
+ ) -> Dict:
+ if api_key is None:
+ raise ValueError(
+ "api_key is required. Set via `api_key` parameter or `JINA_AI_API_KEY` environment variable."
+ )
+ return {
+ "accept": "application/json",
+ "content-type": "application/json",
+ "authorization": f"Bearer {api_key}",
+ }
+
+ def calculate_rerank_cost(
+ self,
+ model: str,
+ custom_llm_provider: Optional[str] = None,
+ billed_units: Optional[RerankBilledUnits] = None,
+ model_info: Optional[ModelInfo] = None,
+ ) -> Tuple[float, float]:
+ """
+ Jina AI reranker is priced at $0.000000018 per token.
+ """
+ if (
+ model_info is None
+ or "input_cost_per_token" not in model_info
+ or model_info["input_cost_per_token"] is None
+ or billed_units is None
+ ):
+ return 0.0, 0.0
+
+ total_tokens = billed_units.get("total_tokens")
+ if total_tokens is None:
+ return 0.0, 0.0
+
+ input_cost = model_info["input_cost_per_token"] * total_tokens
+ return input_cost, 0.0
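As a rough companion to the rerank config above, a hedged sketch of the URL handling and the cost arithmetic. The model name, api_base, and token count below are illustrative assumptions, not values taken from litellm.

    # Hypothetical sketch: exercising JinaAIRerankConfig directly.
    from litellm.llms.jina_ai.rerank.transformation import JinaAIRerankConfig
    from litellm.types.rerank import RerankBilledUnits

    config = JinaAIRerankConfig()

    # get_complete_url replaces whatever path the api_base carries with /v1/rerank,
    # and falls back to the public endpoint when api_base is None.
    config.get_complete_url(api_base="https://example.invalid/custom", model="jina-reranker-v2")
    # -> "https://example.invalid/v1/rerank"
    config.get_complete_url(api_base=None, model="jina-reranker-v2")
    # -> "https://api.jina.ai/v1/rerank"

    # calculate_rerank_cost multiplies input_cost_per_token by total_tokens and
    # always reports 0.0 output cost. At the documented $0.000000018 per token,
    # 1,000 billed tokens cost 0.000000018 * 1000 = 0.000018 USD.
    input_cost, output_cost = config.calculate_rerank_cost(
        model="jina-reranker-v2",
        billed_units=RerankBilledUnits(total_tokens=1000),
        model_info={"input_cost_per_token": 1.8e-08},  # type: ignore[typeddict-item]
    )
    # input_cost == 1.8e-05, output_cost == 0.0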