author     S. Solomon Darnell  2025-03-28 21:52:21 -0500
committer  S. Solomon Darnell  2025-03-28 21:52:21 -0500
commit     4a52a71956a8d46fcb7294ac71734504bb09bcc2 (patch)
tree       ee3dc5af3b6313e921cd920906356f5d4febc4ed /.venv/lib/python3.12/site-packages/litellm/llms/cohere/embed
parent     cc961e04ba734dd72309fb548a2f97d67d578813 (diff)

two versions of R2R are here (HEAD, master)
Diffstat (limited to '.venv/lib/python3.12/site-packages/litellm/llms/cohere/embed')
-rw-r--r--  .venv/lib/python3.12/site-packages/litellm/llms/cohere/embed/handler.py         178
-rw-r--r--  .venv/lib/python3.12/site-packages/litellm/llms/cohere/embed/transformation.py  153
2 files changed, 331 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/litellm/llms/cohere/embed/handler.py b/.venv/lib/python3.12/site-packages/litellm/llms/cohere/embed/handler.py
new file mode 100644
index 00000000..e7f22ea7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/litellm/llms/cohere/embed/handler.py
@@ -0,0 +1,178 @@
+import json
+from typing import Any, Callable, Optional, Union
+
+import httpx
+
+import litellm
+from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
+from litellm.llms.custom_httpx.http_handler import (
+    AsyncHTTPHandler,
+    HTTPHandler,
+    get_async_httpx_client,
+)
+from litellm.types.llms.bedrock import CohereEmbeddingRequest
+from litellm.types.utils import EmbeddingResponse
+
+from .transformation import CohereEmbeddingConfig
+
+
+def validate_environment(api_key: Optional[str], headers: dict) -> dict:
+    headers.update(
+        {
+            "Request-Source": "unspecified:litellm",
+            "accept": "application/json",
+            "content-type": "application/json",
+        }
+    )
+    if api_key:
+        headers["Authorization"] = f"Bearer {api_key}"
+    return headers
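+
+# e.g. validate_environment("<api-key>", {}) returns
+# {"Request-Source": "unspecified:litellm", "accept": "application/json",
+#  "content-type": "application/json", "Authorization": "Bearer <api-key>"}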
+
+
+class CohereError(Exception):
+    def __init__(self, status_code, message):
+        self.status_code = status_code
+        self.message = message
+        self.request = httpx.Request(
+            method="POST", url="https://api.cohere.ai/v1/embed"
+        )
+        self.response = httpx.Response(status_code=status_code, request=self.request)
+        super().__init__(
+            self.message
+        )  # Call the base class constructor with the parameters it needs
+
+
+async def async_embedding(
+    model: str,
+    data: Union[dict, CohereEmbeddingRequest],
+    input: list,
+    model_response: litellm.utils.EmbeddingResponse,
+    timeout: Optional[Union[float, httpx.Timeout]],
+    logging_obj: LiteLLMLoggingObj,
+    optional_params: dict,
+    api_base: str,
+    api_key: Optional[str],
+    headers: dict,
+    encoding: Callable,
+    client: Optional[AsyncHTTPHandler] = None,
+):
+
+    ## LOGGING
+    logging_obj.pre_call(
+        input=input,
+        api_key=api_key,
+        additional_args={
+            "complete_input_dict": data,
+            "headers": headers,
+            "api_base": api_base,
+        },
+    )
+    ## COMPLETION CALL
+
+    if client is None:
+        client = get_async_httpx_client(
+            llm_provider=litellm.LlmProviders.COHERE,
+            params={"timeout": timeout},
+        )
+
+    try:
+        response = await client.post(api_base, headers=headers, data=json.dumps(data))
+    except httpx.HTTPStatusError as e:
+        ## LOGGING
+        logging_obj.post_call(
+            input=input,
+            api_key=api_key,
+            additional_args={"complete_input_dict": data},
+            original_response=e.response.text,
+        )
+        raise e
+    except Exception as e:
+        ## LOGGING
+        logging_obj.post_call(
+            input=input,
+            api_key=api_key,
+            additional_args={"complete_input_dict": data},
+            original_response=str(e),
+        )
+        raise e
+
+    ## PROCESS RESPONSE ##
+    return CohereEmbeddingConfig()._transform_response(
+        response=response,
+        api_key=api_key,
+        logging_obj=logging_obj,
+        data=data,
+        model_response=model_response,
+        model=model,
+        encoding=encoding,
+        input=input,
+    )
+
+
+def embedding(
+    model: str,
+    input: list,
+    model_response: EmbeddingResponse,
+    logging_obj: LiteLLMLoggingObj,
+    optional_params: dict,
+    headers: dict,
+    encoding: Any,
+    data: Optional[Union[dict, CohereEmbeddingRequest]] = None,
+    complete_api_base: Optional[str] = None,
+    api_key: Optional[str] = None,
+    aembedding: Optional[bool] = None,
+    timeout: Optional[Union[float, httpx.Timeout]] = httpx.Timeout(None),
+    client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None,
+):
+    headers = validate_environment(api_key, headers=headers)
+    embed_url = complete_api_base or "https://api.cohere.ai/v1/embed"
+
+    data = data or CohereEmbeddingConfig()._transform_request(
+        model=model, input=input, inference_params=optional_params
+    )
+
+    ## ROUTING
+    if aembedding is True:
+        return async_embedding(
+            model=model,
+            data=data,
+            input=input,
+            model_response=model_response,
+            timeout=timeout,
+            logging_obj=logging_obj,
+            optional_params=optional_params,
+            api_base=embed_url,
+            api_key=api_key,
+            headers=headers,
+            encoding=encoding,
+            client=(
+                client
+                if client is not None and isinstance(client, AsyncHTTPHandler)
+                else None
+            ),
+        )
+
+    ## LOGGING
+    logging_obj.pre_call(
+        input=input,
+        api_key=api_key,
+        additional_args={"complete_input_dict": data},
+    )
+
+    ## COMPLETION CALL
+    if client is None or not isinstance(client, HTTPHandler):
+        client = HTTPHandler(concurrent_limit=1)
+
+    response = client.post(embed_url, headers=headers, data=json.dumps(data))
+
+    return CohereEmbeddingConfig()._transform_response(
+        response=response,
+        api_key=api_key,
+        logging_obj=logging_obj,
+        data=data,
+        model_response=model_response,
+        model=model,
+        encoding=encoding,
+        input=input,
+    )
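
For context, a minimal sketch of how this handler is typically reached through
litellm's public API (the model name and key below are placeholders):

    import litellm

    response = litellm.embedding(
        model="cohere/embed-english-v3.0",  # the "cohere/" prefix routes here
        input=["hello world"],
        api_key="<COHERE_API_KEY>",
    )
    print(response.data[0]["embedding"][:4])  # first few floats of the vector
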
diff --git a/.venv/lib/python3.12/site-packages/litellm/llms/cohere/embed/transformation.py b/.venv/lib/python3.12/site-packages/litellm/llms/cohere/embed/transformation.py
new file mode 100644
index 00000000..22e157a0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/litellm/llms/cohere/embed/transformation.py
@@ -0,0 +1,153 @@
+"""
+Transformation logic from OpenAI /v1/embeddings format to Cohere's /v1/embed format.
+
+Why a separate file? It keeps the transformation logic easy to follow.
+
+Covers:
+- v3 embedding models
+- v2 embedding models
+
+Docs - https://docs.cohere.com/v2/reference/embed
+"""
+
+from typing import Any, List, Optional, Union
+
+import httpx
+
+from litellm import COHERE_DEFAULT_EMBEDDING_INPUT_TYPE
+from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
+from litellm.types.llms.bedrock import (
+    CohereEmbeddingRequest,
+    CohereEmbeddingRequestWithModel,
+)
+from litellm.types.utils import EmbeddingResponse, PromptTokensDetailsWrapper, Usage
+from litellm.utils import is_base64_encoded
+
+
+class CohereEmbeddingConfig:
+    """
+    Reference: https://docs.cohere.com/v2/reference/embed
+    """
+
+    def __init__(self) -> None:
+        pass
+
+    def get_supported_openai_params(self) -> List[str]:
+        return ["encoding_format"]
+
+    def map_openai_params(
+        self, non_default_params: dict, optional_params: dict
+    ) -> dict:
+        for k, v in non_default_params.items():
+            if k == "encoding_format":
+                optional_params["embedding_types"] = v
+        return optional_params
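+
+    # e.g. map_openai_params({"encoding_format": "float"}, {}) returns
+    # {"embedding_types": "float"}; OpenAI's encoding_format maps onto
+    # Cohere's embedding_types parameter.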
+
+    def _is_v3_model(self, model: str) -> bool:
+        return "3" in model
+
+    def _transform_request(
+        self, model: str, input: List[str], inference_params: dict
+    ) -> CohereEmbeddingRequestWithModel:
+        # Treat the request as an image request only when every input string is
+        # base64 encoded; Cohere's embed API accepts images and texts separately.
+        is_encoded = bool(input) and all(
+            is_base64_encoded(input_str) for input_str in input
+        )
+
+        if is_encoded:
+            transformed_request = CohereEmbeddingRequestWithModel(
+                model=model,
+                images=input,
+                input_type="image",
+            )
+        else:
+            transformed_request = CohereEmbeddingRequestWithModel(
+                model=model,
+                texts=input,
+                input_type=COHERE_DEFAULT_EMBEDDING_INPUT_TYPE,
+            )
+
+        for k, v in inference_params.items():
+            transformed_request[k] = v  # type: ignore
+
+        return transformed_request
+
+    def _calculate_usage(self, input: List[str], encoding: Any, meta: dict) -> Usage:
+
+        input_tokens = 0
+
+        text_tokens: Optional[int] = meta.get("billed_units", {}).get("input_tokens")
+
+        image_tokens: Optional[int] = meta.get("billed_units", {}).get("images")
+
+        prompt_tokens_details: Optional[PromptTokensDetailsWrapper] = None
+        if image_tokens is None and text_tokens is None:
+            for text in input:
+                input_tokens += len(encoding.encode(text))
+        else:
+            prompt_tokens_details = PromptTokensDetailsWrapper(
+                image_tokens=image_tokens,
+                text_tokens=text_tokens,
+            )
+            if image_tokens:
+                input_tokens += image_tokens
+            if text_tokens:
+                input_tokens += text_tokens
+
+        return Usage(
+            prompt_tokens=input_tokens,
+            completion_tokens=0,
+            total_tokens=input_tokens,
+            prompt_tokens_details=prompt_tokens_details,
+        )
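+
+    # Illustrative: with meta={"billed_units": {"input_tokens": 5}} this returns
+    # Usage(prompt_tokens=5, completion_tokens=0, total_tokens=5, ...); when the
+    # response carries no billed_units, tokens are counted locally with
+    # encoding.encode().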
+
+    def _transform_response(
+        self,
+        response: httpx.Response,
+        api_key: Optional[str],
+        logging_obj: LiteLLMLoggingObj,
+        data: Union[dict, CohereEmbeddingRequest],
+        model_response: EmbeddingResponse,
+        model: str,
+        encoding: Any,
+        input: list,
+    ) -> EmbeddingResponse:
+
+        response_json = response.json()
+        ## LOGGING
+        logging_obj.post_call(
+            input=input,
+            api_key=api_key,
+            additional_args={"complete_input_dict": data},
+            original_response=response_json,
+        )
+        """
+            response 
+            {
+                'object': "list",
+                'data': [
+                
+                ]
+                'model', 
+                'usage'
+            }
+        """
+        embeddings = response_json["embeddings"]
+        output_data = []
+        for idx, embedding in enumerate(embeddings):
+            output_data.append(
+                {"object": "embedding", "index": idx, "embedding": embedding}
+            )
+        model_response.object = "list"
+        model_response.data = output_data
+        model_response.model = model
+
+        setattr(
+            model_response,
+            "usage",
+            self._calculate_usage(input, encoding, response_json.get("meta", {})),
+        )
+
+        return model_response
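
As a quick check of the request transformation above, a minimal sketch (this
uses litellm's internal, underscore-prefixed helpers, so the import path and
signatures may change between versions):

    from litellm.llms.cohere.embed.transformation import CohereEmbeddingConfig

    config = CohereEmbeddingConfig()
    payload = config._transform_request(
        model="embed-english-v3.0",
        input=["hello world"],
        inference_params={"embedding_types": ["float"]},
    )
    # payload now holds the Cohere-format request:
    # {"model": "embed-english-v3.0", "texts": ["hello world"],
    #  "input_type": COHERE_DEFAULT_EMBEDDING_INPUT_TYPE,
    #  "embedding_types": ["float"]}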