path: root/.venv/lib/python3.12/site-packages/litellm/llms/base_llm
Diffstat (limited to '.venv/lib/python3.12/site-packages/litellm/llms/base_llm')
-rw-r--r--  .venv/lib/python3.12/site-packages/litellm/llms/base_llm/anthropic_messages/transformation.py    35
-rw-r--r--  .venv/lib/python3.12/site-packages/litellm/llms/base_llm/audio_transcription/transformation.py   73
-rw-r--r--  .venv/lib/python3.12/site-packages/litellm/llms/base_llm/base_model_iterator.py                  137
-rw-r--r--  .venv/lib/python3.12/site-packages/litellm/llms/base_llm/base_utils.py                           142
-rw-r--r--  .venv/lib/python3.12/site-packages/litellm/llms/base_llm/chat/transformation.py                  372
-rw-r--r--  .venv/lib/python3.12/site-packages/litellm/llms/base_llm/completion/transformation.py            74
-rw-r--r--  .venv/lib/python3.12/site-packages/litellm/llms/base_llm/embedding/transformation.py             88
-rw-r--r--  .venv/lib/python3.12/site-packages/litellm/llms/base_llm/image_variations/transformation.py      132
-rw-r--r--  .venv/lib/python3.12/site-packages/litellm/llms/base_llm/rerank/transformation.py                128
-rw-r--r--  .venv/lib/python3.12/site-packages/litellm/llms/base_llm/responses/transformation.py             141
10 files changed, 1322 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/litellm/llms/base_llm/anthropic_messages/transformation.py b/.venv/lib/python3.12/site-packages/litellm/llms/base_llm/anthropic_messages/transformation.py
new file mode 100644
index 00000000..7619ffbb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/litellm/llms/base_llm/anthropic_messages/transformation.py
@@ -0,0 +1,35 @@
+from abc import ABC, abstractmethod
+from typing import TYPE_CHECKING, Any, Optional
+
+if TYPE_CHECKING:
+    from litellm.litellm_core_utils.litellm_logging import Logging as _LiteLLMLoggingObj
+
+    LiteLLMLoggingObj = _LiteLLMLoggingObj
+else:
+    LiteLLMLoggingObj = Any
+
+
+class BaseAnthropicMessagesConfig(ABC):
+    @abstractmethod
+    def validate_environment(
+        self,
+        headers: dict,
+        model: str,
+        api_key: Optional[str] = None,
+    ) -> dict:
+        pass
+
+    @abstractmethod
+    def get_complete_url(self, api_base: Optional[str], model: str) -> str:
+        """
+        OPTIONAL
+
+        Get the complete url for the request
+
+        Some providers need `model` in `api_base`
+        """
+        return api_base or ""
+
+    @abstractmethod
+    def get_supported_anthropic_messages_params(self, model: str) -> list:
+        pass
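
To make the contract above concrete, here is a minimal sketch of a provider-specific config implementing the three abstract hooks. The class name, header key, default URL, and parameter list are illustrative assumptions, not part of litellm.

from typing import Optional

from litellm.llms.base_llm.anthropic_messages.transformation import (
    BaseAnthropicMessagesConfig,
)


class ExampleAnthropicMessagesConfig(BaseAnthropicMessagesConfig):
    def validate_environment(
        self,
        headers: dict,
        model: str,
        api_key: Optional[str] = None,
    ) -> dict:
        # Fail fast if no key is configured, then attach it as a header.
        if api_key is None:
            raise ValueError("api_key is required")
        return {**headers, "x-api-key": api_key}

    def get_complete_url(self, api_base: Optional[str], model: str) -> str:
        # Fall back to a hypothetical provider default when api_base is unset.
        return api_base or "https://api.example.com/v1/messages"

    def get_supported_anthropic_messages_params(self, model: str) -> list:
        # Illustrative subset of supported params.
        return ["max_tokens", "temperature", "stop_sequences"]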
diff --git a/.venv/lib/python3.12/site-packages/litellm/llms/base_llm/audio_transcription/transformation.py b/.venv/lib/python3.12/site-packages/litellm/llms/base_llm/audio_transcription/transformation.py
new file mode 100644
index 00000000..e550c574
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/litellm/llms/base_llm/audio_transcription/transformation.py
@@ -0,0 +1,73 @@
+from abc import ABC, abstractmethod
+from typing import TYPE_CHECKING, Any, List, Optional
+
+import httpx
+
+from litellm.llms.base_llm.chat.transformation import BaseConfig
+from litellm.types.llms.openai import (
+    AllMessageValues,
+    OpenAIAudioTranscriptionOptionalParams,
+)
+from litellm.types.utils import ModelResponse
+
+if TYPE_CHECKING:
+    from litellm.litellm_core_utils.litellm_logging import Logging as _LiteLLMLoggingObj
+
+    LiteLLMLoggingObj = _LiteLLMLoggingObj
+else:
+    LiteLLMLoggingObj = Any
+
+
+class BaseAudioTranscriptionConfig(BaseConfig, ABC):
+    @abstractmethod
+    def get_supported_openai_params(
+        self, model: str
+    ) -> List[OpenAIAudioTranscriptionOptionalParams]:
+        pass
+
+    def get_complete_url(
+        self,
+        api_base: Optional[str],
+        model: str,
+        optional_params: dict,
+        litellm_params: dict,
+        stream: Optional[bool] = None,
+    ) -> str:
+        """
+        OPTIONAL
+
+        Get the complete url for the request
+
+        Some providers need `model` in `api_base`
+        """
+        return api_base or ""
+
+    def transform_request(
+        self,
+        model: str,
+        messages: List[AllMessageValues],
+        optional_params: dict,
+        litellm_params: dict,
+        headers: dict,
+    ) -> dict:
+        raise NotImplementedError(
+            "AudioTranscriptionConfig does not need a request transformation for audio transcription models"
+        )
+
+    def transform_response(
+        self,
+        model: str,
+        raw_response: httpx.Response,
+        model_response: ModelResponse,
+        logging_obj: LiteLLMLoggingObj,
+        request_data: dict,
+        messages: List[AllMessageValues],
+        optional_params: dict,
+        litellm_params: dict,
+        encoding: Any,
+        api_key: Optional[str] = None,
+        json_mode: Optional[bool] = None,
+    ) -> ModelResponse:
+        raise NotImplementedError(
+            "AudioTranscriptionConfig does not need a response transformation for audio transcription models"
+        )
diff --git a/.venv/lib/python3.12/site-packages/litellm/llms/base_llm/base_model_iterator.py b/.venv/lib/python3.12/site-packages/litellm/llms/base_llm/base_model_iterator.py
new file mode 100644
index 00000000..67b1466c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/litellm/llms/base_llm/base_model_iterator.py
@@ -0,0 +1,137 @@
+import json
+from abc import abstractmethod
+from typing import Optional, Union
+
+from litellm.types.utils import GenericStreamingChunk, ModelResponseStream
+
+
+class BaseModelResponseIterator:
+    def __init__(
+        self, streaming_response, sync_stream: bool, json_mode: Optional[bool] = False
+    ):
+        self.streaming_response = streaming_response
+        self.response_iterator = self.streaming_response
+        self.json_mode = json_mode
+
+    def chunk_parser(
+        self, chunk: dict
+    ) -> Union[GenericStreamingChunk, ModelResponseStream]:
+        return GenericStreamingChunk(
+            text="",
+            is_finished=False,
+            finish_reason="",
+            usage=None,
+            index=0,
+            tool_use=None,
+        )
+
+    # Sync iterator
+    def __iter__(self):
+        return self
+
+    def _handle_string_chunk(
+        self, str_line: str
+    ) -> Union[GenericStreamingChunk, ModelResponseStream]:
+        # chunk is a str at this point
+        if "[DONE]" in str_line:
+            return GenericStreamingChunk(
+                text="",
+                is_finished=True,
+                finish_reason="stop",
+                usage=None,
+                index=0,
+                tool_use=None,
+            )
+        elif str_line.startswith("data:"):
+            data_json = json.loads(str_line[5:])
+            return self.chunk_parser(chunk=data_json)
+        else:
+            return GenericStreamingChunk(
+                text="",
+                is_finished=False,
+                finish_reason="",
+                usage=None,
+                index=0,
+                tool_use=None,
+            )
+
+    def __next__(self):
+        try:
+            chunk = self.response_iterator.__next__()
+        except StopIteration:
+            raise StopIteration
+        except ValueError as e:
+            raise RuntimeError(f"Error receiving chunk from stream: {e}")
+
+        try:
+            str_line = chunk
+            if isinstance(chunk, bytes):  # Handle binary data
+                str_line = chunk.decode("utf-8")  # Convert bytes to string
+                index = str_line.find("data:")
+                if index != -1:
+                    str_line = str_line[index:]
+            # chunk is a str at this point
+            return self._handle_string_chunk(str_line=str_line)
+        except StopIteration:
+            raise StopIteration
+        except ValueError as e:
+            raise RuntimeError(f"Error parsing chunk: {e},\nReceived chunk: {chunk}")
+
+    # Async iterator
+    def __aiter__(self):
+        self.async_response_iterator = self.streaming_response.__aiter__()
+        return self
+
+    async def __anext__(self):
+        try:
+            chunk = await self.async_response_iterator.__anext__()
+        except StopAsyncIteration:
+            raise StopAsyncIteration
+        except ValueError as e:
+            raise RuntimeError(f"Error receiving chunk from stream: {e}")
+
+        try:
+            str_line = chunk
+            if isinstance(chunk, bytes):  # Handle binary data
+                str_line = chunk.decode("utf-8")  # Convert bytes to string
+                index = str_line.find("data:")
+                if index != -1:
+                    str_line = str_line[index:]
+
+            # chunk is a str at this point
+            return self._handle_string_chunk(str_line=str_line)
+        except StopAsyncIteration:
+            raise StopAsyncIteration
+        except ValueError as e:
+            raise RuntimeError(f"Error parsing chunk: {e},\nReceived chunk: {chunk}")
+
+
+class FakeStreamResponseIterator:
+    def __init__(self, model_response, json_mode: Optional[bool] = False):
+        self.model_response = model_response
+        self.json_mode = json_mode
+        self.is_done = False
+
+    # Sync iterator
+    def __iter__(self):
+        return self
+
+    @abstractmethod
+    def chunk_parser(self, chunk: dict) -> GenericStreamingChunk:
+        pass
+
+    def __next__(self):
+        if self.is_done:
+            raise StopIteration
+        self.is_done = True
+        return self.chunk_parser(self.model_response)
+
+    # Async iterator
+    def __aiter__(self):
+        return self
+
+    async def __anext__(self):
+        if self.is_done:
+            raise StopAsyncIteration
+        self.is_done = True
+        return self.chunk_parser(self.model_response)
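
For context on how this iterator is typically used, a rough sketch of a provider-specific subclass follows: the base class above already strips the SSE `data:` prefix and handles `[DONE]`, so a subclass only maps the provider's parsed JSON chunk to a GenericStreamingChunk. The chunk shape (`delta`/`done` keys) is an invented example, not any real provider's format.

from litellm.llms.base_llm.base_model_iterator import BaseModelResponseIterator
from litellm.types.utils import GenericStreamingChunk


class ExampleResponseIterator(BaseModelResponseIterator):
    def chunk_parser(self, chunk: dict) -> GenericStreamingChunk:
        # Map the (hypothetical) provider chunk to litellm's generic chunk type.
        done = bool(chunk.get("done"))
        return GenericStreamingChunk(
            text=chunk.get("delta", ""),
            is_finished=done,
            finish_reason="stop" if done else "",
            usage=None,
            index=0,
            tool_use=None,
        )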
diff --git a/.venv/lib/python3.12/site-packages/litellm/llms/base_llm/base_utils.py b/.venv/lib/python3.12/site-packages/litellm/llms/base_llm/base_utils.py
new file mode 100644
index 00000000..919cdbfd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/litellm/llms/base_llm/base_utils.py
@@ -0,0 +1,142 @@
+"""
+Utility functions for base LLM classes.
+"""
+
+import copy
+from abc import ABC, abstractmethod
+from typing import List, Optional, Type, Union
+
+from openai.lib import _parsing, _pydantic
+from pydantic import BaseModel
+
+from litellm._logging import verbose_logger
+from litellm.types.llms.openai import AllMessageValues
+from litellm.types.utils import ProviderSpecificModelInfo
+
+
+class BaseLLMModelInfo(ABC):
+    def get_provider_info(
+        self,
+        model: str,
+    ) -> Optional[ProviderSpecificModelInfo]:
+        return None
+
+    @abstractmethod
+    def get_models(self) -> List[str]:
+        pass
+
+    @staticmethod
+    @abstractmethod
+    def get_api_key(api_key: Optional[str] = None) -> Optional[str]:
+        pass
+
+    @staticmethod
+    @abstractmethod
+    def get_api_base(api_base: Optional[str] = None) -> Optional[str]:
+        pass
+
+    @staticmethod
+    @abstractmethod
+    def get_base_model(model: str) -> Optional[str]:
+        """
+        Returns the base model name from the given model name.
+
+        Some providers, like Bedrock, can receive model=`invoke/anthropic.claude-3-opus-20240229-v1:0` or `converse/anthropic.claude-3-opus-20240229-v1:0`;
+            this function will return `anthropic.claude-3-opus-20240229-v1:0`.
+        """
+        pass
+
+
+def _dict_to_response_format_helper(
+    response_format: dict, ref_template: Optional[str] = None
+) -> dict:
+    if ref_template is not None and response_format.get("type") == "json_schema":
+        # Deep copy to avoid modifying original
+        modified_format = copy.deepcopy(response_format)
+        schema = modified_format["json_schema"]["schema"]
+
+        # Update all $ref values in the schema
+        def update_refs(schema):
+            stack = [(schema, [])]
+            visited = set()
+
+            while stack:
+                obj, path = stack.pop()
+                obj_id = id(obj)
+
+                if obj_id in visited:
+                    continue
+                visited.add(obj_id)
+
+                if isinstance(obj, dict):
+                    if "$ref" in obj:
+                        ref_path = obj["$ref"]
+                        model_name = ref_path.split("/")[-1]
+                        obj["$ref"] = ref_template.format(model=model_name)
+
+                    for k, v in obj.items():
+                        if isinstance(v, (dict, list)):
+                            stack.append((v, path + [k]))
+
+                elif isinstance(obj, list):
+                    for i, item in enumerate(obj):
+                        if isinstance(item, (dict, list)):
+                            stack.append((item, path + [i]))
+
+        update_refs(schema)
+        return modified_format
+    return response_format
+
+
+def type_to_response_format_param(
+    response_format: Optional[Union[Type[BaseModel], dict]],
+    ref_template: Optional[str] = None,
+) -> Optional[dict]:
+    """
+    Re-implementation of openai's 'type_to_response_format_param' function
+
+    Used for converting pydantic object to api schema.
+    """
+    if response_format is None:
+        return None
+
+    if isinstance(response_format, dict):
+        return _dict_to_response_format_helper(response_format, ref_template)
+
+    # type checkers don't narrow the negation of a `TypeGuard` as it isn't
+    # a safe default behaviour but we know that at this point the `response_format`
+    # can only be a `type`
+    if not _parsing._completions.is_basemodel_type(response_format):
+        raise TypeError(f"Unsupported response_format type - {response_format}")
+
+    if ref_template is not None:
+        schema = response_format.model_json_schema(ref_template=ref_template)
+    else:
+        schema = _pydantic.to_strict_json_schema(response_format)
+
+    return {
+        "type": "json_schema",
+        "json_schema": {
+            "schema": schema,
+            "name": response_format.__name__,
+            "strict": True,
+        },
+    }
+
+
+def map_developer_role_to_system_role(
+    messages: List[AllMessageValues],
+) -> List[AllMessageValues]:
+    """
+    Translate `developer` role to `system` role for non-OpenAI providers.
+    """
+    new_messages: List[AllMessageValues] = []
+    for m in messages:
+        if m["role"] == "developer":
+            verbose_logger.debug(
+                "Translating developer role to system role for non-OpenAI providers."
+            )  # ensure user knows what's happening with their input.
+            new_messages.append({"role": "system", "content": m["content"]})
+        else:
+            new_messages.append(m)
+    return new_messages
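
A small usage sketch for `type_to_response_format_param`: converting a pydantic model into the OpenAI-style `json_schema` dict built above. The `Invoice` model is an arbitrary example.

from pydantic import BaseModel

from litellm.llms.base_llm.base_utils import type_to_response_format_param


class Invoice(BaseModel):
    customer: str
    total: float


response_format = type_to_response_format_param(Invoice)
# Expected shape, per the implementation above:
# {"type": "json_schema",
#  "json_schema": {"schema": {...}, "name": "Invoice", "strict": True}}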
diff --git a/.venv/lib/python3.12/site-packages/litellm/llms/base_llm/chat/transformation.py b/.venv/lib/python3.12/site-packages/litellm/llms/base_llm/chat/transformation.py
new file mode 100644
index 00000000..1b5a6bc5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/litellm/llms/base_llm/chat/transformation.py
@@ -0,0 +1,372 @@
+"""
+Common base config for all LLM providers
+"""
+
+import types
+from abc import ABC, abstractmethod
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    AsyncIterator,
+    Iterator,
+    List,
+    Optional,
+    Type,
+    Union,
+)
+
+import httpx
+from pydantic import BaseModel
+
+from litellm.constants import RESPONSE_FORMAT_TOOL_NAME
+from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
+from litellm.types.llms.openai import (
+    AllMessageValues,
+    ChatCompletionToolChoiceFunctionParam,
+    ChatCompletionToolChoiceObjectParam,
+    ChatCompletionToolParam,
+    ChatCompletionToolParamFunctionChunk,
+)
+from litellm.types.utils import ModelResponse
+from litellm.utils import CustomStreamWrapper
+
+from ..base_utils import (
+    map_developer_role_to_system_role,
+    type_to_response_format_param,
+)
+
+if TYPE_CHECKING:
+    from litellm.litellm_core_utils.litellm_logging import Logging as _LiteLLMLoggingObj
+
+    LiteLLMLoggingObj = _LiteLLMLoggingObj
+else:
+    LiteLLMLoggingObj = Any
+
+
+class BaseLLMException(Exception):
+    def __init__(
+        self,
+        status_code: int,
+        message: str,
+        headers: Optional[Union[dict, httpx.Headers]] = None,
+        request: Optional[httpx.Request] = None,
+        response: Optional[httpx.Response] = None,
+        body: Optional[dict] = None,
+    ):
+        self.status_code = status_code
+        self.message: str = message
+        self.headers = headers
+        if request:
+            self.request = request
+        else:
+            self.request = httpx.Request(
+                method="POST", url="https://docs.litellm.ai/docs"
+            )
+        if response:
+            self.response = response
+        else:
+            self.response = httpx.Response(
+                status_code=status_code, request=self.request
+            )
+        self.body = body
+        super().__init__(
+            self.message
+        )  # Call the base class constructor with the parameters it needs
+
+
+class BaseConfig(ABC):
+    def __init__(self):
+        pass
+
+    @classmethod
+    def get_config(cls):
+        return {
+            k: v
+            for k, v in cls.__dict__.items()
+            if not k.startswith("__")
+            and not k.startswith("_abc")
+            and not isinstance(
+                v,
+                (
+                    types.FunctionType,
+                    types.BuiltinFunctionType,
+                    classmethod,
+                    staticmethod,
+                ),
+            )
+            and v is not None
+        }
+
+    def get_json_schema_from_pydantic_object(
+        self, response_format: Optional[Union[Type[BaseModel], dict]]
+    ) -> Optional[dict]:
+        return type_to_response_format_param(response_format=response_format)
+
+    def should_fake_stream(
+        self,
+        model: Optional[str],
+        stream: Optional[bool],
+        custom_llm_provider: Optional[str] = None,
+    ) -> bool:
+        """
+        Returns True if the model/provider should fake stream
+        """
+        return False
+
+    def _add_tools_to_optional_params(self, optional_params: dict, tools: List) -> dict:
+        """
+        Helper util to add tools to optional_params.
+        """
+        if "tools" not in optional_params:
+            optional_params["tools"] = tools
+        else:
+            optional_params["tools"] = [
+                *optional_params["tools"],
+                *tools,
+            ]
+        return optional_params
+
+    def translate_developer_role_to_system_role(
+        self,
+        messages: List[AllMessageValues],
+    ) -> List[AllMessageValues]:
+        """
+        Translate `developer` role to `system` role for non-OpenAI providers.
+
+        Overridden by OpenAI/Azure
+        """
+        return map_developer_role_to_system_role(messages=messages)
+
+    def should_retry_llm_api_inside_llm_translation_on_http_error(
+        self, e: httpx.HTTPStatusError, litellm_params: dict
+    ) -> bool:
+        """
+        Returns True if the model/provider should retry the LLM API on UnprocessableEntityError
+
+        Overridden by Azure AI, where different models support different parameters
+        """
+        return False
+
+    def transform_request_on_unprocessable_entity_error(
+        self, e: httpx.HTTPStatusError, request_data: dict
+    ) -> dict:
+        """
+        Transform the request data on UnprocessableEntityError
+        """
+        return request_data
+
+    @property
+    def max_retry_on_unprocessable_entity_error(self) -> int:
+        """
+        Returns the max retry count for UnprocessableEntityError
+
+        Used if `should_retry_llm_api_inside_llm_translation_on_http_error` is True
+        """
+        return 0
+
+    @abstractmethod
+    def get_supported_openai_params(self, model: str) -> list:
+        pass
+
+    def _add_response_format_to_tools(
+        self,
+        optional_params: dict,
+        value: dict,
+        is_response_format_supported: bool,
+        enforce_tool_choice: bool = True,
+    ) -> dict:
+        """
+        Follow similar approach to anthropic - translate to a single tool call.
+
+        When using tools in this way: - https://docs.anthropic.com/en/docs/build-with-claude/tool-use#json-mode
+        - You usually want to provide a single tool
+        - You should set tool_choice (see Forcing tool use) to instruct the model to explicitly use that tool
+        - Remember that the model will pass the input to the tool, so the name of the tool and description should be from the model’s perspective.
+
+        Add response format to tools
+
+        This is used to translate response_format to a tool call, for models/APIs that don't support response_format directly.
+        """
+        json_schema: Optional[dict] = None
+        if "response_schema" in value:
+            json_schema = value["response_schema"]
+        elif "json_schema" in value:
+            json_schema = value["json_schema"]["schema"]
+
+        if json_schema and not is_response_format_supported:
+
+            _tool_choice = ChatCompletionToolChoiceObjectParam(
+                type="function",
+                function=ChatCompletionToolChoiceFunctionParam(
+                    name=RESPONSE_FORMAT_TOOL_NAME
+                ),
+            )
+
+            _tool = ChatCompletionToolParam(
+                type="function",
+                function=ChatCompletionToolParamFunctionChunk(
+                    name=RESPONSE_FORMAT_TOOL_NAME, parameters=json_schema
+                ),
+            )
+
+            optional_params.setdefault("tools", [])
+            optional_params["tools"].append(_tool)
+            if enforce_tool_choice:
+                optional_params["tool_choice"] = _tool_choice
+
+            optional_params["json_mode"] = True
+        elif is_response_format_supported:
+            optional_params["response_format"] = value
+        return optional_params
+
+    @abstractmethod
+    def map_openai_params(
+        self,
+        non_default_params: dict,
+        optional_params: dict,
+        model: str,
+        drop_params: bool,
+    ) -> dict:
+        pass
+
+    @abstractmethod
+    def validate_environment(
+        self,
+        headers: dict,
+        model: str,
+        messages: List[AllMessageValues],
+        optional_params: dict,
+        api_key: Optional[str] = None,
+        api_base: Optional[str] = None,
+    ) -> dict:
+        pass
+
+    def sign_request(
+        self,
+        headers: dict,
+        optional_params: dict,
+        request_data: dict,
+        api_base: str,
+        model: Optional[str] = None,
+        stream: Optional[bool] = None,
+        fake_stream: Optional[bool] = None,
+    ) -> dict:
+        """
+        Some providers, like Bedrock, require signing the request. The sign request function needs access to `request_data` and `complete_url`.
+        Args:
+            headers: dict
+            optional_params: dict
+            request_data: dict - the request body being sent in http request
+            api_base: str - the complete url being sent in http request
+        Returns:
+            dict - the signed headers
+
+        Update the headers with the signed headers in this function. The return values will be sent as headers in the http request.
+        """
+        return headers
+
+    def get_complete_url(
+        self,
+        api_base: Optional[str],
+        model: str,
+        optional_params: dict,
+        litellm_params: dict,
+        stream: Optional[bool] = None,
+    ) -> str:
+        """
+        OPTIONAL
+
+        Get the complete url for the request
+
+        Some providers need `model` in `api_base`
+        """
+        if api_base is None:
+            raise ValueError("api_base is required")
+        return api_base
+
+    @abstractmethod
+    def transform_request(
+        self,
+        model: str,
+        messages: List[AllMessageValues],
+        optional_params: dict,
+        litellm_params: dict,
+        headers: dict,
+    ) -> dict:
+        pass
+
+    @abstractmethod
+    def transform_response(
+        self,
+        model: str,
+        raw_response: httpx.Response,
+        model_response: ModelResponse,
+        logging_obj: LiteLLMLoggingObj,
+        request_data: dict,
+        messages: List[AllMessageValues],
+        optional_params: dict,
+        litellm_params: dict,
+        encoding: Any,
+        api_key: Optional[str] = None,
+        json_mode: Optional[bool] = None,
+    ) -> ModelResponse:
+        pass
+
+    @abstractmethod
+    def get_error_class(
+        self, error_message: str, status_code: int, headers: Union[dict, httpx.Headers]
+    ) -> BaseLLMException:
+        pass
+
+    def get_model_response_iterator(
+        self,
+        streaming_response: Union[Iterator[str], AsyncIterator[str], ModelResponse],
+        sync_stream: bool,
+        json_mode: Optional[bool] = False,
+    ) -> Any:
+        pass
+
+    def get_async_custom_stream_wrapper(
+        self,
+        model: str,
+        custom_llm_provider: str,
+        logging_obj: LiteLLMLoggingObj,
+        api_base: str,
+        headers: dict,
+        data: dict,
+        messages: list,
+        client: Optional[AsyncHTTPHandler] = None,
+        json_mode: Optional[bool] = None,
+    ) -> CustomStreamWrapper:
+        raise NotImplementedError
+
+    def get_sync_custom_stream_wrapper(
+        self,
+        model: str,
+        custom_llm_provider: str,
+        logging_obj: LiteLLMLoggingObj,
+        api_base: str,
+        headers: dict,
+        data: dict,
+        messages: list,
+        client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None,
+        json_mode: Optional[bool] = None,
+    ) -> CustomStreamWrapper:
+        raise NotImplementedError
+
+    @property
+    def custom_llm_provider(self) -> Optional[str]:
+        return None
+
+    @property
+    def has_custom_stream_wrapper(self) -> bool:
+        return False
+
+    @property
+    def supports_stream_param_in_request_body(self) -> bool:
+        """
+        Some providers, like Bedrock Invoke, do not support the `stream` parameter in the request body.
+
+        By default, this returns True, since most providers do support it.
+        """
+        return True
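
To illustrate the JSON-mode translation described in `_add_response_format_to_tools`, here is the shape `optional_params` is expected to take when a provider does not support `response_format` natively; the schema is a made-up example and the tool name comes from `RESPONSE_FORMAT_TOOL_NAME`.

from litellm.constants import RESPONSE_FORMAT_TOOL_NAME

# A hypothetical json_schema response_format passed by the caller.
value = {
    "type": "json_schema",
    "json_schema": {
        "schema": {"type": "object", "properties": {"answer": {"type": "string"}}}
    },
}

# After _add_response_format_to_tools(optional_params={}, value=value,
# is_response_format_supported=False), optional_params should look like:
expected = {
    "tools": [
        {
            "type": "function",
            "function": {
                "name": RESPONSE_FORMAT_TOOL_NAME,
                "parameters": value["json_schema"]["schema"],
            },
        }
    ],
    "tool_choice": {
        "type": "function",
        "function": {"name": RESPONSE_FORMAT_TOOL_NAME},
    },
    "json_mode": True,
}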
diff --git a/.venv/lib/python3.12/site-packages/litellm/llms/base_llm/completion/transformation.py b/.venv/lib/python3.12/site-packages/litellm/llms/base_llm/completion/transformation.py
new file mode 100644
index 00000000..9432f02d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/litellm/llms/base_llm/completion/transformation.py
@@ -0,0 +1,74 @@
+from abc import ABC, abstractmethod
+from typing import TYPE_CHECKING, Any, List, Optional, Union
+
+import httpx
+
+from litellm.llms.base_llm.chat.transformation import BaseConfig
+from litellm.types.llms.openai import AllMessageValues, OpenAITextCompletionUserMessage
+from litellm.types.utils import ModelResponse
+
+if TYPE_CHECKING:
+    from litellm.litellm_core_utils.litellm_logging import Logging as _LiteLLMLoggingObj
+
+    LiteLLMLoggingObj = _LiteLLMLoggingObj
+else:
+    LiteLLMLoggingObj = Any
+
+
+class BaseTextCompletionConfig(BaseConfig, ABC):
+    @abstractmethod
+    def transform_text_completion_request(
+        self,
+        model: str,
+        messages: Union[List[AllMessageValues], List[OpenAITextCompletionUserMessage]],
+        optional_params: dict,
+        headers: dict,
+    ) -> dict:
+        return {}
+
+    def get_complete_url(
+        self,
+        api_base: Optional[str],
+        model: str,
+        optional_params: dict,
+        litellm_params: dict,
+        stream: Optional[bool] = None,
+    ) -> str:
+        """
+        OPTIONAL
+
+        Get the complete url for the request
+
+        Some providers need `model` in `api_base`
+        """
+        return api_base or ""
+
+    def transform_request(
+        self,
+        model: str,
+        messages: List[AllMessageValues],
+        optional_params: dict,
+        litellm_params: dict,
+        headers: dict,
+    ) -> dict:
+        raise NotImplementedError(
+            "AudioTranscriptionConfig does not need a request transformation for audio transcription models"
+        )
+
+    def transform_response(
+        self,
+        model: str,
+        raw_response: httpx.Response,
+        model_response: ModelResponse,
+        logging_obj: LiteLLMLoggingObj,
+        request_data: dict,
+        messages: List[AllMessageValues],
+        optional_params: dict,
+        litellm_params: dict,
+        encoding: Any,
+        api_key: Optional[str] = None,
+        json_mode: Optional[bool] = None,
+    ) -> ModelResponse:
+        raise NotImplementedError(
+            "AudioTranscriptionConfig does not need a response transformation for audio transcription models"
+        )
diff --git a/.venv/lib/python3.12/site-packages/litellm/llms/base_llm/embedding/transformation.py b/.venv/lib/python3.12/site-packages/litellm/llms/base_llm/embedding/transformation.py
new file mode 100644
index 00000000..68c0a7c0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/litellm/llms/base_llm/embedding/transformation.py
@@ -0,0 +1,88 @@
+from abc import ABC, abstractmethod
+from typing import TYPE_CHECKING, Any, List, Optional
+
+import httpx
+
+from litellm.llms.base_llm.chat.transformation import BaseConfig
+from litellm.types.llms.openai import AllEmbeddingInputValues, AllMessageValues
+from litellm.types.utils import EmbeddingResponse, ModelResponse
+
+if TYPE_CHECKING:
+    from litellm.litellm_core_utils.litellm_logging import Logging as _LiteLLMLoggingObj
+
+    LiteLLMLoggingObj = _LiteLLMLoggingObj
+else:
+    LiteLLMLoggingObj = Any
+
+
+class BaseEmbeddingConfig(BaseConfig, ABC):
+    @abstractmethod
+    def transform_embedding_request(
+        self,
+        model: str,
+        input: AllEmbeddingInputValues,
+        optional_params: dict,
+        headers: dict,
+    ) -> dict:
+        return {}
+
+    @abstractmethod
+    def transform_embedding_response(
+        self,
+        model: str,
+        raw_response: httpx.Response,
+        model_response: EmbeddingResponse,
+        logging_obj: LiteLLMLoggingObj,
+        api_key: Optional[str],
+        request_data: dict,
+        optional_params: dict,
+        litellm_params: dict,
+    ) -> EmbeddingResponse:
+        return model_response
+
+    def get_complete_url(
+        self,
+        api_base: Optional[str],
+        model: str,
+        optional_params: dict,
+        litellm_params: dict,
+        stream: Optional[bool] = None,
+    ) -> str:
+        """
+        OPTIONAL
+
+        Get the complete url for the request
+
+        Some providers need `model` in `api_base`
+        """
+        return api_base or ""
+
+    def transform_request(
+        self,
+        model: str,
+        messages: List[AllMessageValues],
+        optional_params: dict,
+        litellm_params: dict,
+        headers: dict,
+    ) -> dict:
+        raise NotImplementedError(
+            "EmbeddingConfig does not need a request transformation for chat models"
+        )
+
+    def transform_response(
+        self,
+        model: str,
+        raw_response: httpx.Response,
+        model_response: ModelResponse,
+        logging_obj: LiteLLMLoggingObj,
+        request_data: dict,
+        messages: List[AllMessageValues],
+        optional_params: dict,
+        litellm_params: dict,
+        encoding: Any,
+        api_key: Optional[str] = None,
+        json_mode: Optional[bool] = None,
+    ) -> ModelResponse:
+        raise NotImplementedError(
+            "EmbeddingConfig does not need a response transformation for chat models"
+        )
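
A minimal sketch of the two embedding-specific hooks for a hypothetical provider; the request body keys are assumptions, and the chat-level abstract hooks inherited from BaseConfig (validate_environment, map_openai_params, etc.) are omitted, so this is a skeleton rather than a complete, instantiable config.

from typing import Optional

import httpx

from litellm.llms.base_llm.embedding.transformation import BaseEmbeddingConfig
from litellm.types.llms.openai import AllEmbeddingInputValues
from litellm.types.utils import EmbeddingResponse


class ExampleEmbeddingConfig(BaseEmbeddingConfig):
    # NOTE: abstract hooks inherited from BaseConfig are omitted here,
    # so this skeleton cannot be instantiated as-is.

    def transform_embedding_request(
        self,
        model: str,
        input: AllEmbeddingInputValues,
        optional_params: dict,
        headers: dict,
    ) -> dict:
        # Hypothetical provider body; key names are assumptions.
        return {"model": model, "input": input, **optional_params}

    def transform_embedding_response(
        self,
        model: str,
        raw_response: httpx.Response,
        model_response: EmbeddingResponse,
        logging_obj,
        api_key: Optional[str],
        request_data: dict,
        optional_params: dict,
        litellm_params: dict,
    ) -> EmbeddingResponse:
        # A real config would parse raw_response.json() and populate
        # model_response.data here; this sketch returns it unchanged.
        return model_response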
diff --git a/.venv/lib/python3.12/site-packages/litellm/llms/base_llm/image_variations/transformation.py b/.venv/lib/python3.12/site-packages/litellm/llms/base_llm/image_variations/transformation.py
new file mode 100644
index 00000000..4d1cd6ee
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/litellm/llms/base_llm/image_variations/transformation.py
@@ -0,0 +1,132 @@
+from abc import ABC, abstractmethod
+from typing import TYPE_CHECKING, Any, List, Optional
+
+import httpx
+from aiohttp import ClientResponse
+
+from litellm.llms.base_llm.chat.transformation import BaseConfig
+from litellm.types.llms.openai import (
+    AllMessageValues,
+    OpenAIImageVariationOptionalParams,
+)
+from litellm.types.utils import (
+    FileTypes,
+    HttpHandlerRequestFields,
+    ImageResponse,
+    ModelResponse,
+)
+
+if TYPE_CHECKING:
+    from litellm.litellm_core_utils.litellm_logging import Logging as _LiteLLMLoggingObj
+
+    LiteLLMLoggingObj = _LiteLLMLoggingObj
+else:
+    LiteLLMLoggingObj = Any
+
+
+class BaseImageVariationConfig(BaseConfig, ABC):
+    @abstractmethod
+    def get_supported_openai_params(
+        self, model: str
+    ) -> List[OpenAIImageVariationOptionalParams]:
+        pass
+
+    def get_complete_url(
+        self,
+        api_base: Optional[str],
+        model: str,
+        optional_params: dict,
+        litellm_params: dict,
+        stream: Optional[bool] = None,
+    ) -> str:
+        """
+        OPTIONAL
+
+        Get the complete url for the request
+
+        Some providers need `model` in `api_base`
+        """
+        return api_base or ""
+
+    @abstractmethod
+    def transform_request_image_variation(
+        self,
+        model: Optional[str],
+        image: FileTypes,
+        optional_params: dict,
+        headers: dict,
+    ) -> HttpHandlerRequestFields:
+        pass
+
+    def validate_environment(
+        self,
+        headers: dict,
+        model: str,
+        messages: List[AllMessageValues],
+        optional_params: dict,
+        api_key: Optional[str] = None,
+        api_base: Optional[str] = None,
+    ) -> dict:
+        return {}
+
+    @abstractmethod
+    async def async_transform_response_image_variation(
+        self,
+        model: Optional[str],
+        raw_response: ClientResponse,
+        model_response: ImageResponse,
+        logging_obj: LiteLLMLoggingObj,
+        request_data: dict,
+        image: FileTypes,
+        optional_params: dict,
+        litellm_params: dict,
+        encoding: Any,
+        api_key: Optional[str] = None,
+    ) -> ImageResponse:
+        pass
+
+    @abstractmethod
+    def transform_response_image_variation(
+        self,
+        model: Optional[str],
+        raw_response: httpx.Response,
+        model_response: ImageResponse,
+        logging_obj: LiteLLMLoggingObj,
+        request_data: dict,
+        image: FileTypes,
+        optional_params: dict,
+        litellm_params: dict,
+        encoding: Any,
+        api_key: Optional[str] = None,
+    ) -> ImageResponse:
+        pass
+
+    def transform_request(
+        self,
+        model: str,
+        messages: List[AllMessageValues],
+        optional_params: dict,
+        litellm_params: dict,
+        headers: dict,
+    ) -> dict:
+        raise NotImplementedError(
+            "ImageVariationConfig implementa 'transform_request_image_variation' for image variation models"
+        )
+
+    def transform_response(
+        self,
+        model: str,
+        raw_response: httpx.Response,
+        model_response: ModelResponse,
+        logging_obj: LiteLLMLoggingObj,
+        request_data: dict,
+        messages: List[AllMessageValues],
+        optional_params: dict,
+        litellm_params: dict,
+        encoding: Any,
+        api_key: Optional[str] = None,
+        json_mode: Optional[bool] = None,
+    ) -> ModelResponse:
+        raise NotImplementedError(
+            "ImageVariationConfig implements 'transform_response_image_variation' for image variation models"
+        )
diff --git a/.venv/lib/python3.12/site-packages/litellm/llms/base_llm/rerank/transformation.py b/.venv/lib/python3.12/site-packages/litellm/llms/base_llm/rerank/transformation.py
new file mode 100644
index 00000000..8701fe57
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/litellm/llms/base_llm/rerank/transformation.py
@@ -0,0 +1,128 @@
+from abc import ABC, abstractmethod
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
+
+import httpx
+
+from litellm.types.rerank import OptionalRerankParams, RerankBilledUnits, RerankResponse
+from litellm.types.utils import ModelInfo
+
+from ..chat.transformation import BaseLLMException
+
+if TYPE_CHECKING:
+    from litellm.litellm_core_utils.litellm_logging import Logging as _LiteLLMLoggingObj
+
+    LiteLLMLoggingObj = _LiteLLMLoggingObj
+else:
+    LiteLLMLoggingObj = Any
+
+
+class BaseRerankConfig(ABC):
+    @abstractmethod
+    def validate_environment(
+        self,
+        headers: dict,
+        model: str,
+        api_key: Optional[str] = None,
+    ) -> dict:
+        pass
+
+    @abstractmethod
+    def transform_rerank_request(
+        self,
+        model: str,
+        optional_rerank_params: OptionalRerankParams,
+        headers: dict,
+    ) -> dict:
+        return {}
+
+    @abstractmethod
+    def transform_rerank_response(
+        self,
+        model: str,
+        raw_response: httpx.Response,
+        model_response: RerankResponse,
+        logging_obj: LiteLLMLoggingObj,
+        api_key: Optional[str] = None,
+        request_data: dict = {},
+        optional_params: dict = {},
+        litellm_params: dict = {},
+    ) -> RerankResponse:
+        return model_response
+
+    @abstractmethod
+    def get_complete_url(self, api_base: Optional[str], model: str) -> str:
+        """
+        OPTIONAL
+
+        Get the complete url for the request
+
+        Some providers need `model` in `api_base`
+        """
+        return api_base or ""
+
+    @abstractmethod
+    def get_supported_cohere_rerank_params(self, model: str) -> list:
+        pass
+
+    @abstractmethod
+    def map_cohere_rerank_params(
+        self,
+        non_default_params: dict,
+        model: str,
+        drop_params: bool,
+        query: str,
+        documents: List[Union[str, Dict[str, Any]]],
+        custom_llm_provider: Optional[str] = None,
+        top_n: Optional[int] = None,
+        rank_fields: Optional[List[str]] = None,
+        return_documents: Optional[bool] = True,
+        max_chunks_per_doc: Optional[int] = None,
+        max_tokens_per_doc: Optional[int] = None,
+    ) -> OptionalRerankParams:
+        pass
+
+    def get_error_class(
+        self, error_message: str, status_code: int, headers: Union[dict, httpx.Headers]
+    ) -> BaseLLMException:
+        raise BaseLLMException(
+            status_code=status_code,
+            message=error_message,
+            headers=headers,
+        )
+
+    def calculate_rerank_cost(
+        self,
+        model: str,
+        custom_llm_provider: Optional[str] = None,
+        billed_units: Optional[RerankBilledUnits] = None,
+        model_info: Optional[ModelInfo] = None,
+    ) -> Tuple[float, float]:
+        """
+        Calculates the cost per query for a given rerank model.
+
+        Input:
+            - model: str, the model name without provider prefix
+            - custom_llm_provider: str, the provider used for the model. If provided, used to check if the litellm model info is for that provider.
+            - billed_units: RerankBilledUnits, the billed units returned by the provider (its `search_units` value is used)
+            - model_info: ModelInfo, the model info for the given model
+
+        Returns:
+            Tuple[float, float] - prompt_cost_in_usd, completion_cost_in_usd
+        """
+
+        if (
+            model_info is None
+            or "input_cost_per_query" not in model_info
+            or model_info["input_cost_per_query"] is None
+            or billed_units is None
+        ):
+            return 0.0, 0.0
+
+        search_units = billed_units.get("search_units")
+
+        if search_units is None:
+            return 0.0, 0.0
+
+        prompt_cost = model_info["input_cost_per_query"] * search_units
+
+        return prompt_cost, 0.0
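
A worked example of the arithmetic in `calculate_rerank_cost`, assuming a model priced at $0.002 per search query whose response was billed for 3 search units (both values are hypothetical).

input_cost_per_query = 0.002  # model_info["input_cost_per_query"] (assumed)
search_units = 3              # billed_units["search_units"] (assumed)

prompt_cost = input_cost_per_query * search_units  # 0.006 USD
completion_cost = 0.0                              # rerank has no completion cost
# calculate_rerank_cost(...) would return (0.006, 0.0) for these inputs.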
diff --git a/.venv/lib/python3.12/site-packages/litellm/llms/base_llm/responses/transformation.py b/.venv/lib/python3.12/site-packages/litellm/llms/base_llm/responses/transformation.py
new file mode 100644
index 00000000..29555c55
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/litellm/llms/base_llm/responses/transformation.py
@@ -0,0 +1,141 @@
+import types
+from abc import ABC, abstractmethod
+from typing import TYPE_CHECKING, Any, Dict, Optional, Union
+
+import httpx
+
+from litellm.types.llms.openai import (
+    ResponseInputParam,
+    ResponsesAPIOptionalRequestParams,
+    ResponsesAPIResponse,
+    ResponsesAPIStreamingResponse,
+)
+from litellm.types.router import GenericLiteLLMParams
+
+if TYPE_CHECKING:
+    from litellm.litellm_core_utils.litellm_logging import Logging as _LiteLLMLoggingObj
+
+    from ..chat.transformation import BaseLLMException as _BaseLLMException
+
+    LiteLLMLoggingObj = _LiteLLMLoggingObj
+    BaseLLMException = _BaseLLMException
+else:
+    LiteLLMLoggingObj = Any
+    BaseLLMException = Any
+
+
+class BaseResponsesAPIConfig(ABC):
+    def __init__(self):
+        pass
+
+    @classmethod
+    def get_config(cls):
+        return {
+            k: v
+            for k, v in cls.__dict__.items()
+            if not k.startswith("__")
+            and not k.startswith("_abc")
+            and not isinstance(
+                v,
+                (
+                    types.FunctionType,
+                    types.BuiltinFunctionType,
+                    classmethod,
+                    staticmethod,
+                ),
+            )
+            and v is not None
+        }
+
+    @abstractmethod
+    def get_supported_openai_params(self, model: str) -> list:
+        pass
+
+    @abstractmethod
+    def map_openai_params(
+        self,
+        response_api_optional_params: ResponsesAPIOptionalRequestParams,
+        model: str,
+        drop_params: bool,
+    ) -> Dict:
+
+        pass
+
+    @abstractmethod
+    def validate_environment(
+        self,
+        headers: dict,
+        model: str,
+        api_key: Optional[str] = None,
+    ) -> dict:
+        return {}
+
+    @abstractmethod
+    def get_complete_url(
+        self,
+        api_base: Optional[str],
+        model: str,
+        stream: Optional[bool] = None,
+    ) -> str:
+        """
+        OPTIONAL
+
+        Get the complete url for the request
+
+        Some providers need `model` in `api_base`
+        """
+        if api_base is None:
+            raise ValueError("api_base is required")
+        return api_base
+
+    @abstractmethod
+    def transform_responses_api_request(
+        self,
+        model: str,
+        input: Union[str, ResponseInputParam],
+        response_api_optional_request_params: Dict,
+        litellm_params: GenericLiteLLMParams,
+        headers: dict,
+    ) -> Dict:
+        pass
+
+    @abstractmethod
+    def transform_response_api_response(
+        self,
+        model: str,
+        raw_response: httpx.Response,
+        logging_obj: LiteLLMLoggingObj,
+    ) -> ResponsesAPIResponse:
+        pass
+
+    @abstractmethod
+    def transform_streaming_response(
+        self,
+        model: str,
+        parsed_chunk: dict,
+        logging_obj: LiteLLMLoggingObj,
+    ) -> ResponsesAPIStreamingResponse:
+        """
+        Transform a parsed streaming response chunk into a ResponsesAPIStreamingResponse
+        """
+        pass
+
+    def get_error_class(
+        self, error_message: str, status_code: int, headers: Union[dict, httpx.Headers]
+    ) -> BaseLLMException:
+        from ..chat.transformation import BaseLLMException
+
+        raise BaseLLMException(
+            status_code=status_code,
+            message=error_message,
+            headers=headers,
+        )
+
+    def should_fake_stream(
+        self,
+        model: Optional[str],
+        stream: Optional[bool],
+        custom_llm_provider: Optional[str] = None,
+    ) -> bool:
+        """Returns True if litellm should fake a stream for the given model and stream value"""
+        return False
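
As a closing illustration of the `get_config()` classmethod shared by BaseConfig and BaseResponsesAPIConfig: it collects the non-None, non-callable class attributes that concrete configs use to publish their default parameter values. The subclass and attribute names below are contrived.

from typing import Optional

from litellm.llms.base_llm.responses.transformation import BaseResponsesAPIConfig


class ExampleResponsesAPIConfig(BaseResponsesAPIConfig):
    temperature: Optional[float] = 0.7
    max_output_tokens: Optional[int] = None  # None values are filtered out

    # (abstract request/response hooks omitted in this sketch)


print(ExampleResponsesAPIConfig.get_config())
# -> {'temperature': 0.7}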