author     S. Solomon Darnell  2025-03-28 21:52:21 -0500
committer  S. Solomon Darnell  2025-03-28 21:52:21 -0500
commit     4a52a71956a8d46fcb7294ac71734504bb09bcc2 (patch)
tree       ee3dc5af3b6313e921cd920906356f5d4febc4ed /.venv/lib/python3.12/site-packages/litellm/llms/aiohttp_openai/chat
parent     cc961e04ba734dd72309fb548a2f97d67d578813 (diff)
download   gn-ai-master.tar.gz

two version of R2R are here (HEAD, master)
Diffstat (limited to '.venv/lib/python3.12/site-packages/litellm/llms/aiohttp_openai/chat')
-rw-r--r--  .venv/lib/python3.12/site-packages/litellm/llms/aiohttp_openai/chat/transformation.py  91
1 file changed, 91 insertions(+), 0 deletions(-)
diff --git a/.venv/lib/python3.12/site-packages/litellm/llms/aiohttp_openai/chat/transformation.py b/.venv/lib/python3.12/site-packages/litellm/llms/aiohttp_openai/chat/transformation.py
new file mode 100644
index 00000000..212db185
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/litellm/llms/aiohttp_openai/chat/transformation.py
@@ -0,0 +1,91 @@
+"""
+*New config* for using aiohttp to make the request to the custom OpenAI-like provider
+
+This leads to 10x higher RPS than httpx
+https://github.com/BerriAI/litellm/issues/6592
+
+New config to ensure we introduce this without causing breaking changes for users
+"""
+
+from typing import TYPE_CHECKING, Any, List, Optional
+
+from aiohttp import ClientResponse
+
+from litellm.llms.openai_like.chat.transformation import OpenAILikeChatConfig
+from litellm.types.llms.openai import AllMessageValues
+from litellm.types.utils import Choices, ModelResponse
+
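+# Import the logging type only for static type checking; at runtime the alias
+# falls back to Any so the logging module never has to be imported here.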
+if TYPE_CHECKING:
+ from litellm.litellm_core_utils.litellm_logging import Logging as _LiteLLMLoggingObj
+
+ LiteLLMLoggingObj = _LiteLLMLoggingObj
+else:
+ LiteLLMLoggingObj = Any
+
+
+class AiohttpOpenAIChatConfig(OpenAILikeChatConfig):
+ def get_complete_url(
+ self,
+ api_base: Optional[str],
+ model: str,
+ optional_params: dict,
+ litellm_params: dict,
+ stream: Optional[bool] = None,
+ ) -> str:
+        """
+        Ensure the URL ends with /chat/completions (defaulting to OpenAI's
+        /v1 base when no api_base is given).
+        """
+        if api_base is None:
+            # Default to the OpenAI v1 base so the final URL becomes
+            # https://api.openai.com/v1/chat/completions.
+            api_base = "https://api.openai.com/v1"
+
+        if not api_base.endswith("/chat/completions"):
+            api_base = api_base.rstrip("/") + "/chat/completions"
+        return api_base
+
+ def validate_environment(
+ self,
+ headers: dict,
+ model: str,
+ messages: List[AllMessageValues],
+ optional_params: dict,
+ api_key: Optional[str] = None,
+ api_base: Optional[str] = None,
+ ) -> dict:
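+        # Only the bearer token is sent; caller-supplied headers are not
+        # merged in by this config.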
+ return {"Authorization": f"Bearer {api_key}"}
+
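+    # Async override: aiohttp's ClientResponse.json() is a coroutine, so the
+    # body must be awaited here; the type: ignore covers the signature
+    # mismatch with the synchronous base method.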
+ async def transform_response( # type: ignore
+ self,
+ model: str,
+ raw_response: ClientResponse,
+ model_response: ModelResponse,
+ logging_obj: LiteLLMLoggingObj,
+ request_data: dict,
+ messages: List[AllMessageValues],
+ optional_params: dict,
+ litellm_params: dict,
+ encoding: Any,
+ api_key: Optional[str] = None,
+ json_mode: Optional[bool] = None,
+ ) -> ModelResponse:
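+        # Parse the OpenAI-format JSON body and map its fields onto the
+        # litellm ModelResponse.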
+ _json_response = await raw_response.json()
+ model_response.id = _json_response.get("id")
+ model_response.choices = [
+            Choices(**choice) for choice in _json_response.get("choices", [])
+ ]
+ model_response.created = _json_response.get("created")
+ model_response.model = _json_response.get("model")
+ model_response.object = _json_response.get("object")
+ model_response.system_fingerprint = _json_response.get("system_fingerprint")
+ return model_response
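
For reference, a minimal sketch of how the new config behaves, exercising only the methods added in this diff. The localhost URL and the "sk-..." key are placeholders; routing a real completion call through this config is assumed to happen elsewhere in litellm.

    from litellm.llms.aiohttp_openai.chat.transformation import AiohttpOpenAIChatConfig

    config = AiohttpOpenAIChatConfig()

    # With no api_base, the default OpenAI v1 endpoint is produced.
    assert config.get_complete_url(
        api_base=None, model="gpt-4o", optional_params={}, litellm_params={}
    ) == "https://api.openai.com/v1/chat/completions"

    # A custom OpenAI-compatible server gets the suffix appended exactly once.
    assert config.get_complete_url(
        api_base="http://localhost:8000/v1",
        model="gpt-4o",
        optional_params={},
        litellm_params={},
    ) == "http://localhost:8000/v1/chat/completions"

    # Headers are built from the API key alone.
    assert config.validate_environment(
        headers={}, model="gpt-4o", messages=[], optional_params={}, api_key="sk-..."
    ) == {"Authorization": "Bearer sk-..."}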