Diffstat (limited to '.venv/lib/python3.12/site-packages/litellm/proxy/common_utils/http_parsing_utils.py')
-rw-r--r--  .venv/lib/python3.12/site-packages/litellm/proxy/common_utils/http_parsing_utils.py  182
1 file changed, 182 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/litellm/proxy/common_utils/http_parsing_utils.py b/.venv/lib/python3.12/site-packages/litellm/proxy/common_utils/http_parsing_utils.py
new file mode 100644
index 00000000..5736ee21
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/litellm/proxy/common_utils/http_parsing_utils.py
@@ -0,0 +1,182 @@
+import json
+from typing import Dict, List, Optional
+
+import orjson
+from fastapi import Request, UploadFile, status
+
+from litellm._logging import verbose_proxy_logger
+from litellm.types.router import Deployment
+
+
+async def _read_request_body(request: Optional[Request]) -> Dict:
+ """
+    Safely read the request body and parse it as JSON (or form data), caching the parsed result on the request scope.
+
+ Parameters:
+ - request: The request object to read the body from
+
+ Returns:
+ - dict: Parsed request data as a dictionary or an empty dictionary if parsing fails
+ """
+ try:
+ if request is None:
+ return {}
+
+ # Check if we already read and parsed the body
+ _cached_request_body: Optional[dict] = _safe_get_request_parsed_body(
+ request=request
+ )
+ if _cached_request_body is not None:
+ return _cached_request_body
+
+ _request_headers: dict = _safe_get_request_headers(request=request)
+ content_type = _request_headers.get("content-type", "")
+
+ if "form" in content_type:
+ parsed_body = dict(await request.form())
+ else:
+ # Read the request body
+ body = await request.body()
+
+ # Return empty dict if body is empty or None
+ if not body:
+ parsed_body = {}
+ else:
+ try:
+ parsed_body = orjson.loads(body)
+ except orjson.JSONDecodeError:
+ # Fall back to the standard json module which is more forgiving
+ # First decode bytes to string if needed
+ body_str = body.decode("utf-8") if isinstance(body, bytes) else body
+
+                    # Strip unpaired surrogate code points, which break JSON parsing
+ import re
+
+                    # Remove high surrogates not followed by a low surrogate
+ body_str = re.sub(
+ r"[\uD800-\uDBFF](?![\uDC00-\uDFFF])", "", body_str
+ )
+                    # Remove low surrogates not preceded by a high surrogate
+ body_str = re.sub(
+ r"(?<![\uD800-\uDBFF])[\uDC00-\uDFFF]", "", body_str
+ )
+
+ parsed_body = json.loads(body_str)
+
+ # Cache the parsed result
+ _safe_set_request_parsed_body(request=request, parsed_body=parsed_body)
+ return parsed_body
+
+ except (json.JSONDecodeError, orjson.JSONDecodeError):
+ verbose_proxy_logger.exception("Invalid JSON payload received.")
+ return {}
+ except Exception as e:
+ # Catch unexpected errors to avoid crashes
+ verbose_proxy_logger.exception(
+ "Unexpected error reading request body - {}".format(e)
+ )
+ return {}
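
The decode fallback above is easiest to see on a payload that already contains an unpaired surrogate. A minimal standalone sketch (the lone "\ud800" is built directly in the string literal, since strictly decoded UTF-8 bytes never produce one):

    import json
    import re

    body_str = '{"text": "ok\ud800"}'  # unpaired high surrogate

    # Same two substitutions used in _read_request_body above
    body_str = re.sub(r"[\uD800-\uDBFF](?![\uDC00-\uDFFF])", "", body_str)
    body_str = re.sub(r"(?<![\uD800-\uDBFF])[\uDC00-\uDFFF]", "", body_str)

    print(json.loads(body_str))  # {'text': 'ok'}
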
+
+
+def _safe_get_request_parsed_body(request: Optional[Request]) -> Optional[dict]:
+ if request is None:
+ return None
+ if hasattr(request, "scope") and "parsed_body" in request.scope:
+ return request.scope["parsed_body"]
+ return None
+
+
+def _safe_set_request_parsed_body(
+ request: Optional[Request],
+ parsed_body: dict,
+) -> None:
+ try:
+ if request is None:
+ return
+ request.scope["parsed_body"] = parsed_body
+ except Exception as e:
+ verbose_proxy_logger.debug(
+ "Unexpected error setting request parsed body - {}".format(e)
+ )
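
Both cache helpers rely on the fact that a Starlette/FastAPI request exposes its raw ASGI scope as a plain mutable dict, so a key stashed there survives for the lifetime of the request. A minimal sketch of the round trip, with a hand-built scope standing in for a real connection:

    from fastapi import Request

    from litellm.proxy.common_utils.http_parsing_utils import (
        _safe_get_request_parsed_body,
        _safe_set_request_parsed_body,
    )

    scope = {"type": "http", "method": "POST", "path": "/", "query_string": b"", "headers": []}
    request = Request(scope)

    _safe_set_request_parsed_body(request=request, parsed_body={"model": "gpt-4o"})
    assert _safe_get_request_parsed_body(request=request) == {"model": "gpt-4o"}
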
+
+
+def _safe_get_request_headers(request: Optional[Request]) -> dict:
+ """
+ [Non-Blocking] Safely get the request headers
+ """
+ try:
+ if request is None:
+ return {}
+ return dict(request.headers)
+ except Exception as e:
+ verbose_proxy_logger.debug(
+ "Unexpected error reading request headers - {}".format(e)
+ )
+ return {}
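
The ASGI spec requires header names in the scope to be lowercased, so the dict returned here is keyed by lowercase names; that is why _read_request_body looks up "content-type" rather than "Content-Type". A quick sketch with a hand-built scope:

    from fastapi import Request

    from litellm.proxy.common_utils.http_parsing_utils import _safe_get_request_headers

    scope = {
        "type": "http",
        "method": "POST",
        "path": "/",
        "query_string": b"",
        "headers": [(b"content-type", b"application/json")],
    }
    print(_safe_get_request_headers(request=Request(scope)))
    # {'content-type': 'application/json'}
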
+
+
+def check_file_size_under_limit(
+ request_data: dict,
+ file: UploadFile,
+ router_model_names: List[str],
+) -> bool:
+ """
+    Check that the file passed in the request is under max_file_size_mb
+
+    Returns True -> when the file size is under the max_file_size_mb limit
+    Raises ProxyException -> when the file size exceeds the max_file_size_mb limit, or when a limit is set but the caller is not a premium_user
+ """
+ from litellm.proxy.proxy_server import (
+ CommonProxyErrors,
+ ProxyException,
+ llm_router,
+ premium_user,
+ )
+
+ file_contents_size = file.size or 0
+ file_content_size_in_mb = file_contents_size / (1024 * 1024)
+ if "metadata" not in request_data:
+ request_data["metadata"] = {}
+ request_data["metadata"]["file_size_in_mb"] = file_content_size_in_mb
+ max_file_size_mb = None
+
+    if llm_router is not None and request_data.get("model") in router_model_names:
+ try:
+ deployment: Optional[Deployment] = (
+ llm_router.get_deployment_by_model_group_name(
+ model_group_name=request_data["model"]
+ )
+ )
+ if (
+ deployment
+ and deployment.litellm_params is not None
+ and deployment.litellm_params.max_file_size_mb is not None
+ ):
+ max_file_size_mb = deployment.litellm_params.max_file_size_mb
+ except Exception as e:
+ verbose_proxy_logger.error(
+                "Got error when checking file size: %s", str(e)
+ )
+
+ if max_file_size_mb is not None:
+ verbose_proxy_logger.debug(
+ "Checking file size, file content size=%s, max_file_size_mb=%s",
+ file_content_size_in_mb,
+ max_file_size_mb,
+ )
+ if not premium_user:
+ raise ProxyException(
+ message=f"Tried setting max_file_size_mb for /audio/transcriptions. {CommonProxyErrors.not_premium_user.value}",
+ code=status.HTTP_400_BAD_REQUEST,
+ type="bad_request",
+ param="file",
+ )
+ if file_content_size_in_mb > max_file_size_mb:
+ raise ProxyException(
+                message=f"File size is too large. Passed file size: {file_content_size_in_mb} MB. Max file size: {max_file_size_mb} MB",
+ code=status.HTTP_400_BAD_REQUEST,
+ type="bad_request",
+ param="file",
+ )
+
+ return True
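
A sketch of how a proxy route might combine these helpers. The route path, handler body, and model group name are illustrative only, not litellm's actual wiring, and check_file_size_under_limit assumes the proxy server globals (llm_router, premium_user) are initialized:

    from fastapi import FastAPI, Request, UploadFile

    from litellm.proxy.common_utils.http_parsing_utils import (
        _read_request_body,
        check_file_size_under_limit,
    )

    app = FastAPI()

    @app.post("/audio/transcriptions")  # hypothetical route
    async def transcriptions(request: Request, file: UploadFile):
        data = await _read_request_body(request=request)
        # Raises ProxyException if the matched deployment sets max_file_size_mb
        # and the upload exceeds it (or the caller is not a premium user);
        # returns True otherwise.
        check_file_size_under_limit(
            request_data=data,
            file=file,
            router_model_names=["whisper-1"],  # assumed model group names
        )
        ...
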