path: root/.venv/lib/python3.12/site-packages/litellm/proxy/vertex_ai_endpoints
author     S. Solomon Darnell  2025-03-28 21:52:21 -0500
committer  S. Solomon Darnell  2025-03-28 21:52:21 -0500
commit     4a52a71956a8d46fcb7294ac71734504bb09bcc2 (patch)
tree       ee3dc5af3b6313e921cd920906356f5d4febc4ed /.venv/lib/python3.12/site-packages/litellm/proxy/vertex_ai_endpoints
parent     cc961e04ba734dd72309fb548a2f97d67d578813 (diff)
download   gn-ai-master.tar.gz

two versions of R2R are here (HEAD, master)
Diffstat (limited to '.venv/lib/python3.12/site-packages/litellm/proxy/vertex_ai_endpoints')
-rw-r--r--  .venv/lib/python3.12/site-packages/litellm/proxy/vertex_ai_endpoints/langfuse_endpoints.py          139
-rw-r--r--  .venv/lib/python3.12/site-packages/litellm/proxy/vertex_ai_endpoints/vertex_endpoints.py            274
-rw-r--r--  .venv/lib/python3.12/site-packages/litellm/proxy/vertex_ai_endpoints/vertex_passthrough_router.py   121
3 files changed, 534 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/litellm/proxy/vertex_ai_endpoints/langfuse_endpoints.py b/.venv/lib/python3.12/site-packages/litellm/proxy/vertex_ai_endpoints/langfuse_endpoints.py
new file mode 100644
index 00000000..0c91c326
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/litellm/proxy/vertex_ai_endpoints/langfuse_endpoints.py
@@ -0,0 +1,139 @@
+"""
+What is this?
+
+Logging Pass-Through Endpoints
+"""
+
+"""
+1. Create pass-through endpoints mapping any LITELLM_BASE_URL/langfuse/<endpoint> to LANGFUSE_BASE_URL/<endpoint>
+"""
+
+import base64
+import os
+from base64 import b64encode
+from typing import Optional
+
+import httpx
+from fastapi import APIRouter, Request, Response
+
+import litellm
+from litellm.proxy._types import *
+from litellm.proxy.auth.user_api_key_auth import user_api_key_auth
+from litellm.proxy.litellm_pre_call_utils import _get_dynamic_logging_metadata
+from litellm.proxy.pass_through_endpoints.pass_through_endpoints import (
+ create_pass_through_route,
+)
+
+router = APIRouter()
+default_vertex_config = None
+
+
+def create_request_copy(request: Request):
+ return {
+ "method": request.method,
+ "url": str(request.url),
+ "headers": dict(request.headers),
+ "cookies": request.cookies,
+ "query_params": dict(request.query_params),
+ }
+
+
+@router.api_route(
+ "/langfuse/{endpoint:path}",
+ methods=["GET", "POST", "PUT", "DELETE", "PATCH"],
+ tags=["Langfuse Pass-through", "pass-through"],
+)
+async def langfuse_proxy_route(
+ endpoint: str,
+ request: Request,
+ fastapi_response: Response,
+):
+ """
+ Call Langfuse via LiteLLM proxy. Works with Langfuse SDK.
+
+ [Docs](https://docs.litellm.ai/docs/pass_through/langfuse)
+ """
+ from litellm.proxy.proxy_server import proxy_config
+
+    ## EXTRACT THE LITELLM VIRTUAL KEY FROM THE BASIC AUTH HEADER - "Basic base64(public_key:secret_key)"
+    api_key = request.headers.get("Authorization") or ""
+
+    ## decode the base64-encoded credentials
+    api_key = api_key.replace("Basic ", "")
+
+    decoded_bytes = base64.b64decode(api_key)
+    decoded_str = decoded_bytes.decode("utf-8")
+    api_key = decoded_str.split(":")[1]  # the litellm virtual key is expected in the secret-key slot
+
+ user_api_key_dict = await user_api_key_auth(
+ request=request, api_key="Bearer {}".format(api_key)
+ )
+
+ callback_settings_obj: Optional[TeamCallbackMetadata] = (
+ _get_dynamic_logging_metadata(
+ user_api_key_dict=user_api_key_dict, proxy_config=proxy_config
+ )
+ )
+
+ dynamic_langfuse_public_key: Optional[str] = None
+ dynamic_langfuse_secret_key: Optional[str] = None
+ dynamic_langfuse_host: Optional[str] = None
+ if (
+ callback_settings_obj is not None
+ and callback_settings_obj.callback_vars is not None
+ ):
+ for k, v in callback_settings_obj.callback_vars.items():
+ if k == "langfuse_public_key":
+ dynamic_langfuse_public_key = v
+ elif k == "langfuse_secret_key":
+ dynamic_langfuse_secret_key = v
+ elif k == "langfuse_host":
+ dynamic_langfuse_host = v
+
+ base_target_url: str = (
+ dynamic_langfuse_host
+ or os.getenv("LANGFUSE_HOST", "https://cloud.langfuse.com")
+ or "https://cloud.langfuse.com"
+ )
+ if not (
+ base_target_url.startswith("http://") or base_target_url.startswith("https://")
+ ):
+        # no scheme given - add http://, assuming communication over a private network (e.g. Render)
+ base_target_url = "http://" + base_target_url
+
+ encoded_endpoint = httpx.URL(endpoint).path
+
+ # Ensure endpoint starts with '/' for proper URL construction
+ if not encoded_endpoint.startswith("/"):
+ encoded_endpoint = "/" + encoded_endpoint
+
+ # Construct the full target URL using httpx
+ base_url = httpx.URL(base_target_url)
+ updated_url = base_url.copy_with(path=encoded_endpoint)
+
+    # Resolve the langfuse credentials - dynamic (team) values take precedence over stored secrets
+ langfuse_public_key = dynamic_langfuse_public_key or litellm.utils.get_secret(
+ secret_name="LANGFUSE_PUBLIC_KEY"
+ )
+ langfuse_secret_key = dynamic_langfuse_secret_key or litellm.utils.get_secret(
+ secret_name="LANGFUSE_SECRET_KEY"
+ )
+
+ langfuse_combined_key = "Basic " + b64encode(
+ f"{langfuse_public_key}:{langfuse_secret_key}".encode("utf-8")
+ ).decode("ascii")
+
+ ## CREATE PASS-THROUGH
+ endpoint_func = create_pass_through_route(
+ endpoint=endpoint,
+ target=str(updated_url),
+ custom_headers={"Authorization": langfuse_combined_key},
+ ) # dynamically construct pass-through endpoint based on incoming path
+ received_value = await endpoint_func(
+ request,
+ fastapi_response,
+ user_api_key_dict,
+ query_params=dict(request.query_params), # type: ignore
+ )
+
+ return received_value
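
Taken as a whole, langfuse_endpoints.py lets a stock Langfuse client talk to the LiteLLM proxy: the client sends `Basic base64(public_key:secret_key)`, the route lifts the litellm virtual key out of the secret-key slot, authenticates it, and replays the request against Langfuse with the real credentials. A minimal sketch of a caller, assuming a proxy at http://localhost:4000 and a virtual key sk-1234 (both hypothetical values), using a plain httpx request in place of the Langfuse SDK:

```
import base64

import httpx

LITELLM_PROXY = "http://localhost:4000"  # hypothetical proxy address
LITELLM_VIRTUAL_KEY = "sk-1234"          # hypothetical litellm virtual key

# Mirror the Langfuse SDK's auth header: Basic base64(public_key:secret_key).
# langfuse_proxy_route only reads the secret-key slot, so the public-key slot
# can hold any placeholder.
basic_auth = base64.b64encode(
    f"pk-placeholder:{LITELLM_VIRTUAL_KEY}".encode("utf-8")
).decode("ascii")

# Any path under /langfuse/... is forwarded to LANGFUSE_HOST with the same path.
resp = httpx.get(
    f"{LITELLM_PROXY}/langfuse/api/public/projects",
    headers={"Authorization": f"Basic {basic_auth}"},
)
print(resp.status_code, resp.text)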
diff --git a/.venv/lib/python3.12/site-packages/litellm/proxy/vertex_ai_endpoints/vertex_endpoints.py b/.venv/lib/python3.12/site-packages/litellm/proxy/vertex_ai_endpoints/vertex_endpoints.py
new file mode 100644
index 00000000..7444e3d1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/litellm/proxy/vertex_ai_endpoints/vertex_endpoints.py
@@ -0,0 +1,274 @@
+import traceback
+from typing import Optional
+
+import httpx
+from fastapi import APIRouter, HTTPException, Request, Response, status
+
+import litellm
+from litellm._logging import verbose_proxy_logger
+from litellm.fine_tuning.main import vertex_fine_tuning_apis_instance
+from litellm.proxy._types import *
+from litellm.proxy.auth.user_api_key_auth import user_api_key_auth
+from litellm.proxy.pass_through_endpoints.pass_through_endpoints import (
+ create_pass_through_route,
+)
+from litellm.secret_managers.main import get_secret_str
+from litellm.types.passthrough_endpoints.vertex_ai import *
+
+from .vertex_passthrough_router import VertexPassThroughRouter
+
+router = APIRouter()
+vertex_pass_through_router = VertexPassThroughRouter()
+
+default_vertex_config: VertexPassThroughCredentials = VertexPassThroughCredentials()
+
+
+def _get_vertex_env_vars() -> VertexPassThroughCredentials:
+ """
+    Helper to get the vertex pass-through config from environment variables.
+
+ The following environment variables are used:
+ - DEFAULT_VERTEXAI_PROJECT (project id)
+ - DEFAULT_VERTEXAI_LOCATION (location)
+ - DEFAULT_GOOGLE_APPLICATION_CREDENTIALS (path to credentials file)
+ """
+ return VertexPassThroughCredentials(
+ vertex_project=get_secret_str("DEFAULT_VERTEXAI_PROJECT"),
+ vertex_location=get_secret_str("DEFAULT_VERTEXAI_LOCATION"),
+ vertex_credentials=get_secret_str("DEFAULT_GOOGLE_APPLICATION_CREDENTIALS"),
+ )
+
+
+def set_default_vertex_config(config: Optional[dict] = None):
+ """Sets vertex configuration from provided config and/or environment variables
+
+ Args:
+ config (Optional[dict]): Configuration dictionary
+ Example: {
+ "vertex_project": "my-project-123",
+ "vertex_location": "us-central1",
+ "vertex_credentials": "os.environ/GOOGLE_CREDS"
+ }
+ """
+ global default_vertex_config
+
+    # No config provided - fall back to environment variables
+ if config is None:
+ default_vertex_config = _get_vertex_env_vars()
+ return
+
+ if isinstance(config, dict):
+ for key, value in config.items():
+ if isinstance(value, str) and value.startswith("os.environ/"):
+ config[key] = litellm.get_secret(value)
+
+ _set_default_vertex_config(VertexPassThroughCredentials(**config))
+
+
+def _set_default_vertex_config(
+ vertex_pass_through_credentials: VertexPassThroughCredentials,
+):
+ global default_vertex_config
+ default_vertex_config = vertex_pass_through_credentials
+
+
+def exception_handler(e: Exception):
+ verbose_proxy_logger.error(
+ "litellm.proxy.proxy_server.v1/projects/tuningJobs(): Exception occurred - {}".format(
+ str(e)
+ )
+ )
+ verbose_proxy_logger.debug(traceback.format_exc())
+ if isinstance(e, HTTPException):
+ return ProxyException(
+ message=getattr(e, "message", str(e.detail)),
+ type=getattr(e, "type", "None"),
+ param=getattr(e, "param", "None"),
+ code=getattr(e, "status_code", status.HTTP_400_BAD_REQUEST),
+ )
+ else:
+ error_msg = f"{str(e)}"
+ return ProxyException(
+ message=getattr(e, "message", error_msg),
+ type=getattr(e, "type", "None"),
+ param=getattr(e, "param", "None"),
+ code=getattr(e, "status_code", 500),
+ )
+
+
+def construct_target_url(
+ base_url: str,
+ requested_route: str,
+ default_vertex_location: Optional[str],
+ default_vertex_project: Optional[str],
+) -> httpx.URL:
+ """
+    Allow the user to specify their own project id / location.
+
+    If missing, use the defaults.
+
+    Handle the cachedContent scenario - https://github.com/BerriAI/litellm/issues/5460
+
+    Constructed URL:
+    POST https://LOCATION-aiplatform.googleapis.com/{version}/projects/PROJECT_ID/locations/LOCATION/cachedContents
+ """
+ new_base_url = httpx.URL(base_url)
+ if "locations" in requested_route: # contains the target project id + location
+ updated_url = new_base_url.copy_with(path=requested_route)
+ return updated_url
+ """
+    - Add endpoint version (e.g. v1beta1 for cachedContent, v1 for the rest)
+ - Add default project id
+ - Add default location
+ """
+ vertex_version: Literal["v1", "v1beta1"] = "v1"
+ if "cachedContent" in requested_route:
+ vertex_version = "v1beta1"
+
+ base_requested_route = "{}/projects/{}/locations/{}".format(
+ vertex_version, default_vertex_project, default_vertex_location
+ )
+
+ updated_requested_route = "/" + base_requested_route + requested_route
+
+ updated_url = new_base_url.copy_with(path=updated_requested_route)
+ return updated_url
+
+
+@router.api_route(
+ "/vertex-ai/{endpoint:path}",
+ methods=["GET", "POST", "PUT", "DELETE", "PATCH"],
+ tags=["Vertex AI Pass-through", "pass-through"],
+ include_in_schema=False,
+)
+@router.api_route(
+ "/vertex_ai/{endpoint:path}",
+ methods=["GET", "POST", "PUT", "DELETE", "PATCH"],
+ tags=["Vertex AI Pass-through", "pass-through"],
+)
+async def vertex_proxy_route(
+ endpoint: str,
+ request: Request,
+ fastapi_response: Response,
+):
+ """
+ Call LiteLLM proxy via Vertex AI SDK.
+
+ [Docs](https://docs.litellm.ai/docs/pass_through/vertex_ai)
+ """
+ encoded_endpoint = httpx.URL(endpoint).path
+ verbose_proxy_logger.debug("requested endpoint %s", endpoint)
+ headers: dict = {}
+ api_key_to_use = get_litellm_virtual_key(request=request)
+ user_api_key_dict = await user_api_key_auth(
+ request=request,
+ api_key=api_key_to_use,
+ )
+
+ vertex_project: Optional[str] = (
+ VertexPassThroughRouter._get_vertex_project_id_from_url(endpoint)
+ )
+ vertex_location: Optional[str] = (
+ VertexPassThroughRouter._get_vertex_location_from_url(endpoint)
+ )
+ vertex_credentials = vertex_pass_through_router.get_vertex_credentials(
+ project_id=vertex_project,
+ location=vertex_location,
+ )
+
+ # Use headers from the incoming request if no vertex credentials are found
+ if vertex_credentials.vertex_project is None:
+ headers = dict(request.headers) or {}
+ verbose_proxy_logger.debug(
+ "default_vertex_config not set, incoming request headers %s", headers
+ )
+ base_target_url = f"https://{vertex_location}-aiplatform.googleapis.com/"
+ headers.pop("content-length", None)
+ headers.pop("host", None)
+ else:
+ vertex_project = vertex_credentials.vertex_project
+ vertex_location = vertex_credentials.vertex_location
+ vertex_credentials_str = vertex_credentials.vertex_credentials
+
+ # Construct base URL for the target endpoint
+ base_target_url = f"https://{vertex_location}-aiplatform.googleapis.com/"
+
+ _auth_header, vertex_project = (
+ await vertex_fine_tuning_apis_instance._ensure_access_token_async(
+ credentials=vertex_credentials_str,
+ project_id=vertex_project,
+ custom_llm_provider="vertex_ai_beta",
+ )
+ )
+
+ auth_header, _ = vertex_fine_tuning_apis_instance._get_token_and_url(
+ model="",
+ auth_header=_auth_header,
+ gemini_api_key=None,
+ vertex_credentials=vertex_credentials_str,
+ vertex_project=vertex_project,
+ vertex_location=vertex_location,
+ stream=False,
+ custom_llm_provider="vertex_ai_beta",
+ api_base="",
+ )
+
+ headers = {
+ "Authorization": f"Bearer {auth_header}",
+ }
+
+ request_route = encoded_endpoint
+ verbose_proxy_logger.debug("request_route %s", request_route)
+
+ # Ensure endpoint starts with '/' for proper URL construction
+ if not encoded_endpoint.startswith("/"):
+ encoded_endpoint = "/" + encoded_endpoint
+
+ # Construct the full target URL using httpx
+ updated_url = construct_target_url(
+ base_url=base_target_url,
+ requested_route=encoded_endpoint,
+ default_vertex_location=vertex_location,
+ default_vertex_project=vertex_project,
+ )
+
+ verbose_proxy_logger.debug("updated url %s", updated_url)
+
+ ## check for streaming
+ target = str(updated_url)
+ is_streaming_request = False
+ if "stream" in str(updated_url):
+ is_streaming_request = True
+ target += "?alt=sse"
+
+ ## CREATE PASS-THROUGH
+ endpoint_func = create_pass_through_route(
+ endpoint=endpoint,
+ target=target,
+ custom_headers=headers,
+ ) # dynamically construct pass-through endpoint based on incoming path
+ received_value = await endpoint_func(
+ request,
+ fastapi_response,
+ user_api_key_dict,
+ stream=is_streaming_request, # type: ignore
+ )
+
+ return received_value
+
+
+def get_litellm_virtual_key(request: Request) -> str:
+ """
+ Extract and format API key from request headers.
+    Prioritizes the `x-litellm-api-key` header over the `Authorization` header.
+
+    The Vertex JS SDK uses the `Authorization` header for Google credentials,
+    so the litellm virtual key is passed via `x-litellm-api-key` instead.
+    """
+ litellm_api_key = request.headers.get("x-litellm-api-key")
+ if litellm_api_key:
+ return f"Bearer {litellm_api_key}"
+ return request.headers.get("Authorization", "")
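
The route above can be exercised with any HTTP client: the virtual key travels in `x-litellm-api-key` (leaving `Authorization` free for the Vertex SDK), and construct_target_url fills in the default project and location when the path omits them. A minimal sketch, assuming a proxy at http://localhost:4000 with default vertex credentials configured (DEFAULT_VERTEXAI_PROJECT / DEFAULT_VERTEXAI_LOCATION), a virtual key sk-1234, and a Gemini model name - all hypothetical:

```
import httpx

LITELLM_PROXY = "http://localhost:4000"  # hypothetical proxy address
LITELLM_VIRTUAL_KEY = "sk-1234"          # hypothetical litellm virtual key

# No /projects/.../locations/... segment in the path, so the proxy fills in
# the default project and location; a path that includes them is forwarded as-is.
resp = httpx.post(
    f"{LITELLM_PROXY}/vertex_ai/publishers/google/models/gemini-1.5-pro:generateContent",
    headers={"x-litellm-api-key": LITELLM_VIRTUAL_KEY},
    json={"contents": [{"role": "user", "parts": [{"text": "hello"}]}]},
)
print(resp.json())
```

A URL containing "stream" (e.g. :streamGenerateContent) is forwarded with ?alt=sse appended and streamed back, per the is_streaming_request branch above.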
diff --git a/.venv/lib/python3.12/site-packages/litellm/proxy/vertex_ai_endpoints/vertex_passthrough_router.py b/.venv/lib/python3.12/site-packages/litellm/proxy/vertex_ai_endpoints/vertex_passthrough_router.py
new file mode 100644
index 00000000..0273a620
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/litellm/proxy/vertex_ai_endpoints/vertex_passthrough_router.py
@@ -0,0 +1,121 @@
+import json
+import re
+from typing import Dict, Optional
+
+from litellm._logging import verbose_proxy_logger
+from litellm.proxy.vertex_ai_endpoints.vertex_endpoints import (
+ VertexPassThroughCredentials,
+)
+from litellm.types.llms.vertex_ai import VERTEX_CREDENTIALS_TYPES
+
+
+class VertexPassThroughRouter:
+ """
+    Router for Vertex AI pass-through endpoints.
+
+    - if the request specifies a project-id and location -> use the credentials stored for that project-id/location
+    - if it does not -> fall back to the credentials for DEFAULT_VERTEXAI_PROJECT / DEFAULT_VERTEXAI_LOCATION
+ """
+
+ def __init__(self):
+ """
+        Initialize the VertexPassThroughRouter.
+
+        Stores the vertex credentials for each deployment key ("{project_id}-{location}"):
+ ```
+ {
+ "project_id-location": VertexPassThroughCredentials,
+ "adroit-crow-us-central1": VertexPassThroughCredentials,
+ }
+ ```
+ """
+ self.deployment_key_to_vertex_credentials: Dict[
+ str, VertexPassThroughCredentials
+ ] = {}
+
+ def get_vertex_credentials(
+ self, project_id: Optional[str], location: Optional[str]
+ ) -> VertexPassThroughCredentials:
+ """
+ Get the vertex credentials for the given project-id, location
+ """
+ from litellm.proxy.vertex_ai_endpoints.vertex_endpoints import (
+ default_vertex_config,
+ )
+
+ deployment_key = self._get_deployment_key(
+ project_id=project_id,
+ location=location,
+ )
+ if deployment_key is None:
+ return default_vertex_config
+ if deployment_key in self.deployment_key_to_vertex_credentials:
+ return self.deployment_key_to_vertex_credentials[deployment_key]
+ else:
+ return default_vertex_config
+
+ def add_vertex_credentials(
+ self,
+ project_id: str,
+ location: str,
+ vertex_credentials: VERTEX_CREDENTIALS_TYPES,
+ ):
+ """
+ Add the vertex credentials for the given project-id, location
+ """
+ from litellm.proxy.vertex_ai_endpoints.vertex_endpoints import (
+ _set_default_vertex_config,
+ )
+
+ deployment_key = self._get_deployment_key(
+ project_id=project_id,
+ location=location,
+ )
+ if deployment_key is None:
+ verbose_proxy_logger.debug(
+ "No deployment key found for project-id, location"
+ )
+ return
+ vertex_pass_through_credentials = VertexPassThroughCredentials(
+ vertex_project=project_id,
+ vertex_location=location,
+ vertex_credentials=vertex_credentials,
+ )
+ self.deployment_key_to_vertex_credentials[deployment_key] = (
+ vertex_pass_through_credentials
+ )
+ verbose_proxy_logger.debug(
+ f"self.deployment_key_to_vertex_credentials: {json.dumps(self.deployment_key_to_vertex_credentials, indent=4, default=str)}"
+ )
+ _set_default_vertex_config(vertex_pass_through_credentials)
+
+ def _get_deployment_key(
+ self, project_id: Optional[str], location: Optional[str]
+ ) -> Optional[str]:
+ """
+ Get the deployment key for the given project-id, location
+ """
+ if project_id is None or location is None:
+ return None
+ return f"{project_id}-{location}"
+
+ @staticmethod
+ def _get_vertex_project_id_from_url(url: str) -> Optional[str]:
+ """
+ Get the vertex project id from the url
+
+ `https://${LOCATION}-aiplatform.googleapis.com/v1/projects/${PROJECT_ID}/locations/${LOCATION}/publishers/google/models/${MODEL_ID}:streamGenerateContent`
+ """
+ match = re.search(r"/projects/([^/]+)", url)
+ return match.group(1) if match else None
+
+ @staticmethod
+ def _get_vertex_location_from_url(url: str) -> Optional[str]:
+ """
+ Get the vertex location from the url
+
+ `https://${LOCATION}-aiplatform.googleapis.com/v1/projects/${PROJECT_ID}/locations/${LOCATION}/publishers/google/models/${MODEL_ID}:streamGenerateContent`
+ """
+ match = re.search(r"/locations/([^/]+)", url)
+ return match.group(1) if match else None
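
A minimal sketch of the router's lookup flow, using only the methods defined in this file (the service-account path is a hypothetical placeholder):

```
from litellm.proxy.vertex_ai_endpoints.vertex_passthrough_router import (
    VertexPassThroughRouter,
)

router = VertexPassThroughRouter()

# Stored under the deployment key "adroit-crow-us-central1".
router.add_vertex_credentials(
    project_id="adroit-crow",
    location="us-central1",
    vertex_credentials="/path/to/service_account.json",  # hypothetical
)

url = (
    "https://us-central1-aiplatform.googleapis.com/v1/projects/adroit-crow"
    "/locations/us-central1/publishers/google/models/gemini-1.5-pro:streamGenerateContent"
)

# Parse the project/location back out of an incoming request URL ...
project = VertexPassThroughRouter._get_vertex_project_id_from_url(url)  # "adroit-crow"
location = VertexPassThroughRouter._get_vertex_location_from_url(url)   # "us-central1"

# ... and resolve the matching credentials; if either piece is missing,
# get_vertex_credentials falls back to default_vertex_config.
creds = router.get_vertex_credentials(project_id=project, location=location)
print(creds.vertex_project, creds.vertex_location)
```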