about summary refs log tree commit diff
path: root/.venv/lib/python3.12/site-packages/litellm/proxy/response_api_endpoints
diff options
context:
space:
mode:
authorS. Solomon Darnell2025-03-28 21:52:21 -0500
committerS. Solomon Darnell2025-03-28 21:52:21 -0500
commit4a52a71956a8d46fcb7294ac71734504bb09bcc2 (patch)
treeee3dc5af3b6313e921cd920906356f5d4febc4ed /.venv/lib/python3.12/site-packages/litellm/proxy/response_api_endpoints
parentcc961e04ba734dd72309fb548a2f97d67d578813 (diff)
downloadgn-ai-master.tar.gz
two versions of R2R are here HEAD master
Diffstat (limited to '.venv/lib/python3.12/site-packages/litellm/proxy/response_api_endpoints')
-rw-r--r--.venv/lib/python3.12/site-packages/litellm/proxy/response_api_endpoints/endpoints.py170
1 files changed, 170 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/litellm/proxy/response_api_endpoints/endpoints.py b/.venv/lib/python3.12/site-packages/litellm/proxy/response_api_endpoints/endpoints.py
new file mode 100644
index 00000000..f9ddf306
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/litellm/proxy/response_api_endpoints/endpoints.py
@@ -0,0 +1,170 @@
from fastapi import APIRouter, Depends, Request, Response

from litellm.proxy._types import *
from litellm.proxy.auth.user_api_key_auth import UserAPIKeyAuth, user_api_key_auth
from litellm.proxy.common_request_processing import ProxyBaseLLMRequestProcessing

# Router exposing OpenAI-compatible Responses API endpoints (/responses and
# /v1/responses); mounted by the proxy server application.
router = APIRouter()
+
+
@router.post(
    "/v1/responses",
    dependencies=[Depends(user_api_key_auth)],
    tags=["responses"],
)
@router.post(
    "/responses",
    dependencies=[Depends(user_api_key_auth)],
    tags=["responses"],
)
async def responses_api(
    request: Request,
    fastapi_response: Response,
    user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
):
    """
    Create a model response.

    Follows the OpenAI Responses API spec: https://platform.openai.com/docs/api-reference/responses

    ```bash
    curl -X POST http://localhost:4000/v1/responses \
    -H "Content-Type: application/json" \
    -H "Authorization: Bearer sk-1234" \
    -d '{
        "model": "gpt-4o",
        "input": "Tell me about AI"
    }'
    ```
    """
    # Deferred import: proxy_server exposes mutable module-level state
    # (router, settings, user overrides) that is populated at startup, so it
    # must be read at request time rather than at module import time.
    from litellm.proxy.proxy_server import (
        _read_request_body,
        general_settings,
        llm_router,
        proxy_config,
        proxy_logging_obj,
        select_data_generator,
        user_api_base,
        user_max_tokens,
        user_model,
        user_request_timeout,
        user_temperature,
        version,
    )

    request_payload = await _read_request_body(request=request)
    llm_processing = ProxyBaseLLMRequestProcessing(data=request_payload)

    try:
        # Delegate the full request lifecycle (routing, logging, streaming)
        # to the shared proxy request-processing pipeline.
        return await llm_processing.base_process_llm_request(
            request=request,
            fastapi_response=fastapi_response,
            user_api_key_dict=user_api_key_dict,
            route_type="aresponses",
            proxy_logging_obj=proxy_logging_obj,
            llm_router=llm_router,
            general_settings=general_settings,
            proxy_config=proxy_config,
            select_data_generator=select_data_generator,
            model=None,
            user_model=user_model,
            user_temperature=user_temperature,
            user_request_timeout=user_request_timeout,
            user_max_tokens=user_max_tokens,
            user_api_base=user_api_base,
            version=version,
        )
    except Exception as e:
        # The helper converts the raw exception into a proxy-formatted HTTP
        # error; it is awaited first, then raised here.
        raise await llm_processing._handle_llm_api_exception(
            e=e,
            user_api_key_dict=user_api_key_dict,
            proxy_logging_obj=proxy_logging_obj,
            version=version,
        )
+
+
@router.get(
    "/v1/responses/{response_id}",
    dependencies=[Depends(user_api_key_auth)],
    tags=["responses"],
)
@router.get(
    "/responses/{response_id}",
    dependencies=[Depends(user_api_key_auth)],
    tags=["responses"],
)
async def get_response(
    response_id: str,
    request: Request,
    fastapi_response: Response,
    user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
):
    """
    Get a response by ID.

    Follows the OpenAI Responses API spec: https://platform.openai.com/docs/api-reference/responses/get

    Args:
        response_id: Identifier of the response to retrieve (path parameter).
        request: Incoming FastAPI request.
        fastapi_response: Outgoing FastAPI response object.
        user_api_key_dict: Authenticated key metadata from user_api_key_auth.

    Raises:
        HTTPException: 501 Not Implemented — retrieval is not yet wired up.

    ```bash
    curl -X GET http://localhost:4000/v1/responses/resp_abc123 \
    -H "Authorization: Bearer sk-1234"
    ```
    """
    from fastapi import HTTPException, status

    # TODO: Implement response retrieval logic.
    # The previous stub fell through and returned HTTP 200 with a null body,
    # which looked like a successful lookup; report the missing feature
    # explicitly instead.
    raise HTTPException(
        status_code=status.HTTP_501_NOT_IMPLEMENTED,
        detail=f"get_response is not implemented yet (response_id={response_id})",
    )
+
+
@router.delete(
    "/v1/responses/{response_id}",
    dependencies=[Depends(user_api_key_auth)],
    tags=["responses"],
)
@router.delete(
    "/responses/{response_id}",
    dependencies=[Depends(user_api_key_auth)],
    tags=["responses"],
)
async def delete_response(
    response_id: str,
    request: Request,
    fastapi_response: Response,
    user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
):
    """
    Delete a response by ID.

    Follows the OpenAI Responses API spec: https://platform.openai.com/docs/api-reference/responses/delete

    Args:
        response_id: Identifier of the response to delete (path parameter).
        request: Incoming FastAPI request.
        fastapi_response: Outgoing FastAPI response object.
        user_api_key_dict: Authenticated key metadata from user_api_key_auth.

    Raises:
        HTTPException: 501 Not Implemented — deletion is not yet wired up.

    ```bash
    curl -X DELETE http://localhost:4000/v1/responses/resp_abc123 \
    -H "Authorization: Bearer sk-1234"
    ```
    """
    from fastapi import HTTPException, status

    # TODO: Implement response deletion logic.
    # The previous stub fell through and returned HTTP 200 with a null body,
    # which looked like a successful delete; report the missing feature
    # explicitly instead.
    raise HTTPException(
        status_code=status.HTTP_501_NOT_IMPLEMENTED,
        detail=f"delete_response is not implemented yet (response_id={response_id})",
    )
+
+
@router.get(
    "/v1/responses/{response_id}/input_items",
    dependencies=[Depends(user_api_key_auth)],
    tags=["responses"],
)
@router.get(
    "/responses/{response_id}/input_items",
    dependencies=[Depends(user_api_key_auth)],
    tags=["responses"],
)
async def get_response_input_items(
    response_id: str,
    request: Request,
    fastapi_response: Response,
    user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
):
    """
    Get input items for a response.

    Follows the OpenAI Responses API spec: https://platform.openai.com/docs/api-reference/responses/input-items

    Args:
        response_id: Identifier of the parent response (path parameter).
        request: Incoming FastAPI request.
        fastapi_response: Outgoing FastAPI response object.
        user_api_key_dict: Authenticated key metadata from user_api_key_auth.

    Raises:
        HTTPException: 501 Not Implemented — input-item listing is not yet
            wired up.

    ```bash
    curl -X GET http://localhost:4000/v1/responses/resp_abc123/input_items \
    -H "Authorization: Bearer sk-1234"
    ```
    """
    from fastapi import HTTPException, status

    # TODO: Implement input items retrieval logic.
    # The previous stub fell through and returned HTTP 200 with a null body,
    # which looked like a successful (empty) listing; report the missing
    # feature explicitly instead.
    raise HTTPException(
        status_code=status.HTTP_501_NOT_IMPLEMENTED,
        detail=f"get_response_input_items is not implemented yet (response_id={response_id})",
    )