path: root/.venv/lib/python3.12/site-packages/litellm/llms/anthropic/common_utils.py
author    S. Solomon Darnell 2025-03-28 21:52:21 -0500
committer S. Solomon Darnell 2025-03-28 21:52:21 -0500
commit    4a52a71956a8d46fcb7294ac71734504bb09bcc2 (patch)
tree      ee3dc5af3b6313e921cd920906356f5d4febc4ed /.venv/lib/python3.12/site-packages/litellm/llms/anthropic/common_utils.py
parent    cc961e04ba734dd72309fb548a2f97d67d578813 (diff)
download  gn-ai-4a52a71956a8d46fcb7294ac71734504bb09bcc2.tar.gz
two version of R2R are here (HEAD, master)
Diffstat (limited to '.venv/lib/python3.12/site-packages/litellm/llms/anthropic/common_utils.py')
-rw-r--r--  .venv/lib/python3.12/site-packages/litellm/llms/anthropic/common_utils.py  46
1 file changed, 46 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/litellm/llms/anthropic/common_utils.py b/.venv/lib/python3.12/site-packages/litellm/llms/anthropic/common_utils.py
new file mode 100644
index 00000000..409bbe2d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/litellm/llms/anthropic/common_utils.py
@@ -0,0 +1,46 @@
+"""
+This file contains common utils for anthropic calls.
+"""
+
+from typing import Optional, Union
+
+import httpx
+
+from litellm.llms.base_llm.chat.transformation import BaseLLMException
+
+
+class AnthropicError(BaseLLMException):
+ def __init__(
+ self,
+ status_code: int,
+ message,
+ headers: Optional[httpx.Headers] = None,
+ ):
+ super().__init__(status_code=status_code, message=message, headers=headers)
+
+
+def process_anthropic_headers(headers: Union[httpx.Headers, dict]) -> dict:
+ openai_headers = {}
+ if "anthropic-ratelimit-requests-limit" in headers:
+ openai_headers["x-ratelimit-limit-requests"] = headers[
+ "anthropic-ratelimit-requests-limit"
+ ]
+ if "anthropic-ratelimit-requests-remaining" in headers:
+ openai_headers["x-ratelimit-remaining-requests"] = headers[
+ "anthropic-ratelimit-requests-remaining"
+ ]
+ if "anthropic-ratelimit-tokens-limit" in headers:
+ openai_headers["x-ratelimit-limit-tokens"] = headers[
+ "anthropic-ratelimit-tokens-limit"
+ ]
+ if "anthropic-ratelimit-tokens-remaining" in headers:
+ openai_headers["x-ratelimit-remaining-tokens"] = headers[
+ "anthropic-ratelimit-tokens-remaining"
+ ]
+
+ llm_response_headers = {
+ "{}-{}".format("llm_provider", k): v for k, v in headers.items()
+ }
+
+ additional_headers = {**llm_response_headers, **openai_headers}
+ return additional_headers