aboutsummaryrefslogtreecommitdiff
path: root/.venv/lib/python3.12/site-packages/litellm/proxy/example_config_yaml/custom_handler.py
diff options
context:
space:
mode:
Diffstat (limited to '.venv/lib/python3.12/site-packages/litellm/proxy/example_config_yaml/custom_handler.py')
-rw-r--r--  .venv/lib/python3.12/site-packages/litellm/proxy/example_config_yaml/custom_handler.py  26
1 file changed, 26 insertions(+), 0 deletions(-)
diff --git a/.venv/lib/python3.12/site-packages/litellm/proxy/example_config_yaml/custom_handler.py b/.venv/lib/python3.12/site-packages/litellm/proxy/example_config_yaml/custom_handler.py
new file mode 100644
index 00000000..52e6686e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/litellm/proxy/example_config_yaml/custom_handler.py
@@ -0,0 +1,26 @@
+import time
+from typing import Any, Optional
+
+import litellm
+from litellm import CustomLLM, ImageObject, ImageResponse, completion, get_llm_provider
+from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler
+from litellm.types.utils import ModelResponse
+
+
class MyCustomLLM(CustomLLM):
    """Minimal example of a litellm proxy `CustomLLM` provider.

    Both entry points ignore their positional/keyword arguments and return a
    canned ``mock_response`` of ``"Hi!"``, so the example runs without any
    real provider credentials or network access.
    """

    def completion(self, *args, **kwargs) -> ModelResponse:
        """Synchronous completion hook: always returns the mocked "Hi!" reply."""
        return litellm.completion(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Hello world"}],
            mock_response="Hi!",
        )  # type: ignore

    async def acompletion(self, *args, **kwargs) -> ModelResponse:
        """Async completion hook: returns the same mocked "Hi!" reply.

        NOTE(review): this calls the *sync* ``litellm.completion`` from an
        async method. Acceptable in this example because ``mock_response``
        short-circuits any blocking network I/O, but a real implementation
        should ``await litellm.acompletion(...)`` instead.
        """
        # Return annotation unified with completion() above — it previously
        # spelled the same type as `litellm.ModelResponse`.
        return litellm.completion(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Hello world"}],
            mock_response="Hi!",
        )  # type: ignore


# Module-level singleton; presumably referenced from the proxy's YAML config
# (e.g. `custom_handler.my_custom_llm` under `custom_provider_map`) —
# TODO confirm against the accompanying example config file.
my_custom_llm = MyCustomLLM()