| author | S. Solomon Darnell | 2025-03-28 21:52:21 -0500 |
|---|---|---|
| committer | S. Solomon Darnell | 2025-03-28 21:52:21 -0500 |
| commit | 4a52a71956a8d46fcb7294ac71734504bb09bcc2 (patch) | |
| tree | ee3dc5af3b6313e921cd920906356f5d4febc4ed /.venv/lib/python3.12/site-packages/litellm/llms/groq/chat/handler.py | |
| parent | cc961e04ba734dd72309fb548a2f97d67d578813 (diff) | |
| download | gn-ai-master.tar.gz | |
Diffstat (limited to '.venv/lib/python3.12/site-packages/litellm/llms/groq/chat/handler.py')
| -rw-r--r-- | .venv/lib/python3.12/site-packages/litellm/llms/groq/chat/handler.py | 76 |

1 file changed, 76 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/litellm/llms/groq/chat/handler.py b/.venv/lib/python3.12/site-packages/litellm/llms/groq/chat/handler.py
new file mode 100644
index 00000000..dc4c3222
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/litellm/llms/groq/chat/handler.py
@@ -0,0 +1,76 @@
+"""
+Handles the chat completion request for groq
+"""
+
+from typing import Callable, List, Optional, Union, cast
+
+from httpx._config import Timeout
+
+from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
+from litellm.types.llms.openai import AllMessageValues
+from litellm.types.utils import CustomStreamingDecoder
+from litellm.utils import ModelResponse
+
+from ...groq.chat.transformation import GroqChatConfig
+from ...openai_like.chat.handler import OpenAILikeChatHandler
+
+
+class GroqChatCompletion(OpenAILikeChatHandler):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+    def completion(
+        self,
+        *,
+        model: str,
+        messages: list,
+        api_base: str,
+        custom_llm_provider: str,
+        custom_prompt_dict: dict,
+        model_response: ModelResponse,
+        print_verbose: Callable,
+        encoding,
+        api_key: Optional[str],
+        logging_obj,
+        optional_params: dict,
+        acompletion=None,
+        litellm_params=None,
+        logger_fn=None,
+        headers: Optional[dict] = None,
+        timeout: Optional[Union[float, Timeout]] = None,
+        client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None,
+        custom_endpoint: Optional[bool] = None,
+        streaming_decoder: Optional[CustomStreamingDecoder] = None,
+        fake_stream: bool = False,
+    ):
+        messages = GroqChatConfig()._transform_messages(
+            messages=cast(List[AllMessageValues], messages), model=model
+        )
+
+        if optional_params.get("stream") is True:
+            fake_stream = GroqChatConfig()._should_fake_stream(optional_params)
+        else:
+            fake_stream = False
+
+        return super().completion(
+            model=model,
+            messages=messages,
+            api_base=api_base,
+            custom_llm_provider=custom_llm_provider,
+            custom_prompt_dict=custom_prompt_dict,
+            model_response=model_response,
+            print_verbose=print_verbose,
+            encoding=encoding,
+            api_key=api_key,
+            logging_obj=logging_obj,
+            optional_params=optional_params,
+            acompletion=acompletion,
+            litellm_params=litellm_params,
+            logger_fn=logger_fn,
+            headers=headers,
+            timeout=timeout,
+            client=client,
+            custom_endpoint=custom_endpoint,
+            streaming_decoder=streaming_decoder,
+            fake_stream=fake_stream,
+        )
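
For context, below is a minimal sketch of how this handler is normally reached through litellm's public `completion()` API rather than by instantiating `GroqChatCompletion` directly. The model name, the environment-variable key handling, and the assumption that the `groq/` model prefix routes requests through this handler are illustrative assumptions, not taken from the diff itself.

```python
# Minimal usage sketch (assumptions: the "groq/" prefix dispatches to the Groq
# chat handler added in the diff above; model name and key value are placeholders).
import os

import litellm

os.environ["GROQ_API_KEY"] = "gsk_..."  # hypothetical placeholder, not a real key

# Non-streaming call: the handler transforms the messages via GroqChatConfig
# and then defers to the OpenAI-like chat handler.
response = litellm.completion(
    model="groq/llama3-8b-8192",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
)
print(response.choices[0].message.content)

# Streaming call: per the handler above, when stream=True it consults
# GroqChatConfig._should_fake_stream() to decide whether to simulate streaming
# from a single response instead of issuing a true streaming request.
for chunk in litellm.completion(
    model="groq/llama3-8b-8192",
    messages=[{"role": "user", "content": "Count to three."}],
    stream=True,
):
    delta = chunk.choices[0].delta.content
    if delta:
        print(delta, end="")
```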