author     S. Solomon Darnell  2025-03-28 21:52:21 -0500
committer  S. Solomon Darnell  2025-03-28 21:52:21 -0500
commit     4a52a71956a8d46fcb7294ac71734504bb09bcc2 (patch)
tree       ee3dc5af3b6313e921cd920906356f5d4febc4ed /.venv/lib/python3.12/site-packages/litellm/llms/hosted_vllm
parent     cc961e04ba734dd72309fb548a2f97d67d578813 (diff)
Diffstat (limited to '.venv/lib/python3.12/site-packages/litellm/llms/hosted_vllm')
-rw-r--r--  .venv/lib/python3.12/site-packages/litellm/llms/hosted_vllm/chat/transformation.py | 40
-rw-r--r--  .venv/lib/python3.12/site-packages/litellm/llms/hosted_vllm/embedding/README.md   |  5
2 files changed, 45 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/litellm/llms/hosted_vllm/chat/transformation.py b/.venv/lib/python3.12/site-packages/litellm/llms/hosted_vllm/chat/transformation.py
new file mode 100644
index 00000000..9332e987
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/litellm/llms/hosted_vllm/chat/transformation.py
@@ -0,0 +1,40 @@
+"""
+Translate from OpenAI's `/v1/chat/completions` to VLLM's `/v1/chat/completions`
+"""
+
+from typing import Optional, Tuple
+
+from litellm.secret_managers.main import get_secret_str
+
+from ....utils import _remove_additional_properties, _remove_strict_from_schema
+from ...openai.chat.gpt_transformation import OpenAIGPTConfig
+
+
+class HostedVLLMChatConfig(OpenAIGPTConfig):
+    def map_openai_params(
+        self,
+        non_default_params: dict,
+        optional_params: dict,
+        model: str,
+        drop_params: bool,
+    ) -> dict:
+        _tools = non_default_params.pop("tools", None)
+        if _tools is not None:
+            # remove 'additionalProperties' from tools
+            _tools = _remove_additional_properties(_tools)
+            # remove 'strict' from tools
+            _tools = _remove_strict_from_schema(_tools)
+        if _tools is not None:
+            non_default_params["tools"] = _tools
+        return super().map_openai_params(
+            non_default_params, optional_params, model, drop_params
+        )
+
+    def _get_openai_compatible_provider_info(
+        self, api_base: Optional[str], api_key: Optional[str]
+    ) -> Tuple[Optional[str], Optional[str]]:
+        api_base = api_base or get_secret_str("HOSTED_VLLM_API_BASE")  # type: ignore
+        dynamic_api_key = (
+            api_key or get_secret_str("HOSTED_VLLM_API_KEY") or "fake-api-key"
+        )  # vllm does not require an api key
+        return api_base, dynamic_api_key
diff --git a/.venv/lib/python3.12/site-packages/litellm/llms/hosted_vllm/embedding/README.md b/.venv/lib/python3.12/site-packages/litellm/llms/hosted_vllm/embedding/README.md
new file mode 100644
index 00000000..f82b3c77
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/litellm/llms/hosted_vllm/embedding/README.md
@@ -0,0 +1,5 @@
+No transformation is required for hosted_vllm embedding.
+
+VLLM is a superset of OpenAI's `embedding` endpoint.
+
+To pass provider-specific parameters, see [this](https://docs.litellm.ai/docs/completion/provider_specific_params)
\ No newline at end of file
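
To illustrate what `map_openai_params` does with tool schemas, here is a minimal sketch that drives the new config class directly. The tool definition, model name, and parameter values are hypothetical; only the method signature and the `additionalProperties`/`strict` stripping come from the diff above.

```python
from litellm.llms.hosted_vllm.chat.transformation import HostedVLLMChatConfig

config = HostedVLLMChatConfig()

# A hypothetical OpenAI-style tool definition. 'strict' and
# 'additionalProperties' are the two fields the config strips
# before the request is forwarded to vLLM.
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "strict": True,
            "parameters": {
                "type": "object",
                "additionalProperties": False,
                "properties": {"city": {"type": "string"}},
            },
        },
    }
]

optional_params = config.map_openai_params(
    non_default_params={"tools": tools, "temperature": 0.2},
    optional_params={},
    model="my-vllm-model",  # hypothetical model name
    drop_params=False,
)
# optional_params now carries the cleaned tools list, with 'strict'
# and 'additionalProperties' removed from the schema.
```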
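Since `_get_openai_compatible_provider_info` falls back to the `HOSTED_VLLM_API_BASE` / `HOSTED_VLLM_API_KEY` environment variables, and substitutes `"fake-api-key"` when no key is found (vLLM does not require one), a typical call needs only the base URL. A minimal sketch, assuming a vLLM OpenAI-compatible server on localhost and a hypothetical model name:

```python
import os

import litellm

# Assumption: a vLLM server with the OpenAI-compatible API is listening here.
os.environ["HOSTED_VLLM_API_BASE"] = "http://localhost:8000/v1"
# No HOSTED_VLLM_API_KEY is set, so the config substitutes "fake-api-key".

response = litellm.completion(
    model="hosted_vllm/my-vllm-model",  # hypothetical model name
    messages=[{"role": "user", "content": "Hello!"}],
)
print(response.choices[0].message.content)
```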
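Because no transformation is needed on the embedding path, per the README above, an embedding request passes straight through to the vLLM server's OpenAI-compatible `/v1/embeddings` endpoint. A minimal sketch, again assuming a local vLLM server and a hypothetical embedding model:

```python
import litellm

# Assumptions: a local vLLM server serving a hypothetical embedding model.
response = litellm.embedding(
    model="hosted_vllm/my-embedding-model",
    input=["hello world"],
    api_base="http://localhost:8000/v1",
)
print(response)
```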