"""Shared utilities for the Hugging Face LLM provider.

Reconstructed from a cgit-rendered patch adding
``litellm/llms/huggingface/common_utils.py``.
"""

from typing import Literal, Optional, Union

import httpx

from litellm.llms.base_llm.chat.transformation import BaseLLMException


class HuggingfaceError(BaseLLMException):
    """Provider-specific error raised for failed Hugging Face API calls.

    Thin wrapper over ``BaseLLMException`` that forwards the HTTP status
    code, message, and (optionally) the response headers.
    """

    def __init__(
        self,
        status_code: int,
        message: str,
        headers: Optional[Union[dict, httpx.Headers]] = None,
    ):
        super().__init__(status_code=status_code, message=message, headers=headers)


# Tasks supported for Hugging Face models (type-checker-facing Literal).
hf_tasks = Literal[
    "text-generation-inference",
    "conversational",
    "text-classification",
    "text-generation",
]

# Runtime-iterable counterpart of ``hf_tasks`` (a Literal cannot be iterated).
hf_task_list = [
    "text-generation-inference",
    "conversational",
    "text-classification",
    "text-generation",
]


def output_parser(generated_text: str):
    """
    Parse the output text to remove any special characters. In our current approach we just check for ChatML tokens.

    Initial issue that prompted this - https://github.com/BerriAI/litellm/issues/763
    """
    # FIX: the rendered source had two empty-string tokens here ("" and ""),
    # which are no-ops in str.replace — the HTML rendering ate the literal
    # "<s>" / "</s>" BOS/EOS markers that upstream litellm strips. Restored.
    chat_template_tokens = ["<|assistant|>", "<|system|>", "<|user|>", "<s>", "</s>"]
    for token in chat_template_tokens:
        # Strip a single leading occurrence of the token (leading whitespace
        # is ignored for the check but preserved in the output).
        if generated_text.strip().startswith(token):
            generated_text = generated_text.replace(token, "", 1)
        # Strip a single trailing occurrence: reverse, drop the first
        # (i.e. last) occurrence, reverse back.
        if generated_text.endswith(token):
            generated_text = generated_text[::-1].replace(token[::-1], "", 1)[::-1]
    return generated_text