author     S. Solomon Darnell  2025-03-28 21:52:21 -0500
committer  S. Solomon Darnell  2025-03-28 21:52:21 -0500
commit     4a52a71956a8d46fcb7294ac71734504bb09bcc2 (patch)
tree       ee3dc5af3b6313e921cd920906356f5d4febc4ed  /.venv/lib/python3.12/site-packages/litellm/proxy/custom_prompt_management.py
parent     cc961e04ba734dd72309fb548a2f97d67d578813 (diff)
download   gn-ai-4a52a71956a8d46fcb7294ac71734504bb09bcc2.tar.gz
two versions of R2R are here (HEAD, master)
Diffstat (limited to '.venv/lib/python3.12/site-packages/litellm/proxy/custom_prompt_management.py')
-rw-r--r--  .venv/lib/python3.12/site-packages/litellm/proxy/custom_prompt_management.py  36
1 file changed, 36 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/litellm/proxy/custom_prompt_management.py b/.venv/lib/python3.12/site-packages/litellm/proxy/custom_prompt_management.py
new file mode 100644
index 00000000..7f320ac0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/litellm/proxy/custom_prompt_management.py
@@ -0,0 +1,36 @@
+from typing import List, Optional, Tuple
+
+from litellm._logging import verbose_logger
+from litellm.integrations.custom_prompt_management import CustomPromptManagement
+from litellm.types.llms.openai import AllMessageValues
+from litellm.types.utils import StandardCallbackDynamicParams
+
+
+class X42PromptManagement(CustomPromptManagement):
+    def get_chat_completion_prompt(
+        self,
+        model: str,
+        messages: List[AllMessageValues],
+        non_default_params: dict,
+        prompt_id: str,
+        prompt_variables: Optional[dict],
+        dynamic_callback_params: StandardCallbackDynamicParams,
+    ) -> Tuple[str, List[AllMessageValues], dict]:
+        """
+        Returns:
+        - model: str - the model to use (can be pulled from prompt management tool)
+        - messages: List[AllMessageValues] - the messages to use (can be pulled from prompt management tool)
+        - non_default_params: dict - update with any optional params (e.g. temperature, max_tokens, etc.) to use (can be pulled from prompt management tool)
+        """
+        verbose_logger.debug(
+            f"in get_chat_completion_prompt. Prompt ID: {prompt_id}, Prompt Variables: {prompt_variables}, Dynamic Callback Params: {dynamic_callback_params}"
+        )
+
+        return model, messages, non_default_params
+
+    @property
+    def integration_name(self) -> str:
+        return "x42-prompt-management"
+
+
+x42_prompt_management = X42PromptManagement()