author    S. Solomon Darnell  2025-03-28 21:52:21 -0500
committer S. Solomon Darnell  2025-03-28 21:52:21 -0500
commit    4a52a71956a8d46fcb7294ac71734504bb09bcc2 (patch)
tree      ee3dc5af3b6313e921cd920906356f5d4febc4ed /.venv/lib/python3.12/site-packages/litellm/litellm_core_utils/rules.py
parent    cc961e04ba734dd72309fb548a2f97d67d578813 (diff)
download  gn-ai-master.tar.gz
two versions of R2R are here (HEAD, master)
Diffstat (limited to '.venv/lib/python3.12/site-packages/litellm/litellm_core_utils/rules.py')
-rw-r--r--  .venv/lib/python3.12/site-packages/litellm/litellm_core_utils/rules.py  50
1 file changed, 50 insertions(+), 0 deletions(-)
diff --git a/.venv/lib/python3.12/site-packages/litellm/litellm_core_utils/rules.py b/.venv/lib/python3.12/site-packages/litellm/litellm_core_utils/rules.py
new file mode 100644
index 00000000..beeb012d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/litellm/litellm_core_utils/rules.py
@@ -0,0 +1,50 @@
+from typing import Optional
+
+import litellm
+
+
+class Rules:
+    """
+    Fail calls based on the input or llm api output
+
+    Example usage:
+    import litellm
+    def my_custom_rule(input): # receives the model response
+            if "i don't think i can answer" in input: # trigger fallback if the model refuses to answer
+                    return False
+            return True
+
+    litellm.post_call_rules = [my_custom_rule] # have these be functions that can be called to fail a call
+
+    response = litellm.completion(model="gpt-3.5-turbo", messages=[{"role": "user",
+        "content": "Hey, how's it going?"}], fallbacks=["openrouter/mythomax"])
+    """
+
+    def __init__(self) -> None:
+        pass
+
+    def pre_call_rules(self, input: str, model: str) -> bool:
+        for rule in litellm.pre_call_rules:
+            if callable(rule):
+                decision = rule(input)
+                if decision is False:
+                    raise litellm.APIResponseValidationError(message="LLM Response failed pre-call-rule check", llm_provider="", model=model)  # type: ignore
+        return True
+
+    def post_call_rules(self, input: Optional[str], model: str) -> bool:
+        if input is None:
+            return True
+        for rule in litellm.post_call_rules:
+            if callable(rule):
+                decision = rule(input)
+                if isinstance(decision, bool):
+                    if decision is False:
+                        raise litellm.APIResponseValidationError(message="LLM Response failed post-call-rule check", llm_provider="", model=model)  # type: ignore
+                elif isinstance(decision, dict):
+                    decision_val = decision.get("decision", True)
+                    decision_message = decision.get(
+                        "message", "LLM Response failed post-call-rule check"
+                    )
+                    if decision_val is False:
+                        raise litellm.APIResponseValidationError(message=decision_message, llm_provider="", model=model)  # type: ignore
+        return True
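
For reference, a minimal sketch of wiring up both hook points this file adds. The rule functions, the length threshold, the model name, and the fallback list below are illustrative placeholders rather than anything shipped with litellm; the dict return shape ({"decision": ..., "message": ...}) mirrors the branch handled in post_call_rules above.

import litellm

# Hypothetical pre-call rule: receives the prompt text; returning False
# fails the call before it is sent.
def max_prompt_length_rule(input):
    return len(input) <= 4000  # 4000 is an arbitrary example threshold

# Hypothetical post-call rule using the dict form: "decision" controls
# pass/fail and "message" customizes the raised error.
def refusal_rule(output):
    if "i don't think i can answer" in output.lower():
        return {"decision": False, "message": "Model refused; rerouting via fallbacks"}
    return True

litellm.pre_call_rules = [max_prompt_length_rule]
litellm.post_call_rules = [refusal_rule]

# A failing rule raises litellm.APIResponseValidationError, which can
# trigger the models listed in the fallbacks parameter, as in the
# docstring example above.
response = litellm.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hey, how's it going?"}],
    fallbacks=["openrouter/mythomax"],
)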