author     S. Solomon Darnell  2025-03-28 21:52:21 -0500
committer  S. Solomon Darnell  2025-03-28 21:52:21 -0500
commit     4a52a71956a8d46fcb7294ac71734504bb09bcc2 (patch)
tree       ee3dc5af3b6313e921cd920906356f5d4febc4ed /.venv/lib/python3.12/site-packages/litellm/integrations/athina.py
parent     cc961e04ba734dd72309fb548a2f97d67d578813 (diff)
Diffstat (limited to '.venv/lib/python3.12/site-packages/litellm/integrations/athina.py')
-rw-r--r--  .venv/lib/python3.12/site-packages/litellm/integrations/athina.py  102
1 file changed, 102 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/litellm/integrations/athina.py b/.venv/lib/python3.12/site-packages/litellm/integrations/athina.py
new file mode 100644
index 00000000..705dc11f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/litellm/integrations/athina.py
@@ -0,0 +1,102 @@
+import datetime
+
+import litellm
+
+
+class AthinaLogger:
+    def __init__(self):
+        import os
+
+        self.athina_api_key = os.getenv("ATHINA_API_KEY")
+        self.headers = {
+            "athina-api-key": self.athina_api_key,
+            "Content-Type": "application/json",
+        }
+        self.athina_logging_url = os.getenv("ATHINA_BASE_URL", "https://log.athina.ai") + "/api/v1/log/inference"
+        self.additional_keys = [
+            "environment",
+            "prompt_slug",
+            "customer_id",
+            "customer_user_id",
+            "session_id",
+            "external_reference_id",
+            "context",
+            "expected_response",
+            "user_query",
+            "tags",
+            "user_feedback",
+            "model_options",
+            "custom_attributes",
+        ]
+
+    def log_event(self, kwargs, response_obj, start_time, end_time, print_verbose):
+        import json
+        import traceback
+
+        try:
+            is_stream = kwargs.get("stream", False)
+            if is_stream:
+                if "complete_streaming_response" in kwargs:
+                    # Log the completion response in streaming mode
+                    completion_response = kwargs["complete_streaming_response"]
+                    response_json = (
+                        completion_response.model_dump() if completion_response else {}
+                    )
+                else:
+                    # Skip logging if the completion response is not available
+                    return
+            else:
+                # Log the completion response in non streaming mode
+                response_json = response_obj.model_dump() if response_obj else {}
+            data = {
+                "language_model_id": kwargs.get("model"),
+                "request": kwargs,
+                "response": response_json,
+                "prompt_tokens": response_json.get("usage", {}).get("prompt_tokens"),
+                "completion_tokens": response_json.get("usage", {}).get(
+                    "completion_tokens"
+                ),
+                "total_tokens": response_json.get("usage", {}).get("total_tokens"),
+            }
+
+            if (
+                type(end_time) is datetime.datetime
+                and type(start_time) is datetime.datetime
+            ):
+                data["response_time"] = int(
+                    (end_time - start_time).total_seconds() * 1000
+                )
+
+            if "messages" in kwargs:
+                data["prompt"] = kwargs.get("messages", None)
+
+            # Directly add tools or functions if present
+            optional_params = kwargs.get("optional_params", {})
+            data.update(
+                (k, v)
+                for k, v in optional_params.items()
+                if k in ["tools", "functions"]
+            )
+
+            # Add additional metadata keys
+            metadata = kwargs.get("litellm_params", {}).get("metadata", {})
+            if metadata:
+                for key in self.additional_keys:
+                    if key in metadata:
+                        data[key] = metadata[key]
+            response = litellm.module_level_client.post(
+                self.athina_logging_url,
+                headers=self.headers,
+                data=json.dumps(data, default=str),
+            )
+            if response.status_code != 200:
+                print_verbose(
+                    f"Athina Logger Error - {response.text}, {response.status_code}"
+                )
+            else:
+                print_verbose(f"Athina Logger Succeeded - {response.text}")
+        except Exception as e:
+            print_verbose(
+                f"Athina Logger Error - {e}, Stack trace: {traceback.format_exc()}"
+            )
+            pass
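In normal operation litellm invokes this logger itself once Athina logging is enabled, passing the request kwargs, the response object, and start/end timestamps. The sketch below only illustrates the shapes log_event expects if called directly; the _StubResponse class, the sample kwargs, and the placeholder API key are hypothetical stand-ins, not part of litellm's API, and a real call will attempt an HTTP POST to the configured Athina endpoint.

```python
# Minimal, hypothetical sketch of driving AthinaLogger.log_event by hand,
# assuming the module added in this commit. All sample values are illustrative.
import datetime
import os

from litellm.integrations.athina import AthinaLogger


class _StubResponse:
    """Stand-in object exposing the .model_dump() interface log_event reads."""

    def model_dump(self):
        return {
            "choices": [{"message": {"role": "assistant", "content": "Hi there!"}}],
            "usage": {"prompt_tokens": 5, "completion_tokens": 3, "total_tokens": 8},
        }


# Placeholder key so the constructor can build its request headers.
os.environ.setdefault("ATHINA_API_KEY", "sk-athina-placeholder")

logger = AthinaLogger()
start = datetime.datetime.now()
end = start + datetime.timedelta(milliseconds=250)

logger.log_event(
    kwargs={
        "model": "gpt-4o-mini",
        "stream": False,
        "messages": [{"role": "user", "content": "Hello"}],
        "litellm_params": {"metadata": {"environment": "dev", "session_id": "abc-123"}},
        "optional_params": {},
    },
    response_obj=_StubResponse(),
    start_time=start,   # with datetime start/end, response_time is added in ms
    end_time=end,
    print_verbose=print,  # route the logger's status messages to stdout
)
```

Because both timestamps are datetime objects, the payload gains a response_time in milliseconds, and the metadata keys listed in additional_keys (here environment and session_id) are copied onto the logged record before the POST.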