path: root/.venv/lib/python3.12/site-packages/sentry_sdk
author    S. Solomon Darnell  2025-03-28 21:52:21 -0500
committer S. Solomon Darnell  2025-03-28 21:52:21 -0500
commit    4a52a71956a8d46fcb7294ac71734504bb09bcc2 (patch)
tree      ee3dc5af3b6313e921cd920906356f5d4febc4ed /.venv/lib/python3.12/site-packages/sentry_sdk
parent    cc961e04ba734dd72309fb548a2f97d67d578813 (diff)
two versions of R2R are here (HEAD, master)
Diffstat (limited to '.venv/lib/python3.12/site-packages/sentry_sdk')
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/__init__.py  58
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/_compat.py  98
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/_experimental_logger.py  20
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/_init_implementation.py  84
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/_lru_cache.py  47
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/_queue.py  289
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/_types.py  313
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/_werkzeug.py  98
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/ai/__init__.py  0
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/ai/monitoring.py  115
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/ai/utils.py  32
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/api.py  433
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/attachments.py  75
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/client.py  1079
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/consts.py  968
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/crons/__init__.py  10
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/crons/api.py  57
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/crons/consts.py  4
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/crons/decorator.py  135
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/debug.py  41
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/envelope.py  361
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/feature_flags.py  68
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/hub.py  739
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/__init__.py  293
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/_asgi_common.py  108
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/_wsgi_common.py  271
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/aiohttp.py  357
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/anthropic.py  288
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/argv.py  31
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/ariadne.py  161
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/arq.py  246
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/asgi.py  337
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/asyncio.py  144
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/asyncpg.py  208
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/atexit.py  57
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/aws_lambda.py  499
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/beam.py  176
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/boto3.py  137
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/bottle.py  221
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/celery/__init__.py  528
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/celery/beat.py  293
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/celery/utils.py  43
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/chalice.py  134
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/clickhouse_driver.py  157
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/cloud_resource_context.py  280
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/cohere.py  270
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/dedupe.py  51
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/django/__init__.py  747
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/django/asgi.py  245
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/django/caching.py  191
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/django/middleware.py  187
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/django/signals_handlers.py  91
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/django/templates.py  188
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/django/transactions.py  159
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/django/views.py  96
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/dramatiq.py  168
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/excepthook.py  83
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/executing.py  67
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/falcon.py  272
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/fastapi.py  147
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/flask.py  275
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/gcp.py  234
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/gnu_backtrace.py  107
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/gql.py  145
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/graphene.py  151
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/grpc/__init__.py  151
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/grpc/aio/__init__.py  7
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/grpc/aio/client.py  94
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/grpc/aio/server.py  100
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/grpc/client.py  92
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/grpc/consts.py  1
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/grpc/server.py  66
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/httpx.py  167
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/huey.py  174
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/huggingface_hub.py  175
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/langchain.py  465
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/launchdarkly.py  62
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/litestar.py  306
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/logging.py  298
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/loguru.py  130
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/modules.py  29
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/openai.py  429
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/openfeature.py  39
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/opentelemetry/__init__.py  7
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/opentelemetry/consts.py  5
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/opentelemetry/integration.py  58
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/opentelemetry/propagator.py  117
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/opentelemetry/span_processor.py  391
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/pure_eval.py  139
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/pymongo.py  214
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/pyramid.py  229
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/quart.py  237
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/ray.py  141
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/__init__.py  38
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/_async_common.py  108
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/_sync_common.py  113
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/consts.py  19
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/modules/__init__.py  0
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/modules/caches.py  121
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/modules/queries.py  68
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/rb.py  32
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/redis.py  69
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/redis_cluster.py  99
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/redis_py_cluster_legacy.py  50
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/utils.py  144
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/rq.py  161
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/rust_tracing.py  284
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/sanic.py  368
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/serverless.py  76
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/socket.py  96
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/spark/__init__.py  4
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/spark/spark_driver.py  315
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/spark/spark_worker.py  116
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/sqlalchemy.py  146
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/starlette.py  740
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/starlite.py  292
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/statsig.py  37
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/stdlib.py  265
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/strawberry.py  393
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/sys_exit.py  70
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/threading.py  121
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/tornado.py  220
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/trytond.py  50
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/typer.py  60
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/unleash.py  34
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/integrations/wsgi.py  310
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/metrics.py  965
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/monitor.py  124
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/profiler/__init__.py  49
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/profiler/continuous_profiler.py  704
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/profiler/transaction_profiler.py  837
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/profiler/utils.py  199
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/py.typed  0
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/scope.py  1786
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/scrubber.py  174
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/serializer.py  388
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/session.py  175
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/sessions.py  278
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/spotlight.py  233
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/tracing.py  1358
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/tracing_utils.py  904
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/transport.py  910
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/types.py  24
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/utils.py  1907
-rw-r--r--  .venv/lib/python3.12/site-packages/sentry_sdk/worker.py  141
145 files changed, 34465 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/__init__.py b/.venv/lib/python3.12/site-packages/sentry_sdk/__init__.py
new file mode 100644
index 00000000..e7e069e3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/__init__.py
@@ -0,0 +1,58 @@
+from sentry_sdk.scope import Scope
+from sentry_sdk.transport import Transport, HttpTransport
+from sentry_sdk.client import Client
+
+from sentry_sdk.api import *  # noqa
+
+from sentry_sdk.consts import VERSION  # noqa
+
+__all__ = [  # noqa
+    "Hub",
+    "Scope",
+    "Client",
+    "Transport",
+    "HttpTransport",
+    "integrations",
+    # From sentry_sdk.api
+    "init",
+    "add_breadcrumb",
+    "capture_event",
+    "capture_exception",
+    "capture_message",
+    "configure_scope",
+    "continue_trace",
+    "flush",
+    "get_baggage",
+    "get_client",
+    "get_global_scope",
+    "get_isolation_scope",
+    "get_current_scope",
+    "get_current_span",
+    "get_traceparent",
+    "is_initialized",
+    "isolation_scope",
+    "last_event_id",
+    "new_scope",
+    "push_scope",
+    "set_context",
+    "set_extra",
+    "set_level",
+    "set_measurement",
+    "set_tag",
+    "set_tags",
+    "set_user",
+    "start_span",
+    "start_transaction",
+    "trace",
+    "monitor",
+    "_experimental_logger",
+]
+
+# Initialize the debug support after everything is loaded
+from sentry_sdk.debug import init_debug_support
+
+init_debug_support()
+del init_debug_support
+
+# circular imports
+from sentry_sdk.hub import Hub
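
Everything re-exported above makes up the SDK's public surface. A minimal usage sketch of that surface (the DSN is a placeholder, not a real project key):

    import sentry_sdk

    sentry_sdk.init(
        dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
        traces_sample_rate=1.0,
    )

    sentry_sdk.set_tag("component", "checkout")
    try:
        1 / 0
    except ZeroDivisionError:
        sentry_sdk.capture_exception()
    sentry_sdk.flush()  # block until queued events have been sent
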
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/_compat.py b/.venv/lib/python3.12/site-packages/sentry_sdk/_compat.py
new file mode 100644
index 00000000..a811cf21
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/_compat.py
@@ -0,0 +1,98 @@
+import sys
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+    from typing import TypeVar
+
+    T = TypeVar("T")
+
+
+PY37 = sys.version_info[0] == 3 and sys.version_info[1] >= 7
+PY38 = sys.version_info[0] == 3 and sys.version_info[1] >= 8
+PY310 = sys.version_info[0] == 3 and sys.version_info[1] >= 10
+PY311 = sys.version_info[0] == 3 and sys.version_info[1] >= 11
+
+
+def with_metaclass(meta, *bases):
+    # type: (Any, *Any) -> Any
+    class MetaClass(type):
+        def __new__(metacls, name, this_bases, d):
+            # type: (Any, Any, Any, Any) -> Any
+            return meta(name, bases, d)
+
+    return type.__new__(MetaClass, "temporary_class", (), {})
+
+
+def check_uwsgi_thread_support():
+    # type: () -> bool
+    # We check two things here:
+    #
+    # 1. uWSGI doesn't run in threaded mode by default -- issue a warning if
+    #    that's the case.
+    #
+    # 2. Additionally, if uWSGI is running in preforking mode (default), it needs
+    #    the --py-call-uwsgi-fork-hooks option for the SDK to work properly. This
+    #    is because any background threads spawned before the main process is
+    #    forked are NOT CLEANED UP IN THE CHILDREN BY DEFAULT even if
+    #    --enable-threads is on. One has to explicitly provide
+    #    --py-call-uwsgi-fork-hooks to force uWSGI to run regular cpython
+    #    after-fork hooks that take care of cleaning up stale thread data.
+    try:
+        from uwsgi import opt  # type: ignore
+    except ImportError:
+        return True
+
+    from sentry_sdk.consts import FALSE_VALUES
+
+    def enabled(option):
+        # type: (str) -> bool
+        value = opt.get(option, False)
+        if isinstance(value, bool):
+            return value
+
+        if isinstance(value, bytes):
+            try:
+                value = value.decode()
+            except Exception:
+                pass
+
+        return value and str(value).lower() not in FALSE_VALUES
+
+    # When `threads` is passed in as a uwsgi option,
+    # `enable-threads` is implied on.
+    threads_enabled = "threads" in opt or enabled("enable-threads")
+    fork_hooks_on = enabled("py-call-uwsgi-fork-hooks")
+    lazy_mode = enabled("lazy-apps") or enabled("lazy")
+
+    if lazy_mode and not threads_enabled:
+        from warnings import warn
+
+        warn(
+            Warning(
+                "IMPORTANT: "
+                "We detected the use of uWSGI without thread support. "
+                "This might lead to unexpected issues. "
+                'Please run uWSGI with "--enable-threads" for full support.'
+            )
+        )
+
+        return False
+
+    elif not lazy_mode and (not threads_enabled or not fork_hooks_on):
+        from warnings import warn
+
+        warn(
+            Warning(
+                "IMPORTANT: "
+                "We detected the use of uWSGI in preforking mode without "
+                "thread support. This might lead to crashing workers. "
+                'Please run uWSGI with both "--enable-threads" and '
+                '"--py-call-uwsgi-fork-hooks" for full support.'
+            )
+        )
+
+        return False
+
+    return True
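
The version flags and `with_metaclass` above are small compatibility shims. A sketch of `with_metaclass` with a hypothetical `Registry` metaclass: the temporary class it returns is discarded at class-definition time and replaced by a class built by `Registry` itself.

    from sentry_sdk._compat import PY311, with_metaclass

    class Registry(type):
        classes = []  # every class built through this metaclass

        def __new__(metacls, name, bases, d):
            cls = super().__new__(metacls, name, bases, d)
            metacls.classes.append(cls)
            return cls

    class Plugin(with_metaclass(Registry)):
        pass

    assert type(Plugin) is Registry and Plugin in Registry.classes
    if PY311:
        print("running on Python 3.11+")
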
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/_experimental_logger.py b/.venv/lib/python3.12/site-packages/sentry_sdk/_experimental_logger.py
new file mode 100644
index 00000000..1f3cd5e4
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/_experimental_logger.py
@@ -0,0 +1,20 @@
+# NOTE: this is the logger sentry exposes to users, not some generic logger.
+import functools
+from typing import Any
+
+from sentry_sdk import get_client, get_current_scope
+
+
+def _capture_log(severity_text, severity_number, template, **kwargs):
+    # type: (str, int, str, **Any) -> None
+    client = get_client()
+    scope = get_current_scope()
+    client.capture_log(scope, severity_text, severity_number, template, **kwargs)
+
+
+trace = functools.partial(_capture_log, "trace", 1)
+debug = functools.partial(_capture_log, "debug", 5)
+info = functools.partial(_capture_log, "info", 9)
+warn = functools.partial(_capture_log, "warn", 13)
+error = functools.partial(_capture_log, "error", 17)
+fatal = functools.partial(_capture_log, "fatal", 21)
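
The module-level partials above bind OpenTelemetry-style severity numbers to `Client.capture_log`. A minimal sketch, assuming an initialized client (placeholder DSN) and that the template/kwargs pair is rendered downstream by `capture_log`:

    import sentry_sdk
    from sentry_sdk import _experimental_logger as sentry_logger

    sentry_sdk.init(dsn="https://examplePublicKey@o0.ingest.sentry.io/0")  # placeholder

    sentry_logger.info("Processed order {order_id}", order_id=42)
    sentry_logger.error("Payment failed for order {order_id}", order_id=42)
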
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/_init_implementation.py b/.venv/lib/python3.12/site-packages/sentry_sdk/_init_implementation.py
new file mode 100644
index 00000000..eb02b3d1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/_init_implementation.py
@@ -0,0 +1,84 @@
+import warnings
+
+from typing import TYPE_CHECKING
+
+import sentry_sdk
+
+if TYPE_CHECKING:
+    from typing import Any, ContextManager, Optional
+
+    import sentry_sdk.consts
+
+
+class _InitGuard:
+    _CONTEXT_MANAGER_DEPRECATION_WARNING_MESSAGE = (
+        "Using the return value of sentry_sdk.init as a context manager "
+        "and manually calling the __enter__ and __exit__ methods on the "
+        "return value are deprecated. We are no longer maintaining this "
+        "functionality, and we will remove it in the next major release."
+    )
+
+    def __init__(self, client):
+        # type: (sentry_sdk.Client) -> None
+        self._client = client
+
+    def __enter__(self):
+        # type: () -> _InitGuard
+        warnings.warn(
+            self._CONTEXT_MANAGER_DEPRECATION_WARNING_MESSAGE,
+            stacklevel=2,
+            category=DeprecationWarning,
+        )
+
+        return self
+
+    def __exit__(self, exc_type, exc_value, tb):
+        # type: (Any, Any, Any) -> None
+        warnings.warn(
+            self._CONTEXT_MANAGER_DEPRECATION_WARNING_MESSAGE,
+            stacklevel=2,
+            category=DeprecationWarning,
+        )
+
+        c = self._client
+        if c is not None:
+            c.close()
+
+
+def _check_python_deprecations():
+    # type: () -> None
+    # Since we're likely to deprecate Python versions in the future, I'm keeping
+    # this handy function around. Use this to detect the Python version used and
+    # to output logger.warning()s if it's deprecated.
+    pass
+
+
+def _init(*args, **kwargs):
+    # type: (*Optional[str], **Any) -> ContextManager[Any]
+    """Initializes the SDK and optionally integrations.
+
+    This takes the same arguments as the client constructor.
+    """
+    client = sentry_sdk.Client(*args, **kwargs)
+    sentry_sdk.get_global_scope().set_client(client)
+    _check_python_deprecations()
+    rv = _InitGuard(client)
+    return rv
+
+
+if TYPE_CHECKING:
+    # Make mypy, PyCharm and other static analyzers think `init` is a type to
+    # have nicer autocompletion for params.
+    #
+    # Use `ClientConstructor` to define the argument types of `init` and
+    # `ContextManager[Any]` to tell static analyzers about the return type.
+
+    class init(sentry_sdk.consts.ClientConstructor, _InitGuard):  # noqa: N801
+        pass
+
+else:
+    # Alias `init` for actual usage. Go through the lambda indirection to throw
+    # PyCharm off of the weakly typed signature (it would otherwise discover
+    # both the weakly typed signature of `_init` and our faked `init` type).
+
+    init = (lambda: _init)()
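
The `TYPE_CHECKING`/lambda dance at the end is a general trick for giving a weakly typed factory a rich static signature. A sketch of the same pattern on a hypothetical `greet` helper (all names here are illustrative, not part of the SDK):

    from typing import TYPE_CHECKING, Any

    def _greet(*args, **kwargs):
        # type: (*Any, **Any) -> str
        return "hello %s" % args[0]

    class _GreetSignature:
        # Rich signature shown to static analyzers only (stands in for
        # consts.ClientConstructor in the real module).
        def __init__(self, name):
            # type: (str) -> None
            ...

    if TYPE_CHECKING:
        class greet(_GreetSignature):  # noqa: N801
            pass
    else:
        # At runtime `greet` is just `_greet`; the lambda indirection keeps
        # IDEs from merging the weak `_greet` signature into `greet`.
        greet = (lambda: _greet)()

    print(greet("world"))  # -> hello world
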
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/_lru_cache.py b/.venv/lib/python3.12/site-packages/sentry_sdk/_lru_cache.py
new file mode 100644
index 00000000..cbadd972
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/_lru_cache.py
@@ -0,0 +1,47 @@
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+
+
+_SENTINEL = object()
+
+
+class LRUCache:
+    def __init__(self, max_size):
+        # type: (int) -> None
+        if max_size <= 0:
+            raise AssertionError(f"invalid max_size: {max_size}")
+        self.max_size = max_size
+        self._data = {}  # type: dict[Any, Any]
+        self.hits = self.misses = 0
+        self.full = False
+
+    def set(self, key, value):
+        # type: (Any, Any) -> None
+        current = self._data.pop(key, _SENTINEL)
+        if current is not _SENTINEL:
+            self._data[key] = value
+        elif self.full:
+            self._data.pop(next(iter(self._data)))
+            self._data[key] = value
+        else:
+            self._data[key] = value
+        self.full = len(self._data) >= self.max_size
+
+    def get(self, key, default=None):
+        # type: (Any, Any) -> Any
+        try:
+            ret = self._data.pop(key)
+        except KeyError:
+            self.misses += 1
+            ret = default
+        else:
+            self.hits += 1
+            self._data[key] = ret
+
+        return ret
+
+    def get_all(self):
+        # type: () -> list[tuple[Any, Any]]
+        return list(self._data.items())
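
The cache relies on dict insertion order: `get` re-inserts a hit key to mark it most recently used, and `set` evicts the oldest key once the cache is full. A short sketch of that behavior:

    from sentry_sdk._lru_cache import LRUCache

    cache = LRUCache(max_size=2)
    cache.set("a", 1)
    cache.set("b", 2)
    cache.get("a")       # hit: "a" is re-inserted, becoming most recently used
    cache.set("c", 3)    # full: evicts "b", the least recently used key
    assert cache.get("b") is None  # miss
    assert cache.get_all() == [("a", 1), ("c", 3)]
    print(cache.hits, cache.misses)  # -> 1 1
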
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/_queue.py b/.venv/lib/python3.12/site-packages/sentry_sdk/_queue.py
new file mode 100644
index 00000000..a21c86ec
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/_queue.py
@@ -0,0 +1,289 @@
+"""
+A fork of Python 3.6's stdlib queue (found in Python's 'cpython/Lib/queue.py')
+with Lock swapped out for RLock to avoid a deadlock while garbage collecting.
+
+https://github.com/python/cpython/blob/v3.6.12/Lib/queue.py
+
+
+See also
+https://codewithoutrules.com/2017/08/16/concurrency-python/
+https://bugs.python.org/issue14976
+https://github.com/sqlalchemy/sqlalchemy/blob/4eb747b61f0c1b1c25bdee3856d7195d10a0c227/lib/sqlalchemy/queue.py#L1
+
+We also vendor the code to evade eventlet's broken monkeypatching, see
+https://github.com/getsentry/sentry-python/pull/484
+
+
+Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020 Python Software Foundation;
+
+All Rights Reserved
+
+
+PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+--------------------------------------------
+
+1. This LICENSE AGREEMENT is between the Python Software Foundation
+("PSF"), and the Individual or Organization ("Licensee") accessing and
+otherwise using this software ("Python") in source or binary form and
+its associated documentation.
+
+2. Subject to the terms and conditions of this License Agreement, PSF hereby
+grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+analyze, test, perform and/or display publicly, prepare derivative works,
+distribute, and otherwise use Python alone or in any derivative version,
+provided, however, that PSF's License Agreement and PSF's notice of copyright,
+i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020 Python Software Foundation;
+All Rights Reserved" are retained in Python alone or in any derivative version
+prepared by Licensee.
+
+3. In the event Licensee prepares a derivative work that is based on
+or incorporates Python or any part thereof, and wants to make
+the derivative work available to others as provided herein, then
+Licensee hereby agrees to include in any such work a brief summary of
+the changes made to Python.
+
+4. PSF is making Python available to Licensee on an "AS IS"
+basis.  PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+INFRINGE ANY THIRD PARTY RIGHTS.
+
+5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+6. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+7. Nothing in this License Agreement shall be deemed to create any
+relationship of agency, partnership, or joint venture between PSF and
+Licensee.  This License Agreement does not grant permission to use PSF
+trademarks or trade name in a trademark sense to endorse or promote
+products or services of Licensee, or any third party.
+
+8. By copying, installing or otherwise using Python, Licensee
+agrees to be bound by the terms and conditions of this License
+Agreement.
+
+"""
+
+import threading
+
+from collections import deque
+from time import time
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+
+__all__ = ["EmptyError", "FullError", "Queue"]
+
+
+class EmptyError(Exception):
+    "Exception raised by Queue.get(block=0)/get_nowait()."
+
+    pass
+
+
+class FullError(Exception):
+    "Exception raised by Queue.put(block=0)/put_nowait()."
+
+    pass
+
+
+class Queue:
+    """Create a queue object with a given maximum size.
+
+    If maxsize is <= 0, the queue size is infinite.
+    """
+
+    def __init__(self, maxsize=0):
+        self.maxsize = maxsize
+        self._init(maxsize)
+
+        # mutex must be held whenever the queue is mutating.  All methods
+        # that acquire mutex must release it before returning.  mutex
+        # is shared between the three conditions, so acquiring and
+        # releasing the conditions also acquires and releases mutex.
+        self.mutex = threading.RLock()
+
+        # Notify not_empty whenever an item is added to the queue; a
+        # thread waiting to get is notified then.
+        self.not_empty = threading.Condition(self.mutex)
+
+        # Notify not_full whenever an item is removed from the queue;
+        # a thread waiting to put is notified then.
+        self.not_full = threading.Condition(self.mutex)
+
+        # Notify all_tasks_done whenever the number of unfinished tasks
+        # drops to zero; thread waiting to join() is notified to resume
+        self.all_tasks_done = threading.Condition(self.mutex)
+        self.unfinished_tasks = 0
+
+    def task_done(self):
+        """Indicate that a formerly enqueued task is complete.
+
+        Used by Queue consumer threads.  For each get() used to fetch a task,
+        a subsequent call to task_done() tells the queue that the processing
+        on the task is complete.
+
+        If a join() is currently blocking, it will resume when all items
+        have been processed (meaning that a task_done() call was received
+        for every item that had been put() into the queue).
+
+        Raises a ValueError if called more times than there were items
+        placed in the queue.
+        """
+        with self.all_tasks_done:
+            unfinished = self.unfinished_tasks - 1
+            if unfinished <= 0:
+                if unfinished < 0:
+                    raise ValueError("task_done() called too many times")
+                self.all_tasks_done.notify_all()
+            self.unfinished_tasks = unfinished
+
+    def join(self):
+        """Blocks until all items in the Queue have been gotten and processed.
+
+        The count of unfinished tasks goes up whenever an item is added to the
+        queue. The count goes down whenever a consumer thread calls task_done()
+        to indicate the item was retrieved and all work on it is complete.
+
+        When the count of unfinished tasks drops to zero, join() unblocks.
+        """
+        with self.all_tasks_done:
+            while self.unfinished_tasks:
+                self.all_tasks_done.wait()
+
+    def qsize(self):
+        """Return the approximate size of the queue (not reliable!)."""
+        with self.mutex:
+            return self._qsize()
+
+    def empty(self):
+        """Return True if the queue is empty, False otherwise (not reliable!).
+
+        This method is likely to be removed at some point.  Use qsize() == 0
+        as a direct substitute, but be aware that either approach risks a race
+        condition where a queue can grow before the result of empty() or
+        qsize() can be used.
+
+        To create code that needs to wait for all queued tasks to be
+        completed, the preferred technique is to use the join() method.
+        """
+        with self.mutex:
+            return not self._qsize()
+
+    def full(self):
+        """Return True if the queue is full, False otherwise (not reliable!).
+
+        This method is likely to be removed at some point.  Use qsize() >= n
+        as a direct substitute, but be aware that either approach risks a race
+        condition where a queue can shrink before the result of full() or
+        qsize() can be used.
+        """
+        with self.mutex:
+            return 0 < self.maxsize <= self._qsize()
+
+    def put(self, item, block=True, timeout=None):
+        """Put an item into the queue.
+
+        If optional args 'block' is true and 'timeout' is None (the default),
+        block if necessary until a free slot is available. If 'timeout' is
+        a non-negative number, it blocks at most 'timeout' seconds and raises
+        the FullError exception if no free slot was available within that time.
+        Otherwise ('block' is false), put an item on the queue if a free slot
+        is immediately available, else raise the FullError exception ('timeout'
+        is ignored in that case).
+        """
+        with self.not_full:
+            if self.maxsize > 0:
+                if not block:
+                    if self._qsize() >= self.maxsize:
+                        raise FullError()
+                elif timeout is None:
+                    while self._qsize() >= self.maxsize:
+                        self.not_full.wait()
+                elif timeout < 0:
+                    raise ValueError("'timeout' must be a non-negative number")
+                else:
+                    endtime = time() + timeout
+                    while self._qsize() >= self.maxsize:
+                        remaining = endtime - time()
+                        if remaining <= 0.0:
+                            raise FullError()
+                        self.not_full.wait(remaining)
+            self._put(item)
+            self.unfinished_tasks += 1
+            self.not_empty.notify()
+
+    def get(self, block=True, timeout=None):
+        """Remove and return an item from the queue.
+
+        If optional args 'block' is true and 'timeout' is None (the default),
+        block if necessary until an item is available. If 'timeout' is
+        a non-negative number, it blocks at most 'timeout' seconds and raises
+        the EmptyError exception if no item was available within that time.
+        Otherwise ('block' is false), return an item if one is immediately
+        available, else raise the EmptyError exception ('timeout' is ignored
+        in that case).
+        """
+        with self.not_empty:
+            if not block:
+                if not self._qsize():
+                    raise EmptyError()
+            elif timeout is None:
+                while not self._qsize():
+                    self.not_empty.wait()
+            elif timeout < 0:
+                raise ValueError("'timeout' must be a non-negative number")
+            else:
+                endtime = time() + timeout
+                while not self._qsize():
+                    remaining = endtime - time()
+                    if remaining <= 0.0:
+                        raise EmptyError()
+                    self.not_empty.wait(remaining)
+            item = self._get()
+            self.not_full.notify()
+            return item
+
+    def put_nowait(self, item):
+        """Put an item into the queue without blocking.
+
+        Only enqueue the item if a free slot is immediately available.
+        Otherwise raise the FullError exception.
+        """
+        return self.put(item, block=False)
+
+    def get_nowait(self):
+        """Remove and return an item from the queue without blocking.
+
+        Only get an item if one is immediately available. Otherwise
+        raise the EmptyError exception.
+        """
+        return self.get(block=False)
+
+    # Override these methods to implement other queue organizations
+    # (e.g. stack or priority queue).
+    # These will only be called with appropriate locks held
+
+    # Initialize the queue representation
+    def _init(self, maxsize):
+        self.queue = deque()  # type: Any
+
+    def _qsize(self):
+        return len(self.queue)
+
+    # Put a new item in the queue
+    def _put(self, item):
+        self.queue.append(item)
+
+    # Get an item from the queue
+    def _get(self):
+        return self.queue.popleft()
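
Apart from the RLock and the renamed exceptions, this behaves like `queue.Queue`. A small producer/consumer sketch using the vendored class (the worker and the `None` sentinel are illustrative):

    import threading

    from sentry_sdk._queue import FullError, Queue

    q = Queue(maxsize=2)
    q.put("event-1")
    q.put("event-2")

    try:
        q.put_nowait("event-3")  # both slots taken -> FullError
    except FullError:
        print("queue full, dropping event-3")

    def worker():
        while True:
            item = q.get()
            q.task_done()
            if item is None:  # sentinel: stop the worker
                break
            print("processing", item)

    t = threading.Thread(target=worker)
    t.start()
    q.put(None)  # blocks until the worker frees a slot, then enqueues the sentinel
    q.join()     # returns once task_done() has been called for every item
    t.join()
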
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/_types.py b/.venv/lib/python3.12/site-packages/sentry_sdk/_types.py
new file mode 100644
index 00000000..bc730719
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/_types.py
@@ -0,0 +1,313 @@
+from typing import TYPE_CHECKING, TypeVar, Union
+
+
+# Re-exported for compat, since code out there in the wild might use this variable.
+MYPY = TYPE_CHECKING
+
+
+SENSITIVE_DATA_SUBSTITUTE = "[Filtered]"
+
+
+class AnnotatedValue:
+    """
+    Meta information for a data field in the event payload.
+    This is to tell Relay that we have tampered with the fields value.
+    See:
+    https://github.com/getsentry/relay/blob/be12cd49a0f06ea932ed9b9f93a655de5d6ad6d1/relay-general/src/types/meta.rs#L407-L423
+    """
+
+    __slots__ = ("value", "metadata")
+
+    def __init__(self, value, metadata):
+        # type: (Optional[Any], Dict[str, Any]) -> None
+        self.value = value
+        self.metadata = metadata
+
+    def __eq__(self, other):
+        # type: (Any) -> bool
+        if not isinstance(other, AnnotatedValue):
+            return False
+
+        return self.value == other.value and self.metadata == other.metadata
+
+    @classmethod
+    def removed_because_raw_data(cls):
+        # type: () -> AnnotatedValue
+        """The value was removed because it could not be parsed. This is done for request body values that are not json nor a form."""
+        return AnnotatedValue(
+            value="",
+            metadata={
+                "rem": [  # Remark
+                    [
+                        "!raw",  # Unparsable raw data
+                        "x",  # The fields original value was removed
+                    ]
+                ]
+            },
+        )
+
+    @classmethod
+    def removed_because_over_size_limit(cls):
+        # type: () -> AnnotatedValue
+        """The actual value was removed because the size of the field exceeded the configured maximum size (specified with the max_request_body_size sdk option)"""
+        return AnnotatedValue(
+            value="",
+            metadata={
+                "rem": [  # Remark
+                    [
+                        "!config",  # Because of configured maximum size
+                        "x",  # The fields original value was removed
+                    ]
+                ]
+            },
+        )
+
+    @classmethod
+    def substituted_because_contains_sensitive_data(cls):
+        # type: () -> AnnotatedValue
+        """The actual value was removed because it contained sensitive information."""
+        return AnnotatedValue(
+            value=SENSITIVE_DATA_SUBSTITUTE,
+            metadata={
+                "rem": [  # Remark
+                    [
+                        "!config",  # Because of SDK configuration (in this case the config is the hard coded removal of certain django cookies)
+                        "s",  # The fields original value was substituted
+                    ]
+                ]
+            },
+        )
+
+
+T = TypeVar("T")
+Annotated = Union[AnnotatedValue, T]
+
+
+if TYPE_CHECKING:
+    from collections.abc import Container, MutableMapping, Sequence
+
+    from datetime import datetime
+
+    from types import TracebackType
+    from typing import Any
+    from typing import Callable
+    from typing import Dict
+    from typing import Mapping
+    from typing import NotRequired
+    from typing import Optional
+    from typing import Tuple
+    from typing import Type
+    from typing_extensions import Literal, TypedDict
+
+    class SDKInfo(TypedDict):
+        name: str
+        version: str
+        packages: Sequence[Mapping[str, str]]
+
+    # "critical" is an alias of "fatal" recognized by Relay
+    LogLevelStr = Literal["fatal", "critical", "error", "warning", "info", "debug"]
+
+    DurationUnit = Literal[
+        "nanosecond",
+        "microsecond",
+        "millisecond",
+        "second",
+        "minute",
+        "hour",
+        "day",
+        "week",
+    ]
+
+    InformationUnit = Literal[
+        "bit",
+        "byte",
+        "kilobyte",
+        "kibibyte",
+        "megabyte",
+        "mebibyte",
+        "gigabyte",
+        "gibibyte",
+        "terabyte",
+        "tebibyte",
+        "petabyte",
+        "pebibyte",
+        "exabyte",
+        "exbibyte",
+    ]
+
+    FractionUnit = Literal["ratio", "percent"]
+    MeasurementUnit = Union[DurationUnit, InformationUnit, FractionUnit, str]
+
+    MeasurementValue = TypedDict(
+        "MeasurementValue",
+        {
+            "value": float,
+            "unit": NotRequired[Optional[MeasurementUnit]],
+        },
+    )
+
+    Event = TypedDict(
+        "Event",
+        {
+            "breadcrumbs": dict[
+                Literal["values"], list[dict[str, Any]]
+            ],  # TODO: We can expand on this type
+            "check_in_id": str,
+            "contexts": dict[str, dict[str, object]],
+            "dist": str,
+            "duration": Optional[float],
+            "environment": str,
+            "errors": list[dict[str, Any]],  # TODO: We can expand on this type
+            "event_id": str,
+            "exception": dict[
+                Literal["values"], list[dict[str, Any]]
+            ],  # TODO: We can expand on this type
+            "extra": MutableMapping[str, object],
+            "fingerprint": list[str],
+            "level": LogLevelStr,
+            "logentry": Mapping[str, object],
+            "logger": str,
+            "measurements": dict[str, MeasurementValue],
+            "message": str,
+            "modules": dict[str, str],
+            "monitor_config": Mapping[str, object],
+            "monitor_slug": Optional[str],
+            "platform": Literal["python"],
+            "profile": object,  # Should be sentry_sdk.profiler.Profile, but we can't import that here due to circular imports
+            "release": str,
+            "request": dict[str, object],
+            "sdk": Mapping[str, object],
+            "server_name": str,
+            "spans": Annotated[list[dict[str, object]]],
+            "stacktrace": dict[
+                str, object
+            ],  # We access this key in the code, but I am unsure whether we ever set it
+            "start_timestamp": datetime,
+            "status": Optional[str],
+            "tags": MutableMapping[
+                str, str
+            ],  # Tags must be less than 200 characters each
+            "threads": dict[
+                Literal["values"], list[dict[str, Any]]
+            ],  # TODO: We can expand on this type
+            "timestamp": Optional[datetime],  # Must be set before sending the event
+            "transaction": str,
+            "transaction_info": Mapping[str, Any],  # TODO: We can expand on this type
+            "type": Literal["check_in", "transaction"],
+            "user": dict[str, object],
+            "_dropped_spans": int,
+            "_metrics_summary": dict[str, object],
+        },
+        total=False,
+    )
+
+    ExcInfo = Union[
+        tuple[Type[BaseException], BaseException, Optional[TracebackType]],
+        tuple[None, None, None],
+    ]
+
+    Hint = Dict[str, Any]
+    Log = TypedDict(
+        "Log",
+        {
+            "severity_text": str,
+            "severity_number": int,
+            "body": str,
+            "attributes": dict[str, str | bool | float | int],
+            "time_unix_nano": int,
+            "trace_id": Optional[str],
+        },
+    )
+
+    Breadcrumb = Dict[str, Any]
+    BreadcrumbHint = Dict[str, Any]
+
+    SamplingContext = Dict[str, Any]
+
+    EventProcessor = Callable[[Event, Hint], Optional[Event]]
+    ErrorProcessor = Callable[[Event, ExcInfo], Optional[Event]]
+    BreadcrumbProcessor = Callable[[Breadcrumb, BreadcrumbHint], Optional[Breadcrumb]]
+    TransactionProcessor = Callable[[Event, Hint], Optional[Event]]
+    LogProcessor = Callable[[Log, Hint], Optional[Log]]
+
+    TracesSampler = Callable[[SamplingContext], Union[float, int, bool]]
+
+    # https://github.com/python/mypy/issues/5710
+    NotImplementedType = Any
+
+    EventDataCategory = Literal[
+        "default",
+        "error",
+        "crash",
+        "transaction",
+        "security",
+        "attachment",
+        "session",
+        "internal",
+        "profile",
+        "profile_chunk",
+        "metric_bucket",
+        "monitor",
+        "span",
+        "log",
+    ]
+    SessionStatus = Literal["ok", "exited", "crashed", "abnormal"]
+
+    ContinuousProfilerMode = Literal["thread", "gevent", "unknown"]
+    ProfilerMode = Union[ContinuousProfilerMode, Literal["sleep"]]
+
+    # Type of the metric.
+    MetricType = Literal["d", "s", "g", "c"]
+
+    # Value of the metric.
+    MetricValue = Union[int, float, str]
+
+    # Internal representation of tags as a tuple of tuples (this is done in order to allow for the same key to exist
+    # multiple times).
+    MetricTagsInternal = Tuple[Tuple[str, str], ...]
+
+    # External representation of tags as a dictionary.
+    MetricTagValue = Union[str, int, float, None]
+    MetricTags = Mapping[str, MetricTagValue]
+
+    # Value inside the generator for the metric value.
+    FlushedMetricValue = Union[int, float]
+
+    BucketKey = Tuple[MetricType, str, MeasurementUnit, MetricTagsInternal]
+    MetricMetaKey = Tuple[MetricType, str, MeasurementUnit]
+
+    MonitorConfigScheduleType = Literal["crontab", "interval"]
+    MonitorConfigScheduleUnit = Literal[
+        "year",
+        "month",
+        "week",
+        "day",
+        "hour",
+        "minute",
+        "second",  # not supported in Sentry and will result in a warning
+    ]
+
+    MonitorConfigSchedule = TypedDict(
+        "MonitorConfigSchedule",
+        {
+            "type": MonitorConfigScheduleType,
+            "value": Union[int, str],
+            "unit": MonitorConfigScheduleUnit,
+        },
+        total=False,
+    )
+
+    MonitorConfig = TypedDict(
+        "MonitorConfig",
+        {
+            "schedule": MonitorConfigSchedule,
+            "timezone": str,
+            "checkin_margin": int,
+            "max_runtime": int,
+            "failure_issue_threshold": int,
+            "recovery_threshold": int,
+        },
+        total=False,
+    )
+
+    HttpStatusCodeRange = Union[int, Container[int]]
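
Everything under `TYPE_CHECKING` is typing-only, but `AnnotatedValue` is a runtime class. A short sketch of the Relay "rem" annotations its helpers produce:

    from sentry_sdk._types import SENSITIVE_DATA_SUBSTITUTE, AnnotatedValue

    # Mark a request body that was too large to keep.
    body = AnnotatedValue.removed_because_over_size_limit()
    assert body.value == ""
    assert body.metadata == {"rem": [["!config", "x"]]}

    # Substitute a value that contained sensitive data.
    password = AnnotatedValue.substituted_because_contains_sensitive_data()
    assert password.value == SENSITIVE_DATA_SUBSTITUTE  # "[Filtered]"
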
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/_werkzeug.py b/.venv/lib/python3.12/site-packages/sentry_sdk/_werkzeug.py
new file mode 100644
index 00000000..0fa3d611
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/_werkzeug.py
@@ -0,0 +1,98 @@
+"""
+Copyright (c) 2007 by the Pallets team.
+
+Some rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+* Redistributions of source code must retain the above copyright notice,
+  this list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright
+  notice, this list of conditions and the following disclaimer in the
+  documentation and/or other materials provided with the distribution.
+
+* Neither the name of the copyright holder nor the names of its
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
+BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGE.
+"""
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Dict
+    from typing import Iterator
+    from typing import Tuple
+
+
+#
+# `get_headers` comes from `werkzeug.datastructures.EnvironHeaders`
+# https://github.com/pallets/werkzeug/blob/0.14.1/werkzeug/datastructures.py#L1361
+#
+# We need this function because Django does not give us a "pure" http header
+# dict. So we might as well use it for all WSGI integrations.
+#
+def _get_headers(environ):
+    # type: (Dict[str, str]) -> Iterator[Tuple[str, str]]
+    """
+    Returns only proper HTTP headers.
+    """
+    for key, value in environ.items():
+        key = str(key)
+        if key.startswith("HTTP_") and key not in (
+            "HTTP_CONTENT_TYPE",
+            "HTTP_CONTENT_LENGTH",
+        ):
+            yield key[5:].replace("_", "-").title(), value
+        elif key in ("CONTENT_TYPE", "CONTENT_LENGTH"):
+            yield key.replace("_", "-").title(), value
+
+
+#
+# `get_host` comes from `werkzeug.wsgi.get_host`
+# https://github.com/pallets/werkzeug/blob/1.0.1/src/werkzeug/wsgi.py#L145
+#
+def get_host(environ, use_x_forwarded_for=False):
+    # type: (Dict[str, str], bool) -> str
+    """
+    Return the host for the given WSGI environment.
+    """
+    if use_x_forwarded_for and "HTTP_X_FORWARDED_HOST" in environ:
+        rv = environ["HTTP_X_FORWARDED_HOST"]
+        if environ["wsgi.url_scheme"] == "http" and rv.endswith(":80"):
+            rv = rv[:-3]
+        elif environ["wsgi.url_scheme"] == "https" and rv.endswith(":443"):
+            rv = rv[:-4]
+    elif environ.get("HTTP_HOST"):
+        rv = environ["HTTP_HOST"]
+        if environ["wsgi.url_scheme"] == "http" and rv.endswith(":80"):
+            rv = rv[:-3]
+        elif environ["wsgi.url_scheme"] == "https" and rv.endswith(":443"):
+            rv = rv[:-4]
+    elif environ.get("SERVER_NAME"):
+        rv = environ["SERVER_NAME"]
+        if (environ["wsgi.url_scheme"], environ["SERVER_PORT"]) not in (
+            ("https", "443"),
+            ("http", "80"),
+        ):
+            rv += ":" + environ["SERVER_PORT"]
+    else:
+        # In spite of the WSGI spec, SERVER_NAME might not be present.
+        rv = "unknown"
+
+    return rv
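
A quick sketch of both helpers against a hand-built WSGI environ (all values are illustrative):

    from sentry_sdk._werkzeug import _get_headers, get_host

    environ = {
        "wsgi.url_scheme": "https",
        "HTTP_HOST": "example.com:443",
        "HTTP_X_FORWARDED_HOST": "proxy.example.com",
        "HTTP_USER_AGENT": "curl/8.0",
        "CONTENT_TYPE": "application/json",
        "SERVER_NAME": "backend",
        "SERVER_PORT": "8000",
    }

    print(dict(_get_headers(environ)))
    # {'Host': 'example.com:443', 'X-Forwarded-Host': 'proxy.example.com',
    #  'User-Agent': 'curl/8.0', 'Content-Type': 'application/json'}
    print(get_host(environ))                            # 'example.com' (":443" stripped)
    print(get_host(environ, use_x_forwarded_for=True))  # 'proxy.example.com'
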
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/ai/__init__.py b/.venv/lib/python3.12/site-packages/sentry_sdk/ai/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/ai/__init__.py
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/ai/monitoring.py b/.venv/lib/python3.12/site-packages/sentry_sdk/ai/monitoring.py
new file mode 100644
index 00000000..860833b8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/ai/monitoring.py
@@ -0,0 +1,115 @@
+import inspect
+from functools import wraps
+
+import sentry_sdk.utils
+from sentry_sdk import start_span
+from sentry_sdk.tracing import Span
+from sentry_sdk.utils import ContextVar
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Optional, Callable, Any
+
+_ai_pipeline_name = ContextVar("ai_pipeline_name", default=None)
+
+
+def set_ai_pipeline_name(name):
+    # type: (Optional[str]) -> None
+    _ai_pipeline_name.set(name)
+
+
+def get_ai_pipeline_name():
+    # type: () -> Optional[str]
+    return _ai_pipeline_name.get()
+
+
+def ai_track(description, **span_kwargs):
+    # type: (str, Any) -> Callable[..., Any]
+    def decorator(f):
+        # type: (Callable[..., Any]) -> Callable[..., Any]
+        def sync_wrapped(*args, **kwargs):
+            # type: (Any, Any) -> Any
+            curr_pipeline = _ai_pipeline_name.get()
+            op = span_kwargs.get("op", "ai.run" if curr_pipeline else "ai.pipeline")
+
+            with start_span(name=description, op=op, **span_kwargs) as span:
+                for k, v in kwargs.pop("sentry_tags", {}).items():
+                    span.set_tag(k, v)
+                for k, v in kwargs.pop("sentry_data", {}).items():
+                    span.set_data(k, v)
+                if curr_pipeline:
+                    span.set_data("ai.pipeline.name", curr_pipeline)
+                    return f(*args, **kwargs)
+                else:
+                    _ai_pipeline_name.set(description)
+                    try:
+                        res = f(*args, **kwargs)
+                    except Exception as e:
+                        event, hint = sentry_sdk.utils.event_from_exception(
+                            e,
+                            client_options=sentry_sdk.get_client().options,
+                            mechanism={"type": "ai_monitoring", "handled": False},
+                        )
+                        sentry_sdk.capture_event(event, hint=hint)
+                        raise e from None
+                    finally:
+                        _ai_pipeline_name.set(None)
+                    return res
+
+        async def async_wrapped(*args, **kwargs):
+            # type: (Any, Any) -> Any
+            curr_pipeline = _ai_pipeline_name.get()
+            op = span_kwargs.get("op", "ai.run" if curr_pipeline else "ai.pipeline")
+
+            with start_span(name=description, op=op, **span_kwargs) as span:
+                for k, v in kwargs.pop("sentry_tags", {}).items():
+                    span.set_tag(k, v)
+                for k, v in kwargs.pop("sentry_data", {}).items():
+                    span.set_data(k, v)
+                if curr_pipeline:
+                    span.set_data("ai.pipeline.name", curr_pipeline)
+                    return await f(*args, **kwargs)
+                else:
+                    _ai_pipeline_name.set(description)
+                    try:
+                        res = await f(*args, **kwargs)
+                    except Exception as e:
+                        event, hint = sentry_sdk.utils.event_from_exception(
+                            e,
+                            client_options=sentry_sdk.get_client().options,
+                            mechanism={"type": "ai_monitoring", "handled": False},
+                        )
+                        sentry_sdk.capture_event(event, hint=hint)
+                        raise e from None
+                    finally:
+                        _ai_pipeline_name.set(None)
+                    return res
+
+        if inspect.iscoroutinefunction(f):
+            return wraps(f)(async_wrapped)
+        else:
+            return wraps(f)(sync_wrapped)
+
+    return decorator
+
+
+def record_token_usage(
+    span, prompt_tokens=None, completion_tokens=None, total_tokens=None
+):
+    # type: (Span, Optional[int], Optional[int], Optional[int]) -> None
+    ai_pipeline_name = get_ai_pipeline_name()
+    if ai_pipeline_name:
+        span.set_data("ai.pipeline.name", ai_pipeline_name)
+    if prompt_tokens is not None:
+        span.set_measurement("ai_prompt_tokens_used", value=prompt_tokens)
+    if completion_tokens is not None:
+        span.set_measurement("ai_completion_tokens_used", value=completion_tokens)
+    if (
+        total_tokens is None
+        and prompt_tokens is not None
+        and completion_tokens is not None
+    ):
+        total_tokens = prompt_tokens + completion_tokens
+    if total_tokens is not None:
+        span.set_measurement("ai_total_tokens_used", total_tokens)
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/ai/utils.py b/.venv/lib/python3.12/site-packages/sentry_sdk/ai/utils.py
new file mode 100644
index 00000000..ed3494f6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/ai/utils.py
@@ -0,0 +1,32 @@
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+
+from sentry_sdk.tracing import Span
+from sentry_sdk.utils import logger
+
+
+def _normalize_data(data):
+    # type: (Any) -> Any
+
+    # convert pydantic data (e.g. OpenAI v1+) to json compatible format
+    if hasattr(data, "model_dump"):
+        try:
+            return data.model_dump()
+        except Exception as e:
+            logger.warning("Could not convert pydantic data to JSON: %s", e)
+            return data
+    if isinstance(data, list):
+        if len(data) == 1:
+            return _normalize_data(data[0])  # remove empty dimensions
+        return list(_normalize_data(x) for x in data)
+    if isinstance(data, dict):
+        return {k: _normalize_data(v) for (k, v) in data.items()}
+    return data
+
+
+def set_data_normalized(span, key, value):
+    # type: (Span, str, Any) -> None
+    normalized = _normalize_data(value)
+    span.set_data(key, normalized)
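
A sketch of what `_normalize_data` does (the `FakeModel` class is a stand-in for a pydantic v2 model):

    from sentry_sdk.ai.utils import _normalize_data

    # Single-element lists are unwrapped; nested containers are recursed.
    assert _normalize_data([{"role": "user"}]) == {"role": "user"}
    assert _normalize_data({"choices": [1, 2]}) == {"choices": [1, 2]}

    # Objects exposing model_dump() (e.g. pydantic v2 models, as used by
    # OpenAI v1+) are converted to plain dicts.
    class FakeModel:
        def model_dump(self):
            return {"content": "hi"}

    assert _normalize_data({"message": FakeModel()}) == {"message": {"content": "hi"}}
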
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/api.py b/.venv/lib/python3.12/site-packages/sentry_sdk/api.py
new file mode 100644
index 00000000..d6043407
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/api.py
@@ -0,0 +1,433 @@
+import inspect
+import warnings
+from contextlib import contextmanager
+
+from sentry_sdk import tracing_utils, Client
+from sentry_sdk._init_implementation import init
+from sentry_sdk.consts import INSTRUMENTER
+from sentry_sdk.scope import Scope, _ScopeManager, new_scope, isolation_scope
+from sentry_sdk.tracing import NoOpSpan, Transaction, trace
+from sentry_sdk.crons import monitor
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from collections.abc import Mapping
+
+    from typing import Any
+    from typing import Dict
+    from typing import Generator
+    from typing import Optional
+    from typing import overload
+    from typing import Callable
+    from typing import TypeVar
+    from typing import ContextManager
+    from typing import Union
+
+    from typing_extensions import Unpack
+
+    from sentry_sdk.client import BaseClient
+    from sentry_sdk._types import (
+        Event,
+        Hint,
+        Breadcrumb,
+        BreadcrumbHint,
+        ExcInfo,
+        MeasurementUnit,
+        LogLevelStr,
+        SamplingContext,
+    )
+    from sentry_sdk.tracing import Span, TransactionKwargs
+
+    T = TypeVar("T")
+    F = TypeVar("F", bound=Callable[..., Any])
+else:
+
+    def overload(x):
+        # type: (T) -> T
+        return x
+
+
+# When changing this, update __all__ in __init__.py too
+__all__ = [
+    "init",
+    "add_breadcrumb",
+    "capture_event",
+    "capture_exception",
+    "capture_message",
+    "configure_scope",
+    "continue_trace",
+    "flush",
+    "get_baggage",
+    "get_client",
+    "get_global_scope",
+    "get_isolation_scope",
+    "get_current_scope",
+    "get_current_span",
+    "get_traceparent",
+    "is_initialized",
+    "isolation_scope",
+    "last_event_id",
+    "new_scope",
+    "push_scope",
+    "set_context",
+    "set_extra",
+    "set_level",
+    "set_measurement",
+    "set_tag",
+    "set_tags",
+    "set_user",
+    "start_span",
+    "start_transaction",
+    "trace",
+    "monitor",
+]
+
+
+def scopemethod(f):
+    # type: (F) -> F
+    f.__doc__ = "%s\n\n%s" % (
+        "Alias for :py:meth:`sentry_sdk.Scope.%s`" % f.__name__,
+        inspect.getdoc(getattr(Scope, f.__name__)),
+    )
+    return f
+
+
+def clientmethod(f):
+    # type: (F) -> F
+    f.__doc__ = "%s\n\n%s" % (
+        "Alias for :py:meth:`sentry_sdk.Client.%s`" % f.__name__,
+        inspect.getdoc(getattr(Client, f.__name__)),
+    )
+    return f
+
+
+@scopemethod
+def get_client():
+    # type: () -> BaseClient
+    return Scope.get_client()
+
+
+def is_initialized():
+    # type: () -> bool
+    """
+    .. versionadded:: 2.0.0
+
+    Returns whether Sentry has been initialized or not.
+
+    If a client is available and the client is active
+    (meaning it is configured to send data) then
+    Sentry is initialized.
+    """
+    return get_client().is_active()
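
For library code that should stay silent unless a host application configured the SDK, is_initialized() is the intended guard; a small sketch (the function name is illustrative):

    import sentry_sdk

    def report(exc):
        # forward to Sentry only when the host app called sentry_sdk.init()
        if sentry_sdk.is_initialized():
            sentry_sdk.capture_exception(exc)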
+
+
+@scopemethod
+def get_global_scope():
+    # type: () -> Scope
+    return Scope.get_global_scope()
+
+
+@scopemethod
+def get_isolation_scope():
+    # type: () -> Scope
+    return Scope.get_isolation_scope()
+
+
+@scopemethod
+def get_current_scope():
+    # type: () -> Scope
+    return Scope.get_current_scope()
+
+
+@scopemethod
+def last_event_id():
+    # type: () -> Optional[str]
+    """
+    See :py:meth:`sentry_sdk.Scope.last_event_id` documentation regarding
+    this method's limitations.
+    """
+    return Scope.last_event_id()
+
+
+@scopemethod
+def capture_event(
+    event,  # type: Event
+    hint=None,  # type: Optional[Hint]
+    scope=None,  # type: Optional[Any]
+    **scope_kwargs,  # type: Any
+):
+    # type: (...) -> Optional[str]
+    return get_current_scope().capture_event(event, hint, scope=scope, **scope_kwargs)
+
+
+@scopemethod
+def capture_message(
+    message,  # type: str
+    level=None,  # type: Optional[LogLevelStr]
+    scope=None,  # type: Optional[Any]
+    **scope_kwargs,  # type: Any
+):
+    # type: (...) -> Optional[str]
+    return get_current_scope().capture_message(
+        message, level, scope=scope, **scope_kwargs
+    )
+
+
+@scopemethod
+def capture_exception(
+    error=None,  # type: Optional[Union[BaseException, ExcInfo]]
+    scope=None,  # type: Optional[Any]
+    **scope_kwargs,  # type: Any
+):
+    # type: (...) -> Optional[str]
+    return get_current_scope().capture_exception(error, scope=scope, **scope_kwargs)
+
+
+@scopemethod
+def add_breadcrumb(
+    crumb=None,  # type: Optional[Breadcrumb]
+    hint=None,  # type: Optional[BreadcrumbHint]
+    **kwargs,  # type: Any
+):
+    # type: (...) -> None
+    return get_isolation_scope().add_breadcrumb(crumb, hint, **kwargs)
+
+
+@overload
+def configure_scope():
+    # type: () -> ContextManager[Scope]
+    pass
+
+
+@overload
+def configure_scope(  # noqa: F811
+    callback,  # type: Callable[[Scope], None]
+):
+    # type: (...) -> None
+    pass
+
+
+def configure_scope(  # noqa: F811
+    callback=None,  # type: Optional[Callable[[Scope], None]]
+):
+    # type: (...) -> Optional[ContextManager[Scope]]
+    """
+    Reconfigures the scope.
+
+    :param callback: If provided, call the callback with the current scope.
+
+    :returns: If no callback is provided, returns a context manager that returns the scope.
+    """
+    warnings.warn(
+        "sentry_sdk.configure_scope is deprecated and will be removed in the next major version. "
+        "Please consult our migration guide to learn how to migrate to the new API: "
+        "https://docs.sentry.io/platforms/python/migration/1.x-to-2.x#scope-configuring",
+        DeprecationWarning,
+        stacklevel=2,
+    )
+
+    scope = get_isolation_scope()
+    scope.generate_propagation_context()
+
+    if callback is not None:
+        # TODO: used to return None when client is None. Check if this changes behavior.
+        callback(scope)
+
+        return None
+
+    @contextmanager
+    def inner():
+        # type: () -> Generator[Scope, None, None]
+        yield scope
+
+    return inner()
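
Since configure_scope is deprecated here, the equivalent on the new API surface is to fetch the scope directly; a sketch of old versus new usage (the tag name is illustrative):

    import sentry_sdk

    # deprecated: emits a DeprecationWarning
    with sentry_sdk.configure_scope() as scope:
        scope.set_tag("flow", "checkout")

    # preferred: operate on the isolation scope directly
    sentry_sdk.get_isolation_scope().set_tag("flow", "checkout")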
+
+
+@overload
+def push_scope():
+    # type: () -> ContextManager[Scope]
+    pass
+
+
+@overload
+def push_scope(  # noqa: F811
+    callback,  # type: Callable[[Scope], None]
+):
+    # type: (...) -> None
+    pass
+
+
+def push_scope(  # noqa: F811
+    callback=None,  # type: Optional[Callable[[Scope], None]]
+):
+    # type: (...) -> Optional[ContextManager[Scope]]
+    """
+    Pushes a new layer on the scope stack.
+
+    :param callback: If provided, this method pushes a scope, calls
+        `callback`, and pops the scope again.
+
+    :returns: If no `callback` is provided, a context manager that should
+        be used to pop the scope again.
+    """
+    warnings.warn(
+        "sentry_sdk.push_scope is deprecated and will be removed in the next major version. "
+        "Please consult our migration guide to learn how to migrate to the new API: "
+        "https://docs.sentry.io/platforms/python/migration/1.x-to-2.x#scope-pushing",
+        DeprecationWarning,
+        stacklevel=2,
+    )
+
+    if callback is not None:
+        with warnings.catch_warnings():
+            warnings.simplefilter("ignore", DeprecationWarning)
+            with push_scope() as scope:
+                callback(scope)
+        return None
+
+    return _ScopeManager()
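
push_scope follows the same deprecation path; new_scope, imported at the top of this module, is the replacement. A sketch:

    import sentry_sdk

    # deprecated
    with sentry_sdk.push_scope() as scope:
        scope.set_extra("job_id", 42)

    # preferred: new_scope() forks the current scope for the duration of the block
    with sentry_sdk.new_scope() as scope:
        scope.set_extra("job_id", 42)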
+
+
+@scopemethod
+def set_tag(key, value):
+    # type: (str, Any) -> None
+    return get_isolation_scope().set_tag(key, value)
+
+
+@scopemethod
+def set_tags(tags):
+    # type: (Mapping[str, object]) -> None
+    return get_isolation_scope().set_tags(tags)
+
+
+@scopemethod
+def set_context(key, value):
+    # type: (str, Dict[str, Any]) -> None
+    return get_isolation_scope().set_context(key, value)
+
+
+@scopemethod
+def set_extra(key, value):
+    # type: (str, Any) -> None
+    return get_isolation_scope().set_extra(key, value)
+
+
+@scopemethod
+def set_user(value):
+    # type: (Optional[Dict[str, Any]]) -> None
+    return get_isolation_scope().set_user(value)
+
+
+@scopemethod
+def set_level(value):
+    # type: (LogLevelStr) -> None
+    return get_isolation_scope().set_level(value)
+
+
+@clientmethod
+def flush(
+    timeout=None,  # type: Optional[float]
+    callback=None,  # type: Optional[Callable[[int, float], None]]
+):
+    # type: (...) -> None
+    return get_client().flush(timeout=timeout, callback=callback)
+
+
+@scopemethod
+def start_span(
+    **kwargs,  # type: Any
+):
+    # type: (...) -> Span
+    return get_current_scope().start_span(**kwargs)
+
+
+@scopemethod
+def start_transaction(
+    transaction=None,  # type: Optional[Transaction]
+    instrumenter=INSTRUMENTER.SENTRY,  # type: str
+    custom_sampling_context=None,  # type: Optional[SamplingContext]
+    **kwargs,  # type: Unpack[TransactionKwargs]
+):
+    # type: (...) -> Union[Transaction, NoOpSpan]
+    """
+    Start and return a transaction on the current scope.
+
+    Start an existing transaction if given, otherwise create and start a new
+    transaction with kwargs.
+
+    This is the entry point to manual tracing instrumentation.
+
+    A tree structure can be built by adding child spans to the transaction,
+    and child spans to other spans. To start a new child span within the
+    transaction or any span, call the respective `.start_child()` method.
+
+    Every child span must be finished before the transaction is finished,
+    otherwise the unfinished spans are discarded.
+
+    When used as context managers, spans and transactions are automatically
+    finished at the end of the `with` block. If not using context managers,
+    call the `.finish()` method.
+
+    When the transaction is finished, it will be sent to Sentry with all its
+    finished child spans.
+
+    :param transaction: The transaction to start. If omitted, we create and
+        start a new transaction.
+    :param instrumenter: This parameter is meant for internal use only. It
+        will be removed in the next major version.
+    :param custom_sampling_context: The transaction's custom sampling context.
+    :param kwargs: Optional keyword arguments to be passed to the Transaction
+        constructor. See :py:class:`sentry_sdk.tracing.Transaction` for
+        available arguments.
+    """
+    return get_current_scope().start_transaction(
+        transaction, instrumenter, custom_sampling_context, **kwargs
+    )
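
A minimal tracing sketch built on the docstring above; the op and name strings are illustrative:

    import sentry_sdk

    with sentry_sdk.start_transaction(op="task", name="process-order") as transaction:
        with transaction.start_child(op="db", description="load order"):
            pass  # child spans finish at the end of their `with` blocks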
+
+
+def set_measurement(name, value, unit=""):
+    # type: (str, float, MeasurementUnit) -> None
+    transaction = get_current_scope().transaction
+    if transaction is not None:
+        transaction.set_measurement(name, value, unit)
+
+
+def get_current_span(scope=None):
+    # type: (Optional[Scope]) -> Optional[Span]
+    """
+    Returns the currently active span if there is one running, otherwise `None`.
+    """
+    return tracing_utils.get_current_span(scope)
+
+
+def get_traceparent():
+    # type: () -> Optional[str]
+    """
+    Returns the traceparent either from the active span or from the scope.
+    """
+    return get_current_scope().get_traceparent()
+
+
+def get_baggage():
+    # type: () -> Optional[str]
+    """
+    Returns Baggage either from the active span or from the scope.
+    """
+    baggage = get_current_scope().get_baggage()
+    if baggage is not None:
+        return baggage.serialize()
+
+    return None
+
+
+def continue_trace(
+    environ_or_headers, op=None, name=None, source=None, origin="manual"
+):
+    # type: (Dict[str, Any], Optional[str], Optional[str], Optional[str], str) -> Transaction
+    """
+    Sets the propagation context from environment or headers and returns a transaction.
+    """
+    return get_isolation_scope().continue_trace(
+        environ_or_headers, op, name, source, origin
+    )
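
continue_trace is typically fed the incoming request headers so the new transaction joins the caller's trace; a sketch with a fabricated sentry-trace value:

    import sentry_sdk

    headers = {"sentry-trace": "771a43a4192642f0b136d5159a501700-8e2b1d9c3f4a5b6c-1"}
    transaction = sentry_sdk.continue_trace(headers, op="http.server", name="GET /orders")
    with sentry_sdk.start_transaction(transaction):
        pass  # handle the request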
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/attachments.py b/.venv/lib/python3.12/site-packages/sentry_sdk/attachments.py
new file mode 100644
index 00000000..e5404f86
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/attachments.py
@@ -0,0 +1,75 @@
+import os
+import mimetypes
+
+from sentry_sdk.envelope import Item, PayloadRef
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Optional, Union, Callable
+
+
+class Attachment:
+    """Additional files/data to send along with an event.
+
+    This class stores attachments that can be sent along with an event. Attachments are files or other data, e.g.
+    config or log files, that are relevant to an event. Attachments are set on the ``Scope``, and are sent along with
+    all non-transaction events (or all events including transactions if ``add_to_transactions`` is ``True``) that are
+    captured within the ``Scope``.
+
+    To add an attachment to a ``Scope``, use :py:meth:`sentry_sdk.Scope.add_attachment`. The parameters for
+    ``add_attachment`` are the same as the parameters for this class's constructor.
+
+    :param bytes: Raw bytes of the attachment, or a function that returns the raw bytes. Must be provided unless
+                  ``path`` is provided.
+    :param filename: The filename of the attachment. Must be provided unless ``path`` is provided.
+    :param path: Path to a file to attach. Must be provided unless ``bytes`` is provided.
+    :param content_type: The content type of the attachment. If not provided, it will be guessed from the ``filename``
+                         parameter, if available, or the ``path`` parameter if ``filename`` is ``None``.
+    :param add_to_transactions: Whether to add this attachment to transactions. Defaults to ``False``.
+    """
+
+    def __init__(
+        self,
+        bytes=None,  # type: Union[None, bytes, Callable[[], bytes]]
+        filename=None,  # type: Optional[str]
+        path=None,  # type: Optional[str]
+        content_type=None,  # type: Optional[str]
+        add_to_transactions=False,  # type: bool
+    ):
+        # type: (...) -> None
+        if bytes is None and path is None:
+            raise TypeError("path or raw bytes required for attachment")
+        if filename is None and path is not None:
+            filename = os.path.basename(path)
+        if filename is None:
+            raise TypeError("filename is required for attachment")
+        if content_type is None:
+            content_type = mimetypes.guess_type(filename)[0]
+        self.bytes = bytes
+        self.filename = filename
+        self.path = path
+        self.content_type = content_type
+        self.add_to_transactions = add_to_transactions
+
+    def to_envelope_item(self):
+        # type: () -> Item
+        """Returns an envelope item for this attachment."""
+        payload = None  # type: Union[None, PayloadRef, bytes]
+        if self.bytes is not None:
+            if callable(self.bytes):
+                payload = self.bytes()
+            else:
+                payload = self.bytes
+        else:
+            payload = PayloadRef(path=self.path)
+        return Item(
+            payload=payload,
+            type="attachment",
+            content_type=self.content_type,
+            filename=self.filename,
+        )
+
+    def __repr__(self):
+        # type: () -> str
+        return "<Attachment %r>" % (self.filename,)
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/client.py b/.venv/lib/python3.12/site-packages/sentry_sdk/client.py
new file mode 100644
index 00000000..0f973945
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/client.py
@@ -0,0 +1,1079 @@
+import json
+import os
+import time
+import uuid
+import random
+import socket
+import logging
+from collections.abc import Mapping
+from datetime import datetime, timezone
+from importlib import import_module
+from typing import TYPE_CHECKING, List, Dict, cast, overload
+import warnings
+
+from sentry_sdk._compat import PY37, check_uwsgi_thread_support
+from sentry_sdk.utils import (
+    AnnotatedValue,
+    ContextVar,
+    capture_internal_exceptions,
+    current_stacktrace,
+    env_to_bool,
+    format_timestamp,
+    get_sdk_name,
+    get_type_name,
+    get_default_release,
+    handle_in_app,
+    is_gevent,
+    logger,
+)
+from sentry_sdk.serializer import serialize
+from sentry_sdk.tracing import trace
+from sentry_sdk.transport import BaseHttpTransport, make_transport
+from sentry_sdk.consts import (
+    DEFAULT_MAX_VALUE_LENGTH,
+    DEFAULT_OPTIONS,
+    INSTRUMENTER,
+    VERSION,
+    ClientConstructor,
+)
+from sentry_sdk.integrations import _DEFAULT_INTEGRATIONS, setup_integrations
+from sentry_sdk.integrations.dedupe import DedupeIntegration
+from sentry_sdk.sessions import SessionFlusher
+from sentry_sdk.envelope import Envelope
+from sentry_sdk.profiler.continuous_profiler import setup_continuous_profiler
+from sentry_sdk.profiler.transaction_profiler import (
+    has_profiling_enabled,
+    Profile,
+    setup_profiler,
+)
+from sentry_sdk.scrubber import EventScrubber
+from sentry_sdk.monitor import Monitor
+from sentry_sdk.spotlight import setup_spotlight
+
+if TYPE_CHECKING:
+    from typing import Any
+    from typing import Callable
+    from typing import Optional
+    from typing import Sequence
+    from typing import Type
+    from typing import Union
+    from typing import TypeVar
+
+    from sentry_sdk._types import Event, Hint, SDKInfo, Log
+    from sentry_sdk.integrations import Integration
+    from sentry_sdk.metrics import MetricsAggregator
+    from sentry_sdk.scope import Scope
+    from sentry_sdk.session import Session
+    from sentry_sdk.spotlight import SpotlightClient
+    from sentry_sdk.transport import Transport
+
+    I = TypeVar("I", bound=Integration)  # noqa: E741
+
+_client_init_debug = ContextVar("client_init_debug")
+
+
+SDK_INFO = {
+    "name": "sentry.python",  # SDK name will be overridden after integrations have been loaded with sentry_sdk.integrations.setup_integrations()
+    "version": VERSION,
+    "packages": [{"name": "pypi:sentry-sdk", "version": VERSION}],
+}  # type: SDKInfo
+
+
+def _get_options(*args, **kwargs):
+    # type: (*Optional[str], **Any) -> Dict[str, Any]
+    if args and (isinstance(args[0], (bytes, str)) or args[0] is None):
+        dsn = args[0]  # type: Optional[str]
+        args = args[1:]
+    else:
+        dsn = None
+
+    if len(args) > 1:
+        raise TypeError("Only single positional argument is expected")
+
+    rv = dict(DEFAULT_OPTIONS)
+    options = dict(*args, **kwargs)
+    if dsn is not None and options.get("dsn") is None:
+        options["dsn"] = dsn
+
+    for key, value in options.items():
+        if key not in rv:
+            raise TypeError("Unknown option %r" % (key,))
+
+        rv[key] = value
+
+    if rv["dsn"] is None:
+        rv["dsn"] = os.environ.get("SENTRY_DSN")
+
+    if rv["release"] is None:
+        rv["release"] = get_default_release()
+
+    if rv["environment"] is None:
+        rv["environment"] = os.environ.get("SENTRY_ENVIRONMENT") or "production"
+
+    if rv["debug"] is None:
+        rv["debug"] = env_to_bool(os.environ.get("SENTRY_DEBUG", "False"), strict=True)
+
+    if rv["server_name"] is None and hasattr(socket, "gethostname"):
+        rv["server_name"] = socket.gethostname()
+
+    if rv["instrumenter"] is None:
+        rv["instrumenter"] = INSTRUMENTER.SENTRY
+
+    if rv["project_root"] is None:
+        try:
+            project_root = os.getcwd()
+        except Exception:
+            project_root = None
+
+        rv["project_root"] = project_root
+
+    if rv["enable_tracing"] is True and rv["traces_sample_rate"] is None:
+        rv["traces_sample_rate"] = 1.0
+
+    if rv["event_scrubber"] is None:
+        rv["event_scrubber"] = EventScrubber(
+            send_default_pii=(
+                False if rv["send_default_pii"] is None else rv["send_default_pii"]
+            )
+        )
+
+    if rv["socket_options"] and not isinstance(rv["socket_options"], list):
+        logger.warning(
+            "Ignoring socket_options because of unexpected format. See urllib3.HTTPConnection.socket_options for the expected format."
+        )
+        rv["socket_options"] = None
+
+    if rv["enable_tracing"] is not None:
+        warnings.warn(
+            "The `enable_tracing` parameter is deprecated. Please use `traces_sample_rate` instead.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+
+    return rv
+
+
+try:
+    # Python 3.6+
+    module_not_found_error = ModuleNotFoundError
+except Exception:
+    # Older Python versions
+    module_not_found_error = ImportError  # type: ignore
+
+
+class BaseClient:
+    """
+    .. versionadded:: 2.0.0
+
+    The basic definition of a client that is used for sending data to Sentry.
+    """
+
+    spotlight = None  # type: Optional[SpotlightClient]
+
+    def __init__(self, options=None):
+        # type: (Optional[Dict[str, Any]]) -> None
+        self.options = (
+            options if options is not None else DEFAULT_OPTIONS
+        )  # type: Dict[str, Any]
+
+        self.transport = None  # type: Optional[Transport]
+        self.monitor = None  # type: Optional[Monitor]
+        self.metrics_aggregator = None  # type: Optional[MetricsAggregator]
+
+    def __getstate__(self, *args, **kwargs):
+        # type: (*Any, **Any) -> Any
+        return {"options": {}}
+
+    def __setstate__(self, *args, **kwargs):
+        # type: (*Any, **Any) -> None
+        pass
+
+    @property
+    def dsn(self):
+        # type: () -> Optional[str]
+        return None
+
+    def should_send_default_pii(self):
+        # type: () -> bool
+        return False
+
+    def is_active(self):
+        # type: () -> bool
+        """
+        .. versionadded:: 2.0.0
+
+        Returns whether the client is active (able to send data to Sentry)
+        """
+        return False
+
+    def capture_event(self, *args, **kwargs):
+        # type: (*Any, **Any) -> Optional[str]
+        return None
+
+    def capture_log(self, scope, severity_text, severity_number, template, **kwargs):
+        # type: (Scope, str, int, str, **Any) -> None
+        pass
+
+    def capture_session(self, *args, **kwargs):
+        # type: (*Any, **Any) -> None
+        return None
+
+    if TYPE_CHECKING:
+
+        @overload
+        def get_integration(self, name_or_class):
+            # type: (str) -> Optional[Integration]
+            ...
+
+        @overload
+        def get_integration(self, name_or_class):
+            # type: (type[I]) -> Optional[I]
+            ...
+
+    def get_integration(self, name_or_class):
+        # type: (Union[str, type[Integration]]) -> Optional[Integration]
+        return None
+
+    def close(self, *args, **kwargs):
+        # type: (*Any, **Any) -> None
+        return None
+
+    def flush(self, *args, **kwargs):
+        # type: (*Any, **Any) -> None
+        return None
+
+    def __enter__(self):
+        # type: () -> BaseClient
+        return self
+
+    def __exit__(self, exc_type, exc_value, tb):
+        # type: (Any, Any, Any) -> None
+        return None
+
+
+class NonRecordingClient(BaseClient):
+    """
+    .. versionadded:: 2.0.0
+
+    A client that does not send any events to Sentry. This is used as a fallback when the Sentry SDK is not yet initialized.
+    """
+
+    pass
+
+
+class _Client(BaseClient):
+    """
+    The client is internally responsible for capturing the events and
+    forwarding them to sentry through the configured transport.  It takes
+    the client options as keyword arguments and optionally the DSN as first
+    argument.
+
+    Alias of :py:class:`sentry_sdk.Client`. (Created for better IntelliSense support.)
+    """
+
+    def __init__(self, *args, **kwargs):
+        # type: (*Any, **Any) -> None
+        super(_Client, self).__init__(options=get_options(*args, **kwargs))
+        self._init_impl()
+
+    def __getstate__(self):
+        # type: () -> Any
+        return {"options": self.options}
+
+    def __setstate__(self, state):
+        # type: (Any) -> None
+        self.options = state["options"]
+        self._init_impl()
+
+    def _setup_instrumentation(self, functions_to_trace):
+        # type: (Sequence[Dict[str, str]]) -> None
+        """
+        Instruments the functions given in the list `functions_to_trace` with the `@sentry_sdk.tracing.trace` decorator.
+        """
+        for function in functions_to_trace:
+            class_name = None
+            function_qualname = function["qualified_name"]
+            module_name, function_name = function_qualname.rsplit(".", 1)
+
+            try:
+                # Try to import module and function
+                # ex: "mymodule.submodule.funcname"
+
+                module_obj = import_module(module_name)
+                function_obj = getattr(module_obj, function_name)
+                setattr(module_obj, function_name, trace(function_obj))
+                logger.debug("Enabled tracing for %s", function_qualname)
+            except module_not_found_error:
+                try:
+                    # Try to import a class
+                    # ex: "mymodule.submodule.MyClassName.member_function"
+
+                    module_name, class_name = module_name.rsplit(".", 1)
+                    module_obj = import_module(module_name)
+                    class_obj = getattr(module_obj, class_name)
+                    function_obj = getattr(class_obj, function_name)
+                    function_type = type(class_obj.__dict__[function_name])
+                    traced_function = trace(function_obj)
+
+                    if function_type in (staticmethod, classmethod):
+                        traced_function = staticmethod(traced_function)
+
+                    setattr(class_obj, function_name, traced_function)
+                    setattr(module_obj, class_name, class_obj)
+                    logger.debug("Enabled tracing for %s", function_qualname)
+
+                except Exception as e:
+                    logger.warning(
+                        "Can not enable tracing for '%s'. (%s) Please check your `functions_to_trace` parameter.",
+                        function_qualname,
+                        e,
+                    )
+
+            except Exception as e:
+                logger.warning(
+                    "Can not enable tracing for '%s'. (%s) Please check your `functions_to_trace` parameter.",
+                    function_qualname,
+                    e,
+                )
+
+    def _init_impl(self):
+        # type: () -> None
+        old_debug = _client_init_debug.get(False)
+
+        def _capture_envelope(envelope):
+            # type: (Envelope) -> None
+            if self.transport is not None:
+                self.transport.capture_envelope(envelope)
+
+        try:
+            _client_init_debug.set(self.options["debug"])
+            self.transport = make_transport(self.options)
+
+            self.monitor = None
+            if self.transport:
+                if self.options["enable_backpressure_handling"]:
+                    self.monitor = Monitor(self.transport)
+
+            self.session_flusher = SessionFlusher(capture_func=_capture_envelope)
+
+            self.metrics_aggregator = None  # type: Optional[MetricsAggregator]
+            experiments = self.options.get("_experiments", {})
+            if experiments.get("enable_metrics", True):
+                # Context vars are not working correctly on Python <=3.6
+                # with gevent.
+                metrics_supported = not is_gevent() or PY37
+                if metrics_supported:
+                    from sentry_sdk.metrics import MetricsAggregator
+
+                    self.metrics_aggregator = MetricsAggregator(
+                        capture_func=_capture_envelope,
+                        enable_code_locations=bool(
+                            experiments.get("metric_code_locations", True)
+                        ),
+                    )
+                else:
+                    logger.info(
+                        "Metrics not supported on Python 3.6 and lower with gevent."
+                    )
+
+            max_request_body_size = ("always", "never", "small", "medium")
+            if self.options["max_request_body_size"] not in max_request_body_size:
+                raise ValueError(
+                    "Invalid value for max_request_body_size. Must be one of {}".format(
+                        max_request_body_size
+                    )
+                )
+
+            if self.options["_experiments"].get("otel_powered_performance", False):
+                logger.debug(
+                    "[OTel] Enabling experimental OTel-powered performance monitoring."
+                )
+                self.options["instrumenter"] = INSTRUMENTER.OTEL
+                if (
+                    "sentry_sdk.integrations.opentelemetry.integration.OpenTelemetryIntegration"
+                    not in _DEFAULT_INTEGRATIONS
+                ):
+                    _DEFAULT_INTEGRATIONS.append(
+                        "sentry_sdk.integrations.opentelemetry.integration.OpenTelemetryIntegration",
+                    )
+
+            self.integrations = setup_integrations(
+                self.options["integrations"],
+                with_defaults=self.options["default_integrations"],
+                with_auto_enabling_integrations=self.options[
+                    "auto_enabling_integrations"
+                ],
+                disabled_integrations=self.options["disabled_integrations"],
+            )
+
+            spotlight_config = self.options.get("spotlight")
+            if spotlight_config is None and "SENTRY_SPOTLIGHT" in os.environ:
+                spotlight_env_value = os.environ["SENTRY_SPOTLIGHT"]
+                spotlight_config = env_to_bool(spotlight_env_value, strict=True)
+                self.options["spotlight"] = (
+                    spotlight_config
+                    if spotlight_config is not None
+                    else spotlight_env_value
+                )
+
+            if self.options.get("spotlight"):
+                self.spotlight = setup_spotlight(self.options)
+
+            sdk_name = get_sdk_name(list(self.integrations.keys()))
+            SDK_INFO["name"] = sdk_name
+            logger.debug("Setting SDK name to '%s'", sdk_name)
+
+            if has_profiling_enabled(self.options):
+                try:
+                    setup_profiler(self.options)
+                except Exception as e:
+                    logger.debug("Can not set up profiler. (%s)", e)
+            else:
+                try:
+                    setup_continuous_profiler(
+                        self.options,
+                        sdk_info=SDK_INFO,
+                        capture_func=_capture_envelope,
+                    )
+                except Exception as e:
+                    logger.debug("Can not set up continuous profiler. (%s)", e)
+
+        finally:
+            _client_init_debug.set(old_debug)
+
+        self._setup_instrumentation(self.options.get("functions_to_trace", []))
+
+        if (
+            self.monitor
+            or self.metrics_aggregator
+            or has_profiling_enabled(self.options)
+            or isinstance(self.transport, BaseHttpTransport)
+        ):
+            # If we have anything that could spawn a background thread, we
+            # need to check whether it is safe to do so.
+            check_uwsgi_thread_support()
+
+    def is_active(self):
+        # type: () -> bool
+        """
+        .. versionadded:: 2.0.0
+
+        Returns whether the client is active (able to send data to Sentry)
+        """
+        return True
+
+    def should_send_default_pii(self):
+        # type: () -> bool
+        """
+        .. versionadded:: 2.0.0
+
+        Returns whether the client should send default PII (Personally Identifiable Information) data to Sentry.
+        """
+        result = self.options.get("send_default_pii")
+        if result is None:
+            result = not self.options["dsn"] and self.spotlight is not None
+
+        return result
+
+    @property
+    def dsn(self):
+        # type: () -> Optional[str]
+        """Returns the configured DSN as string."""
+        return self.options["dsn"]
+
+    def _prepare_event(
+        self,
+        event,  # type: Event
+        hint,  # type: Hint
+        scope,  # type: Optional[Scope]
+    ):
+        # type: (...) -> Optional[Event]
+
+        previous_total_spans = None  # type: Optional[int]
+
+        if event.get("timestamp") is None:
+            event["timestamp"] = datetime.now(timezone.utc)
+
+        if scope is not None:
+            is_transaction = event.get("type") == "transaction"
+            spans_before = len(cast(List[Dict[str, object]], event.get("spans", [])))
+            event_ = scope.apply_to_event(event, hint, self.options)
+
+            # one of the event/error processors returned None
+            if event_ is None:
+                if self.transport:
+                    self.transport.record_lost_event(
+                        "event_processor",
+                        data_category=("transaction" if is_transaction else "error"),
+                    )
+                    if is_transaction:
+                        self.transport.record_lost_event(
+                            "event_processor",
+                            data_category="span",
+                            quantity=spans_before + 1,  # +1 for the transaction itself
+                        )
+                return None
+
+            event = event_
+            spans_delta = spans_before - len(
+                cast(List[Dict[str, object]], event.get("spans", []))
+            )
+            if is_transaction and spans_delta > 0 and self.transport is not None:
+                self.transport.record_lost_event(
+                    "event_processor", data_category="span", quantity=spans_delta
+                )
+
+            dropped_spans = event.pop("_dropped_spans", 0) + spans_delta  # type: int
+            if dropped_spans > 0:
+                previous_total_spans = spans_before + dropped_spans
+
+        if (
+            self.options["attach_stacktrace"]
+            and "exception" not in event
+            and "stacktrace" not in event
+            and "threads" not in event
+        ):
+            with capture_internal_exceptions():
+                event["threads"] = {
+                    "values": [
+                        {
+                            "stacktrace": current_stacktrace(
+                                include_local_variables=self.options.get(
+                                    "include_local_variables", True
+                                ),
+                                max_value_length=self.options.get(
+                                    "max_value_length", DEFAULT_MAX_VALUE_LENGTH
+                                ),
+                            ),
+                            "crashed": False,
+                            "current": True,
+                        }
+                    ]
+                }
+
+        for key in "release", "environment", "server_name", "dist":
+            if event.get(key) is None and self.options[key] is not None:
+                event[key] = str(self.options[key]).strip()
+        if event.get("sdk") is None:
+            sdk_info = dict(SDK_INFO)
+            sdk_info["integrations"] = sorted(self.integrations.keys())
+            event["sdk"] = sdk_info
+
+        if event.get("platform") is None:
+            event["platform"] = "python"
+
+        event = handle_in_app(
+            event,
+            self.options["in_app_exclude"],
+            self.options["in_app_include"],
+            self.options["project_root"],
+        )
+
+        if event is not None:
+            event_scrubber = self.options["event_scrubber"]
+            if event_scrubber:
+                event_scrubber.scrub_event(event)
+
+        if previous_total_spans is not None:
+            event["spans"] = AnnotatedValue(
+                event.get("spans", []), {"len": previous_total_spans}
+            )
+
+        # Postprocess the event here so that annotated types
+        # generally do not surface in before_send
+        if event is not None:
+            event = cast(
+                "Event",
+                serialize(
+                    cast("Dict[str, Any]", event),
+                    max_request_body_size=self.options.get("max_request_body_size"),
+                    max_value_length=self.options.get("max_value_length"),
+                    custom_repr=self.options.get("custom_repr"),
+                ),
+            )
+
+        before_send = self.options["before_send"]
+        if (
+            before_send is not None
+            and event is not None
+            and event.get("type") != "transaction"
+        ):
+            new_event = None
+            with capture_internal_exceptions():
+                new_event = before_send(event, hint or {})
+            if new_event is None:
+                logger.info("before send dropped event")
+                if self.transport:
+                    self.transport.record_lost_event(
+                        "before_send", data_category="error"
+                    )
+
+                # If this is an exception, reset the DedupeIntegration. It still
+                # remembers the dropped exception as the last exception, meaning
+                # that if the same exception happens again and is not dropped
+                # in before_send, it'd get dropped by DedupeIntegration.
+                if event.get("exception"):
+                    DedupeIntegration.reset_last_seen()
+
+            event = new_event
+
+        before_send_transaction = self.options["before_send_transaction"]
+        if (
+            before_send_transaction is not None
+            and event is not None
+            and event.get("type") == "transaction"
+        ):
+            new_event = None
+            spans_before = len(cast(List[Dict[str, object]], event.get("spans", [])))
+            with capture_internal_exceptions():
+                new_event = before_send_transaction(event, hint or {})
+            if new_event is None:
+                logger.info("before send transaction dropped event")
+                if self.transport:
+                    self.transport.record_lost_event(
+                        reason="before_send", data_category="transaction"
+                    )
+                    self.transport.record_lost_event(
+                        reason="before_send",
+                        data_category="span",
+                        quantity=spans_before + 1,  # +1 for the transaction itself
+                    )
+            else:
+                spans_delta = spans_before - len(new_event.get("spans", []))
+                if spans_delta > 0 and self.transport is not None:
+                    self.transport.record_lost_event(
+                        reason="before_send", data_category="span", quantity=spans_delta
+                    )
+
+            event = new_event
+
+        return event
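
The before_send hook wired through _prepare_event drops an event by returning None, which the code above records as a lost event; a minimal hook sketch, with the logger name purely illustrative:

    import sentry_sdk

    def scrub_or_drop(event, hint):
        # drop events from a noisy logger, pass everything else through
        if event.get("logger") == "noisy.module":
            return None
        return event

    sentry_sdk.init(before_send=scrub_or_drop)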
+
+    def _is_ignored_error(self, event, hint):
+        # type: (Event, Hint) -> bool
+        exc_info = hint.get("exc_info")
+        if exc_info is None:
+            return False
+
+        error = exc_info[0]
+        error_type_name = get_type_name(exc_info[0])
+        error_full_name = "%s.%s" % (exc_info[0].__module__, error_type_name)
+
+        for ignored_error in self.options["ignore_errors"]:
+            # String types are matched against the type name in the
+            # exception only
+            if isinstance(ignored_error, str):
+                if ignored_error == error_full_name or ignored_error == error_type_name:
+                    return True
+            else:
+                if issubclass(error, ignored_error):
+                    return True
+
+        return False
+
+    def _should_capture(
+        self,
+        event,  # type: Event
+        hint,  # type: Hint
+        scope=None,  # type: Optional[Scope]
+    ):
+        # type: (...) -> bool
+        # Transactions are sampled independent of error events.
+        is_transaction = event.get("type") == "transaction"
+        if is_transaction:
+            return True
+
+        ignoring_prevents_recursion = scope is not None and not scope._should_capture
+        if ignoring_prevents_recursion:
+            return False
+
+        ignored_by_config_option = self._is_ignored_error(event, hint)
+        if ignored_by_config_option:
+            return False
+
+        return True
+
+    def _should_sample_error(
+        self,
+        event,  # type: Event
+        hint,  # type: Hint
+    ):
+        # type: (...) -> bool
+        error_sampler = self.options.get("error_sampler", None)
+
+        if callable(error_sampler):
+            with capture_internal_exceptions():
+                sample_rate = error_sampler(event, hint)
+        else:
+            sample_rate = self.options["sample_rate"]
+
+        try:
+            not_in_sample_rate = sample_rate < 1.0 and random.random() >= sample_rate
+        except NameError:
+            logger.warning(
+                "The provided error_sampler raised an error. Defaulting to sampling the event."
+            )
+
+            # If the error_sampler raised an error, we should sample the event, since the default behavior
+            # (when no sample_rate or error_sampler is provided) is to sample all events.
+            not_in_sample_rate = False
+        except TypeError:
+            parameter, verb = (
+                ("error_sampler", "returned")
+                if callable(error_sampler)
+                else ("sample_rate", "contains")
+            )
+            logger.warning(
+                "The provided %s %s an invalid value of %s. The value should be a float or a bool. Defaulting to sampling the event."
+                % (parameter, verb, repr(sample_rate))
+            )
+
+            # If the sample_rate has an invalid value, we should sample the event, since the default behavior
+            # (when no sample_rate or error_sampler is provided) is to sample all events.
+            not_in_sample_rate = False
+
+        if not_in_sample_rate:
+            # because we will not sample this event, record a "lost event".
+            if self.transport:
+                self.transport.record_lost_event("sample_rate", data_category="error")
+
+            return False
+
+        return True
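
As handled above, error_sampler may return a per-event sample rate instead of the global sample_rate; a sketch with illustrative rates:

    import sentry_sdk

    def my_error_sampler(event, hint):
        # sample ValueErrors at 10%, keep everything else
        exc_type = hint.get("exc_info", (None,))[0]
        return 0.1 if exc_type is ValueError else 1.0

    sentry_sdk.init(error_sampler=my_error_sampler)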
+
+    def _update_session_from_event(
+        self,
+        session,  # type: Session
+        event,  # type: Event
+    ):
+        # type: (...) -> None
+
+        crashed = False
+        errored = False
+        user_agent = None
+
+        exceptions = (event.get("exception") or {}).get("values")
+        if exceptions:
+            errored = True
+            for error in exceptions:
+                mechanism = error.get("mechanism")
+                if isinstance(mechanism, Mapping) and mechanism.get("handled") is False:
+                    crashed = True
+                    break
+
+        user = event.get("user")
+
+        if session.user_agent is None:
+            headers = (event.get("request") or {}).get("headers")
+            headers_dict = headers if isinstance(headers, dict) else {}
+            for k, v in headers_dict.items():
+                if k.lower() == "user-agent":
+                    user_agent = v
+                    break
+
+        session.update(
+            status="crashed" if crashed else None,
+            user=user,
+            user_agent=user_agent,
+            errors=session.errors + (errored or crashed),
+        )
+
+    def capture_event(
+        self,
+        event,  # type: Event
+        hint=None,  # type: Optional[Hint]
+        scope=None,  # type: Optional[Scope]
+    ):
+        # type: (...) -> Optional[str]
+        """Captures an event.
+
+        :param event: A ready-made event that can be directly sent to Sentry.
+
+        :param hint: Contains metadata about the event that can be read from `before_send`, such as the original exception object or a HTTP request object.
+
+        :param scope: An optional :py:class:`sentry_sdk.Scope` to apply to events.
+
+        :returns: An event ID. May be `None` if there is no DSN set or if the SDK decided to discard the event for other reasons. In such situations setting `debug=True` on `init()` may help.
+        """
+        hint = dict(hint or ())  # type: Hint
+
+        if not self._should_capture(event, hint, scope):
+            return None
+
+        profile = event.pop("profile", None)
+
+        event_id = event.get("event_id")
+        if event_id is None:
+            event["event_id"] = event_id = uuid.uuid4().hex
+        event_opt = self._prepare_event(event, hint, scope)
+        if event_opt is None:
+            return None
+
+        # whenever we capture an event we also check if the session needs
+        # to be updated based on that information.
+        session = scope._session if scope else None
+        if session:
+            self._update_session_from_event(session, event)
+
+        is_transaction = event_opt.get("type") == "transaction"
+        is_checkin = event_opt.get("type") == "check_in"
+
+        if (
+            not is_transaction
+            and not is_checkin
+            and not self._should_sample_error(event, hint)
+        ):
+            return None
+
+        attachments = hint.get("attachments")
+
+        trace_context = event_opt.get("contexts", {}).get("trace") or {}
+        dynamic_sampling_context = trace_context.pop("dynamic_sampling_context", {})
+
+        headers = {
+            "event_id": event_opt["event_id"],
+            "sent_at": format_timestamp(datetime.now(timezone.utc)),
+        }  # type: dict[str, object]
+
+        if dynamic_sampling_context:
+            headers["trace"] = dynamic_sampling_context
+
+        envelope = Envelope(headers=headers)
+
+        if is_transaction:
+            if isinstance(profile, Profile):
+                envelope.add_profile(profile.to_json(event_opt, self.options))
+            envelope.add_transaction(event_opt)
+        elif is_checkin:
+            envelope.add_checkin(event_opt)
+        else:
+            envelope.add_event(event_opt)
+
+        for attachment in attachments or ():
+            envelope.add_item(attachment.to_envelope_item())
+
+        return_value = None
+        if self.spotlight:
+            self.spotlight.capture_envelope(envelope)
+            return_value = event_id
+
+        if self.transport is not None:
+            self.transport.capture_envelope(envelope)
+            return_value = event_id
+
+        return return_value
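
capture_event accepts a ready-made event dict, as the docstring above notes; a minimal sketch via the top-level API:

    import sentry_sdk

    event = {"message": "manual event", "level": "warning"}
    # returns the event ID, or None if the event was discarded
    event_id = sentry_sdk.capture_event(event)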
+
+    def capture_log(self, scope, severity_text, severity_number, template, **kwargs):
+        # type: (Scope, str, int, str, **Any) -> None
+        logs_enabled = self.options["_experiments"].get("enable_sentry_logs", False)
+        if not logs_enabled:
+            return
+
+        headers = {
+            "sent_at": format_timestamp(datetime.now(timezone.utc)),
+        }  # type: dict[str, object]
+
+        attrs = {
+            "sentry.message.template": template,
+        }  # type: dict[str, str | bool | float | int]
+
+        kwargs_attributes = kwargs.get("attributes")
+        if kwargs_attributes is not None:
+            attrs.update(kwargs_attributes)
+
+        environment = self.options.get("environment")
+        if environment is not None:
+            attrs["sentry.environment"] = environment
+
+        release = self.options.get("release")
+        if release is not None:
+            attrs["sentry.release"] = release
+
+        span = scope.span
+        if span is not None:
+            attrs["sentry.trace.parent_span_id"] = span.span_id
+
+        for k, v in kwargs.items():
+            attrs[f"sentry.message.parameters.{k}"] = v
+
+        log = {
+            "severity_text": severity_text,
+            "severity_number": severity_number,
+            "body": template.format(**kwargs),
+            "attributes": attrs,
+            "time_unix_nano": time.time_ns(),
+            "trace_id": None,
+        }  # type: Log
+
+        # If debug is enabled, log the log to the console
+        debug = self.options.get("debug", False)
+        if debug:
+            severity_text_to_logging_level = {
+                "trace": logging.DEBUG,
+                "debug": logging.DEBUG,
+                "info": logging.INFO,
+                "warn": logging.WARNING,
+                "error": logging.ERROR,
+                "fatal": logging.CRITICAL,
+            }
+            logger.log(
+                severity_text_to_logging_level.get(severity_text, logging.DEBUG),
+                f'[Sentry Logs] {log["body"]}',
+            )
+
+        propagation_context = scope.get_active_propagation_context()
+        if propagation_context is not None:
+            headers["trace_id"] = propagation_context.trace_id
+            log["trace_id"] = propagation_context.trace_id
+
+        envelope = Envelope(headers=headers)
+
+        before_emit_log = self.options["_experiments"].get("before_emit_log")
+        if before_emit_log is not None:
+            log = before_emit_log(log, {})
+        if log is None:
+            return
+
+        def format_attribute(key, val):
+            # type: (str, int | float | str | bool) -> Any
+            if isinstance(val, bool):
+                return {"key": key, "value": {"boolValue": val}}
+            if isinstance(val, int):
+                return {"key": key, "value": {"intValue": str(val)}}
+            if isinstance(val, float):
+                return {"key": key, "value": {"doubleValue": val}}
+            if isinstance(val, str):
+                return {"key": key, "value": {"stringValue": val}}
+            return {"key": key, "value": {"stringValue": json.dumps(val)}}
+
+        otel_log = {
+            "severityText": log["severity_text"],
+            "severityNumber": log["severity_number"],
+            "body": {"stringValue": log["body"]},
+            "timeUnixNano": str(log["time_unix_nano"]),
+            "attributes": [
+                format_attribute(k, v) for (k, v) in log["attributes"].items()
+            ],
+        }
+
+        if "trace_id" in log:
+            otel_log["traceId"] = log["trace_id"]
+
+        envelope.add_log(otel_log)  # TODO: batch these
+
+        if self.spotlight:
+            self.spotlight.capture_envelope(envelope)
+
+        if self.transport is not None:
+            self.transport.capture_envelope(envelope)
+
+    def capture_session(
+        self, session  # type: Session
+    ):
+        # type: (...) -> None
+        if not session.release:
+            logger.info("Discarded session update because of missing release")
+        else:
+            self.session_flusher.add_session(session)
+
+    if TYPE_CHECKING:
+
+        @overload
+        def get_integration(self, name_or_class):
+            # type: (str) -> Optional[Integration]
+            ...
+
+        @overload
+        def get_integration(self, name_or_class):
+            # type: (type[I]) -> Optional[I]
+            ...
+
+    def get_integration(
+        self, name_or_class  # type: Union[str, Type[Integration]]
+    ):
+        # type: (...) -> Optional[Integration]
+        """Returns the integration for this client by name or class.
+        If the client does not have that integration then `None` is returned.
+        """
+        if isinstance(name_or_class, str):
+            integration_name = name_or_class
+        elif name_or_class.identifier is not None:
+            integration_name = name_or_class.identifier
+        else:
+            raise ValueError("Integration has no name")
+
+        return self.integrations.get(integration_name)
+
+    def close(
+        self,
+        timeout=None,  # type: Optional[float]
+        callback=None,  # type: Optional[Callable[[int, float], None]]
+    ):
+        # type: (...) -> None
+        """
+        Close the client and shut down the transport. Arguments have the same
+        semantics as :py:meth:`Client.flush`.
+        """
+        if self.transport is not None:
+            self.flush(timeout=timeout, callback=callback)
+            self.session_flusher.kill()
+            if self.metrics_aggregator is not None:
+                self.metrics_aggregator.kill()
+            if self.monitor:
+                self.monitor.kill()
+            self.transport.kill()
+            self.transport = None
+
+    def flush(
+        self,
+        timeout=None,  # type: Optional[float]
+        callback=None,  # type: Optional[Callable[[int, float], None]]
+    ):
+        # type: (...) -> None
+        """
+        Wait for the current events to be sent.
+
+        :param timeout: Wait for at most `timeout` seconds. If no `timeout` is provided, the `shutdown_timeout` option value is used.
+
+        :param callback: Is invoked with the number of pending events and the configured timeout.
+        """
+        if self.transport is not None:
+            if timeout is None:
+                timeout = self.options["shutdown_timeout"]
+            self.session_flusher.flush()
+            if self.metrics_aggregator is not None:
+                self.metrics_aggregator.flush()
+            self.transport.flush(timeout=timeout, callback=callback)
+
+    def __enter__(self):
+        # type: () -> _Client
+        return self
+
+    def __exit__(self, exc_type, exc_value, tb):
+        # type: (Any, Any, Any) -> None
+        self.close()
+
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    # Make mypy, PyCharm and other static analyzers think `get_options` is a
+    # type to have nicer autocompletion for params.
+    #
+    # Use `ClientConstructor` to define the argument types of `init` and
+    # `Dict[str, Any]` to tell static analyzers about the return type.
+
+    class get_options(ClientConstructor, Dict[str, Any]):  # noqa: N801
+        pass
+
+    class Client(ClientConstructor, _Client):
+        pass
+
+else:
+    # Alias `get_options` for actual usage. Go through the lambda indirection
+    # to throw PyCharm off of the weakly typed signature (it would otherwise
+    # discover both the weakly typed signature of `_init` and our faked `init`
+    # type).
+
+    get_options = (lambda: _get_options)()
+    Client = (lambda: _Client)()
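
As _get_options above shows, the DSN may be passed positionally or as a keyword, unset values fall back to environment variables such as SENTRY_DSN and SENTRY_ENVIRONMENT, and unknown keys raise TypeError; a sketch with a placeholder DSN:

    import sentry_sdk

    # equivalent: sentry_sdk.init(dsn="https://key@example.ingest.sentry.io/0", ...)
    sentry_sdk.init("https://key@example.ingest.sentry.io/0", traces_sample_rate=1.0)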
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/consts.py b/.venv/lib/python3.12/site-packages/sentry_sdk/consts.py
new file mode 100644
index 00000000..f9317242
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/consts.py
@@ -0,0 +1,968 @@
+import itertools
+
+from enum import Enum
+from typing import TYPE_CHECKING
+
+# up top to prevent circular import due to integration import
+DEFAULT_MAX_VALUE_LENGTH = 1024
+
+DEFAULT_MAX_STACK_FRAMES = 100
+DEFAULT_ADD_FULL_STACK = False
+
+
+# Also needs to be at the top to prevent circular import
+class EndpointType(Enum):
+    """
+    The type of an endpoint. This is an enum, rather than a constant, for historical reasons
+    (the old /store endpoint). The enum also preserves future compatibility, in case we ever
+    have a new endpoint.
+    """
+
+    ENVELOPE = "envelope"
+
+
+class CompressionAlgo(Enum):
+    GZIP = "gzip"
+    BROTLI = "br"
+
+
+if TYPE_CHECKING:
+    import sentry_sdk
+
+    from typing import Optional
+    from typing import Callable
+    from typing import Union
+    from typing import List
+    from typing import Type
+    from typing import Dict
+    from typing import Any
+    from typing import Sequence
+    from typing import Tuple
+    from typing_extensions import Literal
+    from typing_extensions import TypedDict
+
+    from sentry_sdk._types import (
+        BreadcrumbProcessor,
+        ContinuousProfilerMode,
+        Event,
+        EventProcessor,
+        Hint,
+        MeasurementUnit,
+        ProfilerMode,
+        TracesSampler,
+        TransactionProcessor,
+        MetricTags,
+        MetricValue,
+    )
+
+    # Experiments are feature flags to enable and disable certain unstable SDK
+    # functionality. Changing them from the defaults (`None`) in production
+    # code is highly discouraged. They are not subject to any stability
+    # guarantees such as the ones from semantic versioning.
+    Experiments = TypedDict(
+        "Experiments",
+        {
+            "max_spans": Optional[int],
+            "max_flags": Optional[int],
+            "record_sql_params": Optional[bool],
+            "continuous_profiling_auto_start": Optional[bool],
+            "continuous_profiling_mode": Optional[ContinuousProfilerMode],
+            "otel_powered_performance": Optional[bool],
+            "transport_zlib_compression_level": Optional[int],
+            "transport_compression_level": Optional[int],
+            "transport_compression_algo": Optional[CompressionAlgo],
+            "transport_num_pools": Optional[int],
+            "transport_http2": Optional[bool],
+            "enable_metrics": Optional[bool],
+            "before_emit_metric": Optional[
+                Callable[[str, MetricValue, MeasurementUnit, MetricTags], bool]
+            ],
+            "metric_code_locations": Optional[bool],
+        },
+        total=False,
+    )
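
Experiments travel as a plain dict under the _experiments init option; the keys come from the TypedDict above, while the values below are illustrative and, as noted, unstable:

    import sentry_sdk

    sentry_sdk.init(
        _experiments={
            "enable_metrics": True,
            "metric_code_locations": False,
        },
    )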
+
+DEFAULT_QUEUE_SIZE = 100
+DEFAULT_MAX_BREADCRUMBS = 100
+MATCH_ALL = r".*"
+
+FALSE_VALUES = [
+    "false",
+    "no",
+    "off",
+    "n",
+    "0",
+]
+
+
+class INSTRUMENTER:
+    SENTRY = "sentry"
+    OTEL = "otel"
+
+
+class SPANDATA:
+    """
+    Additional information describing the type of the span.
+    See: https://develop.sentry.dev/sdk/performance/span-data-conventions/
+    """
+
+    AI_FREQUENCY_PENALTY = "ai.frequency_penalty"
+    """
+    Penalizes new tokens based on how often they already appear in the generated text so far, reducing repetitiveness.
+    Example: 0.5
+    """
+
+    AI_PRESENCE_PENALTY = "ai.presence_penalty"
+    """
+    Penalizes new tokens that have already appeared in the generated text at all, encouraging the model to cover new topics.
+    Example: 0.5
+    """
+
+    AI_INPUT_MESSAGES = "ai.input_messages"
+    """
+    The input messages to an LLM call.
+    Example: [{"role": "user", "message": "hello"}]
+    """
+
+    AI_MODEL_ID = "ai.model_id"
+    """
+    The unique descriptor of the model being executed.
+    Example: gpt-4
+    """
+
+    AI_METADATA = "ai.metadata"
+    """
+    Extra metadata passed to an AI pipeline step.
+    Example: {"executed_function": "add_integers"}
+    """
+
+    AI_TAGS = "ai.tags"
+    """
+    Tags that describe an AI pipeline step.
+    Example: {"executed_function": "add_integers"}
+    """
+
+    AI_STREAMING = "ai.streaming"
+    """
+    Whether or not the AI model call's response was streamed back asynchronously.
+    Example: true
+    """
+
+    AI_TEMPERATURE = "ai.temperature"
+    """
+    For an AI model call, the temperature parameter. Higher temperatures make the output more random.
+    Example: 0.5
+    """
+
+    AI_TOP_P = "ai.top_p"
+    """
+    For an AI model call, the top_p parameter. Top_p (nucleus sampling) restricts sampling to the smallest set of tokens whose cumulative probability exceeds top_p.
+    Example: 0.5
+    """
+
+    AI_TOP_K = "ai.top_k"
+    """
+    For an AI model call, the top_k parameter. Top_k restricts sampling to the top_k most likely tokens.
+    Example: 35
+    """
+
+    AI_FUNCTION_CALL = "ai.function_call"
+    """
+    For an AI model call, the function that was called. Deprecated for OpenAI in favor of tool_calls.
+    """
+
+    AI_TOOL_CALLS = "ai.tool_calls"
+    """
+    For an AI model call, the tool (function) calls that were made.
+    """
+
+    AI_TOOLS = "ai.tools"
+    """
+    For an AI model call, the functions that are available
+    """
+
+    AI_RESPONSE_FORMAT = "ai.response_format"
+    """
+    For an AI model call, the format of the response
+    """
+
+    AI_LOGIT_BIAS = "ai.logit_bias"
+    """
+    For an AI model call, the logit bias
+    """
+
+    AI_PREAMBLE = "ai.preamble"
+    """
+    For an AI model call, the preamble parameter.
+    Preambles are a part of the prompt used to adjust the model's overall behavior and conversation style.
+    Example: "You are now a clown."
+    """
+
+    AI_RAW_PROMPTING = "ai.raw_prompting"
+    """
+    Minimize pre-processing done to the prompt sent to the LLM.
+    Example: true
+    """
+
+    AI_RESPONSES = "ai.responses"
+    """
+    The responses to an AI model call, always given as a list.
+    Example: ["hello", "world"]
+    """
+
+    AI_SEED = "ai.seed"
+    """
+    The seed; ideally, models given the same seed and the same other parameters will produce the exact same output.
+    Example: 123.45
+    """
+
+    DB_NAME = "db.name"
+    """
+    The name of the database being accessed. For commands that switch the database, this should be set to the target database (even if the command fails).
+    Example: myDatabase
+    """
+
+    DB_USER = "db.user"
+    """
+    The name of the database user used for connecting to the database.
+    See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/database.md
+    Example: my_user
+    """
+
+    DB_OPERATION = "db.operation"
+    """
+    The name of the operation being executed, e.g. the MongoDB command name such as findAndModify, or the SQL keyword.
+    See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/database.md
+    Example: findAndModify, HMSET, SELECT
+    """
+
+    DB_SYSTEM = "db.system"
+    """
+    An identifier for the database management system (DBMS) product being used.
+    See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/database.md
+    Example: postgresql
+    """
+
+    DB_MONGODB_COLLECTION = "db.mongodb.collection"
+    """
+    The MongoDB collection being accessed within the database.
+    See: https://github.com/open-telemetry/semantic-conventions/blob/main/docs/database/mongodb.md#attributes
+    Example: public.users; customers
+    """
+
+    CACHE_HIT = "cache.hit"
+    """
+    A boolean indicating whether the requested data was found in the cache.
+    Example: true
+    """
+
+    CACHE_ITEM_SIZE = "cache.item_size"
+    """
+    The size of the requested data in bytes.
+    Example: 58
+    """
+
+    CACHE_KEY = "cache.key"
+    """
+    The key of the requested data.
+    Example: template.cache.some_item.867da7e2af8e6b2f3aa7213a4080edb3
+    """
+
+    NETWORK_PEER_ADDRESS = "network.peer.address"
+    """
+    Peer address of the network connection - IP address or Unix domain socket name.
+    Example: 10.1.2.80, /tmp/my.sock, localhost
+    """
+
+    NETWORK_PEER_PORT = "network.peer.port"
+    """
+    Peer port number of the network connection.
+    Example: 6379
+    """
+
+    HTTP_QUERY = "http.query"
+    """
+    The query string present in the URL.
+    Example: ?foo=bar&bar=baz
+    """
+
+    HTTP_FRAGMENT = "http.fragment"
+    """
+    The fragment present in the URL.
+    Example: #foo=bar
+    """
+
+    HTTP_METHOD = "http.method"
+    """
+    The HTTP method used.
+    Example: GET
+    """
+
+    HTTP_STATUS_CODE = "http.response.status_code"
+    """
+    The HTTP status code as an integer.
+    Example: 418
+    """
+
+    MESSAGING_DESTINATION_NAME = "messaging.destination.name"
+    """
+    The destination name where the message is being consumed from,
+    e.g. the queue name or topic.
+    """
+
+    MESSAGING_MESSAGE_ID = "messaging.message.id"
+    """
+    The message's identifier.
+    """
+
+    MESSAGING_MESSAGE_RETRY_COUNT = "messaging.message.retry.count"
+    """
+    Number of retries/attempts to process a message.
+    """
+
+    MESSAGING_MESSAGE_RECEIVE_LATENCY = "messaging.message.receive.latency"
+    """
+    The latency between when the task was enqueued and when it was started to be processed.
+    """
+
+    MESSAGING_SYSTEM = "messaging.system"
+    """
+    The messaging system's name, e.g. `kafka`, `aws_sqs`
+    """
+
+    SERVER_ADDRESS = "server.address"
+    """
+    Name of the database host.
+    Example: example.com
+    """
+
+    SERVER_PORT = "server.port"
+    """
+    Logical server port number
+    Example: 80; 8080; 443
+    """
+
+    SERVER_SOCKET_ADDRESS = "server.socket.address"
+    """
+    Physical server IP address or Unix socket address.
+    Example: 10.5.3.2
+    """
+
+    SERVER_SOCKET_PORT = "server.socket.port"
+    """
+    Physical server port.
+    Recommended: if different from server.port.
+    Example: 16456
+    """
+
+    CODE_FILEPATH = "code.filepath"
+    """
+    The source code file name that identifies the code unit as uniquely as possible (preferably an absolute file path).
+    Example: "/app/myapplication/http/handler/server.py"
+    """
+
+    CODE_LINENO = "code.lineno"
+    """
+    The line number in `code.filepath` best representing the operation. It SHOULD point within the code unit named in `code.function`.
+    Example: 42
+    """
+
+    CODE_FUNCTION = "code.function"
+    """
+    The method or function name, or equivalent (usually rightmost part of the code unit's name).
+    Example: "server_request"
+    """
+
+    CODE_NAMESPACE = "code.namespace"
+    """
+    The "namespace" within which `code.function` is defined. Usually the qualified class or module name, such that `code.namespace` + some separator + `code.function` form a unique identifier for the code unit.
+    Example: "http.handler"
+    """
+
+    THREAD_ID = "thread.id"
+    """
+    Identifier of a thread from where the span originated. This should be a string.
+    Example: "7972576320"
+    """
+
+    THREAD_NAME = "thread.name"
+    """
+    Label identifying a thread from where the span originated. This should be a string.
+    Example: "MainThread"
+    """
+
+    PROFILER_ID = "profiler_id"
+    """
+    Label identifying the profiler id that the span occurred in. This should be a string.
+    Example: "5249fbada8d5416482c2f6e47e337372"
+    """
+
+
+class SPANSTATUS:
+    """
+    The status of a Sentry span.
+
+    See: https://develop.sentry.dev/sdk/event-payloads/contexts/#trace-context
+    """
+
+    ABORTED = "aborted"
+    ALREADY_EXISTS = "already_exists"
+    CANCELLED = "cancelled"
+    DATA_LOSS = "data_loss"
+    DEADLINE_EXCEEDED = "deadline_exceeded"
+    FAILED_PRECONDITION = "failed_precondition"
+    INTERNAL_ERROR = "internal_error"
+    INVALID_ARGUMENT = "invalid_argument"
+    NOT_FOUND = "not_found"
+    OK = "ok"
+    OUT_OF_RANGE = "out_of_range"
+    PERMISSION_DENIED = "permission_denied"
+    RESOURCE_EXHAUSTED = "resource_exhausted"
+    UNAUTHENTICATED = "unauthenticated"
+    UNAVAILABLE = "unavailable"
+    UNIMPLEMENTED = "unimplemented"
+    UNKNOWN_ERROR = "unknown_error"
+
+
+class OP:
+    ANTHROPIC_MESSAGES_CREATE = "ai.messages.create.anthropic"
+    CACHE_GET = "cache.get"
+    CACHE_PUT = "cache.put"
+    COHERE_CHAT_COMPLETIONS_CREATE = "ai.chat_completions.create.cohere"
+    COHERE_EMBEDDINGS_CREATE = "ai.embeddings.create.cohere"
+    DB = "db"
+    DB_REDIS = "db.redis"
+    EVENT_DJANGO = "event.django"
+    FUNCTION = "function"
+    FUNCTION_AWS = "function.aws"
+    FUNCTION_GCP = "function.gcp"
+    GRAPHQL_EXECUTE = "graphql.execute"
+    GRAPHQL_MUTATION = "graphql.mutation"
+    GRAPHQL_PARSE = "graphql.parse"
+    GRAPHQL_RESOLVE = "graphql.resolve"
+    GRAPHQL_SUBSCRIPTION = "graphql.subscription"
+    GRAPHQL_QUERY = "graphql.query"
+    GRAPHQL_VALIDATE = "graphql.validate"
+    GRPC_CLIENT = "grpc.client"
+    GRPC_SERVER = "grpc.server"
+    HTTP_CLIENT = "http.client"
+    HTTP_CLIENT_STREAM = "http.client.stream"
+    HTTP_SERVER = "http.server"
+    MIDDLEWARE_DJANGO = "middleware.django"
+    MIDDLEWARE_LITESTAR = "middleware.litestar"
+    MIDDLEWARE_LITESTAR_RECEIVE = "middleware.litestar.receive"
+    MIDDLEWARE_LITESTAR_SEND = "middleware.litestar.send"
+    MIDDLEWARE_STARLETTE = "middleware.starlette"
+    MIDDLEWARE_STARLETTE_RECEIVE = "middleware.starlette.receive"
+    MIDDLEWARE_STARLETTE_SEND = "middleware.starlette.send"
+    MIDDLEWARE_STARLITE = "middleware.starlite"
+    MIDDLEWARE_STARLITE_RECEIVE = "middleware.starlite.receive"
+    MIDDLEWARE_STARLITE_SEND = "middleware.starlite.send"
+    OPENAI_CHAT_COMPLETIONS_CREATE = "ai.chat_completions.create.openai"
+    OPENAI_EMBEDDINGS_CREATE = "ai.embeddings.create.openai"
+    HUGGINGFACE_HUB_CHAT_COMPLETIONS_CREATE = (
+        "ai.chat_completions.create.huggingface_hub"
+    )
+    LANGCHAIN_PIPELINE = "ai.pipeline.langchain"
+    LANGCHAIN_RUN = "ai.run.langchain"
+    LANGCHAIN_TOOL = "ai.tool.langchain"
+    LANGCHAIN_AGENT = "ai.agent.langchain"
+    LANGCHAIN_CHAT_COMPLETIONS_CREATE = "ai.chat_completions.create.langchain"
+    QUEUE_PROCESS = "queue.process"
+    QUEUE_PUBLISH = "queue.publish"
+    QUEUE_SUBMIT_ARQ = "queue.submit.arq"
+    QUEUE_TASK_ARQ = "queue.task.arq"
+    QUEUE_SUBMIT_CELERY = "queue.submit.celery"
+    QUEUE_TASK_CELERY = "queue.task.celery"
+    QUEUE_TASK_RQ = "queue.task.rq"
+    QUEUE_SUBMIT_HUEY = "queue.submit.huey"
+    QUEUE_TASK_HUEY = "queue.task.huey"
+    QUEUE_SUBMIT_RAY = "queue.submit.ray"
+    QUEUE_TASK_RAY = "queue.task.ray"
+    SUBPROCESS = "subprocess"
+    SUBPROCESS_WAIT = "subprocess.wait"
+    SUBPROCESS_COMMUNICATE = "subprocess.communicate"
+    TEMPLATE_RENDER = "template.render"
+    VIEW_RENDER = "view.render"
+    VIEW_RESPONSE_RENDER = "view.response.render"
+    WEBSOCKET_SERVER = "websocket.server"
+    SOCKET_CONNECTION = "socket.connection"
+    SOCKET_DNS = "socket.dns"
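+
+    # Usage sketch (illustrative, not upstream code): OP values name the
+    # operation when starting spans, e.g.
+    #
+    #     with sentry_sdk.start_span(op=OP.HTTP_CLIENT, description="GET /users"):
+    #         ...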
+
+
+# This type exists to trick mypy and PyCharm into thinking `init` and `Client`
+# take these arguments (even though they take opaque **kwargs)
+class ClientConstructor:
+
+    def __init__(
+        self,
+        dsn=None,  # type: Optional[str]
+        *,
+        max_breadcrumbs=DEFAULT_MAX_BREADCRUMBS,  # type: int
+        release=None,  # type: Optional[str]
+        environment=None,  # type: Optional[str]
+        server_name=None,  # type: Optional[str]
+        shutdown_timeout=2,  # type: float
+        integrations=[],  # type: Sequence[sentry_sdk.integrations.Integration]  # noqa: B006
+        in_app_include=[],  # type: List[str]  # noqa: B006
+        in_app_exclude=[],  # type: List[str]  # noqa: B006
+        default_integrations=True,  # type: bool
+        dist=None,  # type: Optional[str]
+        transport=None,  # type: Optional[Union[sentry_sdk.transport.Transport, Type[sentry_sdk.transport.Transport], Callable[[Event], None]]]
+        transport_queue_size=DEFAULT_QUEUE_SIZE,  # type: int
+        sample_rate=1.0,  # type: float
+        send_default_pii=None,  # type: Optional[bool]
+        http_proxy=None,  # type: Optional[str]
+        https_proxy=None,  # type: Optional[str]
+        ignore_errors=[],  # type: Sequence[Union[type, str]]  # noqa: B006
+        max_request_body_size="medium",  # type: str
+        socket_options=None,  # type: Optional[List[Tuple[int, int, int | bytes]]]
+        keep_alive=False,  # type: bool
+        before_send=None,  # type: Optional[EventProcessor]
+        before_breadcrumb=None,  # type: Optional[BreadcrumbProcessor]
+        debug=None,  # type: Optional[bool]
+        attach_stacktrace=False,  # type: bool
+        ca_certs=None,  # type: Optional[str]
+        propagate_traces=True,  # type: bool
+        traces_sample_rate=None,  # type: Optional[float]
+        traces_sampler=None,  # type: Optional[TracesSampler]
+        profiles_sample_rate=None,  # type: Optional[float]
+        profiles_sampler=None,  # type: Optional[TracesSampler]
+        profiler_mode=None,  # type: Optional[ProfilerMode]
+        profile_lifecycle="manual",  # type: Literal["manual", "trace"]
+        profile_session_sample_rate=None,  # type: Optional[float]
+        auto_enabling_integrations=True,  # type: bool
+        disabled_integrations=None,  # type: Optional[Sequence[sentry_sdk.integrations.Integration]]
+        auto_session_tracking=True,  # type: bool
+        send_client_reports=True,  # type: bool
+        _experiments={},  # type: Experiments  # noqa: B006
+        proxy_headers=None,  # type: Optional[Dict[str, str]]
+        instrumenter=INSTRUMENTER.SENTRY,  # type: Optional[str]
+        before_send_transaction=None,  # type: Optional[TransactionProcessor]
+        project_root=None,  # type: Optional[str]
+        enable_tracing=None,  # type: Optional[bool]
+        include_local_variables=True,  # type: Optional[bool]
+        include_source_context=True,  # type: Optional[bool]
+        trace_propagation_targets=[  # noqa: B006
+            MATCH_ALL
+        ],  # type: Optional[Sequence[str]]
+        functions_to_trace=[],  # type: Sequence[Dict[str, str]]  # noqa: B006
+        event_scrubber=None,  # type: Optional[sentry_sdk.scrubber.EventScrubber]
+        max_value_length=DEFAULT_MAX_VALUE_LENGTH,  # type: int
+        enable_backpressure_handling=True,  # type: bool
+        error_sampler=None,  # type: Optional[Callable[[Event, Hint], Union[float, bool]]]
+        enable_db_query_source=True,  # type: bool
+        db_query_source_threshold_ms=100,  # type: int
+        spotlight=None,  # type: Optional[Union[bool, str]]
+        cert_file=None,  # type: Optional[str]
+        key_file=None,  # type: Optional[str]
+        custom_repr=None,  # type: Optional[Callable[..., Optional[str]]]
+        add_full_stack=DEFAULT_ADD_FULL_STACK,  # type: bool
+        max_stack_frames=DEFAULT_MAX_STACK_FRAMES,  # type: Optional[int]
+    ):
+        # type: (...) -> None
+        """Initialize the Sentry SDK with the given parameters. All parameters described here can be used in a call to `sentry_sdk.init()`.
+
+        :param dsn: The DSN tells the SDK where to send the events.
+
+            If this option is not set, the SDK will just not send any data.
+
+            The `dsn` config option takes precedence over the environment variable.
+
+            Learn more about `DSN utilization <https://docs.sentry.io/product/sentry-basics/dsn-explainer/#dsn-utilization>`_.
+
+        :param debug: Turns debug mode on or off.
+
+            When `True`, the SDK will attempt to print out debugging information. This can be useful if something goes
+            wrong with event sending.
+
+            The default is always `False`. It's generally not recommended to turn it on in production because of the
+            increase in log output.
+
+            The `debug` config option takes precedence over the environment variable.
+
+        :param release: Sets the release.
+
+            If not set, the SDK will try to automatically configure a release out of the box but it's a better idea to
+            manually set it to guarantee that the release is in sync with your deploy integrations.
+
+            Release names are strings, but some formats are detected by Sentry and might be rendered differently.
+
+            See `the releases documentation <https://docs.sentry.io/platforms/python/configuration/releases/>`_ to learn how the SDK tries to
+            automatically configure a release.
+
+            The `release` config option takes precedence over the environment variable.
+
+            Learn more about how to send release data so Sentry can tell you about regressions between releases and
+            identify the potential source in `the product documentation <https://docs.sentry.io/product/releases/>`_.
+
+        :param environment: Sets the environment. This string is freeform and set to `production` by default.
+
+            A release can be associated with more than one environment to separate them in the UI (think `staging` vs
+            `production` or similar).
+
+            The `environment` config option takes precedence over the environment variable.
+
+        :param dist: The distribution of the application.
+
+            Distributions are used to disambiguate build or deployment variants of the same release of an application.
+
+            The dist can be for example a build number.
+
+        :param sample_rate: Configures the sample rate for error events, in the range of `0.0` to `1.0`.
+
+            The default is `1.0`, which means that 100% of error events will be sent. If set to `0.1`, only 10% of
+            error events will be sent.
+
+            Events are picked randomly.
+
+        :param error_sampler: Dynamically configures the sample rate for error events on a per-event basis.
+
+            This configuration option accepts a function, which takes two parameters (the `event` and the `hint`), and
+            which returns a boolean (indicating whether the event should be sent to Sentry) or a floating-point number
+            between `0.0` and `1.0`, inclusive.
+
+            The number indicates the probability the event is sent to Sentry; the SDK will randomly decide whether to
+            send the event with the given probability.
+
+            If this configuration option is specified, the `sample_rate` option is ignored.
+
+        :param ignore_errors: A list of exception classes or exception class names that shouldn't be sent to Sentry.
+
+            Errors that are an instance of these exceptions, or of a subclass of them, will be filtered out before
+            they're sent to Sentry.
+
+            By default, all errors are sent.
+
+        :param max_breadcrumbs: This variable controls the total amount of breadcrumbs that should be captured.
+
+            This defaults to `100`, but you can set this to any number.
+
+            However, you should be aware that Sentry has a `maximum payload size <https://develop.sentry.dev/sdk/data-model/envelopes/#size-limits>`_
+            and any events exceeding that payload size will be dropped.
+
+        :param attach_stacktrace: When enabled, stack traces are automatically attached to all messages logged.
+
+            Stack traces are always attached to exceptions; however, when this option is set, stack traces are also
+            sent with messages.
+
+            This option means that stack traces appear next to all log messages.
+
+            Grouping in Sentry is different for events with stack traces and without. As a result, you will get new
+            groups as you enable or disable this flag for certain events.
+
+        :param send_default_pii: If this flag is enabled, `certain personally identifiable information (PII)
+            <https://docs.sentry.io/platforms/python/data-management/data-collected/>`_ is added by active integrations.
+
+            If you enable this option, be sure to manually remove what you don't want to send using our features for
+            managing `Sensitive Data <https://docs.sentry.io/data-management/sensitive-data/>`_.
+
+        :param event_scrubber: Scrubs the event payload for sensitive information, such as cookies, sessions, and
+            passwords, based on a `denylist` of field names.
+
+            It can additionally be used to scrub from another `pii_denylist` if `send_default_pii` is disabled.
+
+            See how to `configure the scrubber here <https://docs.sentry.io/data-management/sensitive-data/#event-scrubber>`_.
+
+        :param include_source_context: When enabled, source context will be included in events sent to Sentry.
+
+            This source context includes the five lines of code above and below the line of code where an error
+            happened.
+
+        :param include_local_variables: When enabled, the SDK will capture a snapshot of local variables to send with
+            the event to help with debugging.
+
+        :param add_full_stack: When capturing errors, Sentry stack traces typically only include frames that start the
+            moment an error occurs.
+
+            But if the `add_full_stack` option is enabled (set to `True`), all frames from the start of execution will
+            be included in the stack trace sent to Sentry.
+
+        :param max_stack_frames: This option limits the number of stack frames that will be captured when
+            `add_full_stack` is enabled.
+
+        :param server_name: This option can be used to supply a server name.
+
+            When provided, the name of the server is sent along and persisted in the event.
+
+            For many integrations, the server name actually corresponds to the device hostname, even in situations
+            where the machine is not actually a server.
+
+        :param project_root: The full path to the root directory of your application.
+
+            The `project_root` is used to mark frames in a stack trace either as being in your application or outside
+            of the application.
+
+        :param in_app_include: A list of string prefixes of module names that belong to the app.
+
+            This option takes precedence over `in_app_exclude`.
+
+            Sentry differentiates stack frames that are directly related to your application ("in application") from
+            stack frames that come from other packages such as the standard library, frameworks, or other dependencies.
+
+            The application package is automatically marked as `inApp`.
+
+            The difference is visible in `sentry.io <https://sentry.io>`_, where only the "in application" frames are
+            displayed by default.
+
+        :param in_app_exclude: A list of string prefixes of module names that do not belong to the app, but rather to
+            third-party packages.
+
+            Modules considered not part of the app will be hidden from stack traces by default.
+
+            This option can be overridden using `in_app_include`.
+
+        :param max_request_body_size: This parameter controls whether integrations should capture HTTP request bodies.
+            It can be set to one of the following values:
+
+            - `never`: Request bodies are never sent.
+            - `small`: Only small request bodies will be captured. The cutoff for small depends on the SDK (typically
+              4KB).
+            - `medium`: Medium and small requests will be captured (typically 10KB).
+            - `always`: The SDK will always capture the request body as long as Sentry can make sense of it.
+
+            Please note that the Sentry server `limits HTTP request body size
+            <https://develop.sentry.dev/sdk/expected-features/data-handling/#variable-size>`_. The server always
+            enforces its size limit, regardless of how you configure this option.
+
+        :param max_value_length: The number of characters after which the values containing text in the event payload
+            will be truncated.
+
+            WARNING: If the value you set for this is exceptionally large, the event may exceed 1 MiB and will be
+            dropped by Sentry.
+
+        :param ca_certs: A path to an alternative CA bundle file in PEM-format.
+
+        :param send_client_reports: Set this boolean to `False` to disable sending of client reports.
+
+            Client reports allow the client to send status reports about itself to Sentry, such as information about
+            events that were dropped before being sent.
+
+        :param integrations: List of integrations to enable in addition to `auto-enabling integrations (overview)
+            <https://docs.sentry.io/platforms/python/integrations>`_.
+
+            This setting can be used to override the default config options for a specific auto-enabling integration
+            or to add an integration that is not auto-enabled.
+
+        :param disabled_integrations: List of integrations that will be disabled.
+
+            This setting can be used to explicitly turn off specific `auto-enabling integrations (list)
+            <https://docs.sentry.io/platforms/python/integrations/#available-integrations>`_ or
+            `default <https://docs.sentry.io/platforms/python/integrations/default-integrations/>`_ integrations.
+
+        :param auto_enabling_integrations: Configures whether `auto-enabling integrations (configuration)
+            <https://docs.sentry.io/platforms/python/integrations/#available-integrations>`_ should be enabled.
+
+            When set to `False`, no auto-enabling integrations will be enabled by default, even if the corresponding
+            framework/library is detected.
+
+        :param default_integrations: Configures whether `default integrations
+            <https://docs.sentry.io/platforms/python/integrations/default-integrations/>`_ should be enabled.
+
+            Setting `default_integrations` to `False` disables all default integrations **as well as all auto-enabling
+            integrations**, unless they are specifically added in the `integrations` option, described above.
+
+        :param before_send: This function is called with an SDK-specific message or error event object, and can return
+            a modified event object, or `None` to skip reporting the event.
+
+            This can be used, for instance, for manual PII stripping before sending.
+
+            By the time `before_send` is executed, all scope data has already been applied to the event. Further
+            modification of the scope won't have any effect.
+
+        :param before_send_transaction: This function is called with an SDK-specific transaction event object, and can
+            return a modified transaction event object, or `None` to skip reporting the event.
+
+            One way this might be used is for manual PII stripping before sending.
+
+        :param before_breadcrumb: This function is called with an SDK-specific breadcrumb object before the breadcrumb
+            is added to the scope.
+
+            When nothing is returned from the function, the breadcrumb is dropped.
+
+            To pass the breadcrumb through, return the first argument, which contains the breadcrumb object.
+
+            The callback typically gets a second argument (called a "hint") which contains the original object from
+            which the breadcrumb was created to further customize what the breadcrumb should look like.
+
+        :param transport: Switches out the transport used to send events.
+
+            How this works depends on the SDK. It can, for instance, be used to capture events for unit-testing or to
+            send it through some more complex setup that requires proxy authentication.
+
+        :param transport_queue_size: The maximum number of events that will be queued before the transport is forced to
+            flush.
+
+        :param http_proxy: When set, a proxy can be configured that should be used for outbound requests.
+
+            This is also used for HTTPS requests unless a separate `https_proxy` is configured. However, not all SDKs
+            support a separate HTTPS proxy.
+
+            SDKs will attempt to default to the system-wide configured proxy, if possible. For instance, on Unix
+            systems, the `http_proxy` environment variable will be picked up.
+
+        :param https_proxy: Configures a separate proxy for outgoing HTTPS requests.
+
+            This value might not be supported by all SDKs. Where it is not supported, the `http_proxy` value is used
+            for HTTPS requests as well.
+
+        :param proxy_headers: A dict containing additional proxy headers (usually for authentication) to be forwarded
+            to `urllib3`'s `ProxyManager <https://urllib3.readthedocs.io/en/1.24.3/reference/index.html#urllib3.poolmanager.ProxyManager>`_.
+
+        :param shutdown_timeout: Controls how many seconds to wait before shutting down.
+
+            Sentry SDKs send events from a background queue. This queue is given a certain amount of time to drain
+            pending events. The default is SDK specific but typically around two seconds.
+
+            Setting this value too low may cause problems for sending events from command line applications.
+
+            Setting the value too high will cause the application to block for a long time for users experiencing
+            network connectivity problems.
+
+        :param keep_alive: Determines whether to keep the connection alive between requests.
+
+            This can be useful in environments where you encounter frequent network issues such as connection resets.
+
+        :param cert_file: Path to the client certificate to use.
+
+            If set, supersedes the `CLIENT_CERT_FILE` environment variable.
+
+        :param key_file: Path to the key file to use.
+
+            If set, supersedes the `CLIENT_KEY_FILE` environment variable.
+
+        :param socket_options: An optional list of socket options to use.
+
+            These provide fine-grained, low-level control over the way the SDK connects to Sentry.
+
+            If provided, the options will override the default `urllib3` `socket options
+            <https://urllib3.readthedocs.io/en/stable/reference/urllib3.connection.html#urllib3.connection.HTTPConnection>`_.
+
+        :param traces_sample_rate: A number between `0` and `1`, controlling the percentage chance a given transaction
+            will be sent to Sentry.
+
+            (`0` represents 0% while `1` represents 100%.) Applies equally to all transactions created in the app.
+
+            Either this or `traces_sampler` must be defined to enable tracing.
+
+            If `traces_sample_rate` is `0`, this means that no new traces will be created. However, if you have
+            another service (for example a JS frontend) that makes requests to your service that include trace
+            information, those traces will be continued and thus transactions will be sent to Sentry.
+
+            If you want to disable all tracing you need to set `traces_sample_rate=None`. In this case, no new traces
+            will be started and no incoming traces will be continued.
+
+        :param traces_sampler: A function responsible for determining the percentage chance a given transaction will be
+            sent to Sentry.
+
+            It will automatically be passed information about the transaction and the context in which it's being
+            created, and must return a number between `0` (0% chance of being sent) and `1` (100% chance of being
+            sent).
+
+            Can also be used for filtering transactions, by returning `0` for those that are unwanted.
+
+            Either this or `traces_sample_rate` must be defined to enable tracing.
+
+        :param trace_propagation_targets: An optional property that controls which downstream services receive tracing
+            data, in the form of a `sentry-trace` and a `baggage` header attached to any outgoing HTTP requests.
+
+            The option may contain a list of strings or regexes against which the URLs of outgoing requests are matched.
+
+            If one of the entries in the list matches the URL of an outgoing request, trace data will be attached to
+            that request.
+
+            String entries do not have to be full matches, meaning the URL of a request is matched when it _contains_
+            a string provided through the option.
+
+            If `trace_propagation_targets` is not provided, trace data is attached to every outgoing request from the
+            instrumented client.
+
+        :param functions_to_trace: An optional list of functions that should be set up for tracing.
+
+            For each function in the list, a span will be created when the function is executed.
+
+            Functions in the list are represented as strings containing the fully qualified name of the function.
+
+            This is a convenient option, making it possible to have one central place for configuring what functions
+            to trace, instead of having custom instrumentation scattered all over your code base.
+
+            To learn more, see the `Custom Instrumentation <https://docs.sentry.io/platforms/python/tracing/instrumentation/custom-instrumentation/#define-span-creation-in-a-central-place>`_ documentation.
+
+        :param enable_backpressure_handling: When enabled, a new monitor thread will be spawned to perform health
+            checks on the SDK.
+
+            If the system is unhealthy, the SDK will keep halving the `traces_sample_rate` set by you in 10 second
+            intervals until recovery.
+
+            This downsampling helps ensure that the system stays stable and reduces SDK overhead under high load.
+
+            This option is enabled by default.
+
+        :param enable_db_query_source: When enabled, the source location will be added to database queries.
+
+        :param db_query_source_threshold_ms: The threshold in milliseconds for adding the source location to database
+            queries.
+
+            The source location will be added to queries that are slower than the specified threshold.
+
+        :param custom_repr: A custom `repr <https://docs.python.org/3/library/functions.html#repr>`_ function to run
+            while serializing an object.
+
+            Use this to control how your custom objects and classes are visible in Sentry.
+
+            Return a string to use as the repr value, or `None` to let Sentry continue serializing the object as it
+            would have done anyway.
+
+        :param profiles_sample_rate: A number between `0` and `1`, controlling the percentage chance a given sampled
+            transaction will be profiled.
+
+            (`0` represents 0% while `1` represents 100%.) Applies equally to all transactions created in the app.
+
+            This is relative to the tracing sample rate - e.g. `0.5` means 50% of sampled transactions will be
+            profiled.
+
+        :param profiles_sampler:
+
+        :param profiler_mode:
+
+        :param profile_lifecycle:
+
+        :param profile_session_sample_rate:
+
+
+        :param enable_tracing:
+
+        :param propagate_traces:
+
+        :param auto_session_tracking:
+
+        :param spotlight:
+
+        :param instrumenter:
+
+        :param _experiments:
+        """
+        pass
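+
+
+# Illustrative only: `ClientConstructor` exists purely so type checkers see a
+# rich signature. In application code, the same keyword arguments are passed
+# to `sentry_sdk.init()` (DSN and values below are placeholders), e.g.
+#
+#     import sentry_sdk
+#     sentry_sdk.init(
+#         dsn="https://examplePublicKey@o0.ingest.sentry.io/0",
+#         release="my-app@1.0.0",
+#         traces_sample_rate=0.2,
+#         max_breadcrumbs=50,
+#     )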
+
+
+def _get_default_options():
+    # type: () -> dict[str, Any]
+    import inspect
+
+    a = inspect.getfullargspec(ClientConstructor.__init__)
+    defaults = a.defaults or ()
+    kwonlydefaults = a.kwonlydefaults or {}
+
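+    # `defaults` lines up with the *last* len(defaults) positional parameters,
+    # hence the negative slice below; keyword-only defaults are merged in
+    # afterwards.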
+    return dict(
+        itertools.chain(
+            zip(a.args[-len(defaults) :], defaults),
+            kwonlydefaults.items(),
+        )
+    )
+
+
+DEFAULT_OPTIONS = _get_default_options()
+del _get_default_options
+
+
+VERSION = "2.24.1"
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/crons/__init__.py b/.venv/lib/python3.12/site-packages/sentry_sdk/crons/__init__.py
new file mode 100644
index 00000000..6f748aae
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/crons/__init__.py
@@ -0,0 +1,10 @@
+from sentry_sdk.crons.api import capture_checkin
+from sentry_sdk.crons.consts import MonitorStatus
+from sentry_sdk.crons.decorator import monitor
+
+
+__all__ = [
+    "capture_checkin",
+    "MonitorStatus",
+    "monitor",
+]
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/crons/api.py b/.venv/lib/python3.12/site-packages/sentry_sdk/crons/api.py
new file mode 100644
index 00000000..20e95685
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/crons/api.py
@@ -0,0 +1,57 @@
+import uuid
+
+import sentry_sdk
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Optional
+    from sentry_sdk._types import Event, MonitorConfig
+
+
+def _create_check_in_event(
+    monitor_slug=None,  # type: Optional[str]
+    check_in_id=None,  # type: Optional[str]
+    status=None,  # type: Optional[str]
+    duration_s=None,  # type: Optional[float]
+    monitor_config=None,  # type: Optional[MonitorConfig]
+):
+    # type: (...) -> Event
+    options = sentry_sdk.get_client().options
+    check_in_id = check_in_id or uuid.uuid4().hex  # type: str
+
+    check_in = {
+        "type": "check_in",
+        "monitor_slug": monitor_slug,
+        "check_in_id": check_in_id,
+        "status": status,
+        "duration": duration_s,
+        "environment": options.get("environment", None),
+        "release": options.get("release", None),
+    }  # type: Event
+
+    if monitor_config:
+        check_in["monitor_config"] = monitor_config
+
+    return check_in
+
+
+def capture_checkin(
+    monitor_slug=None,  # type: Optional[str]
+    check_in_id=None,  # type: Optional[str]
+    status=None,  # type: Optional[str]
+    duration=None,  # type: Optional[float]
+    monitor_config=None,  # type: Optional[MonitorConfig]
+):
+    # type: (...) -> str
+    check_in_event = _create_check_in_event(
+        monitor_slug=monitor_slug,
+        check_in_id=check_in_id,
+        status=status,
+        duration_s=duration,
+        monitor_config=monitor_config,
+    )
+
+    sentry_sdk.capture_event(check_in_event)
+
+    return check_in_event["check_in_id"]
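+
+
+# Usage sketch (illustrative; `MonitorStatus` lives in
+# `sentry_sdk.crons.consts` and the slug is a placeholder):
+#
+#     check_in_id = capture_checkin(
+#         monitor_slug="nightly-backup",
+#         status=MonitorStatus.IN_PROGRESS,
+#     )
+#     ...  # run the job
+#     capture_checkin(
+#         monitor_slug="nightly-backup",
+#         check_in_id=check_in_id,
+#         status=MonitorStatus.OK,
+#     )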
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/crons/consts.py b/.venv/lib/python3.12/site-packages/sentry_sdk/crons/consts.py
new file mode 100644
index 00000000..be686b45
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/crons/consts.py
@@ -0,0 +1,4 @@
+class MonitorStatus:
+    IN_PROGRESS = "in_progress"
+    OK = "ok"
+    ERROR = "error"
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/crons/decorator.py b/.venv/lib/python3.12/site-packages/sentry_sdk/crons/decorator.py
new file mode 100644
index 00000000..9af00e61
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/crons/decorator.py
@@ -0,0 +1,135 @@
+from functools import wraps
+from inspect import iscoroutinefunction
+
+from sentry_sdk.crons import capture_checkin
+from sentry_sdk.crons.consts import MonitorStatus
+from sentry_sdk.utils import now
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from collections.abc import Awaitable, Callable
+    from types import TracebackType
+    from typing import (
+        Any,
+        Optional,
+        ParamSpec,
+        Type,
+        TypeVar,
+        Union,
+        cast,
+        overload,
+    )
+    from sentry_sdk._types import MonitorConfig
+
+    P = ParamSpec("P")
+    R = TypeVar("R")
+
+
+class monitor:  # noqa: N801
+    """
+    Decorator/context manager to capture checkin events for a monitor.
+
+    Usage (as decorator):
+    ```
+    import sentry_sdk
+
+    app = Celery()
+
+    @app.task
+    @sentry_sdk.monitor(monitor_slug='my-fancy-slug')
+    def test(arg):
+        print(arg)
+    ```
+
+    This does not have to be used with Celery, but if you do use it with Celery,
+    put the `@sentry_sdk.monitor` decorator below Celery's `@app.task` decorator.
+
+    Usage (as context manager):
+    ```
+    import sentry_sdk
+
+    def test(arg):
+        with sentry_sdk.monitor(monitor_slug='my-fancy-slug'):
+            print(arg)
+    ```
+    """
+
+    def __init__(self, monitor_slug=None, monitor_config=None):
+        # type: (Optional[str], Optional[MonitorConfig]) -> None
+        self.monitor_slug = monitor_slug
+        self.monitor_config = monitor_config
+
+    def __enter__(self):
+        # type: () -> None
+        self.start_timestamp = now()
+        self.check_in_id = capture_checkin(
+            monitor_slug=self.monitor_slug,
+            status=MonitorStatus.IN_PROGRESS,
+            monitor_config=self.monitor_config,
+        )
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        # type: (Optional[Type[BaseException]], Optional[BaseException], Optional[TracebackType]) -> None
+        duration_s = now() - self.start_timestamp
+
+        if exc_type is None and exc_value is None and traceback is None:
+            status = MonitorStatus.OK
+        else:
+            status = MonitorStatus.ERROR
+
+        capture_checkin(
+            monitor_slug=self.monitor_slug,
+            check_in_id=self.check_in_id,
+            status=status,
+            duration=duration_s,
+            monitor_config=self.monitor_config,
+        )
+
+    if TYPE_CHECKING:
+
+        @overload
+        def __call__(self, fn):
+            # type: (Callable[P, Awaitable[Any]]) -> Callable[P, Awaitable[Any]]
+            # Unfortunately, mypy does not give us any reliable way to type check the
+            # return value of an Awaitable (i.e. async function) for this overload,
+            # since calling iscoroutinefunction narrows the type to Callable[P, Awaitable[Any]].
+            ...
+
+        @overload
+        def __call__(self, fn):
+            # type: (Callable[P, R]) -> Callable[P, R]
+            ...
+
+    def __call__(
+        self,
+        fn,  # type: Union[Callable[P, R], Callable[P, Awaitable[Any]]]
+    ):
+        # type: (...) -> Union[Callable[P, R], Callable[P, Awaitable[Any]]]
+        if iscoroutinefunction(fn):
+            return self._async_wrapper(fn)
+
+        else:
+            if TYPE_CHECKING:
+                fn = cast("Callable[P, R]", fn)
+            return self._sync_wrapper(fn)
+
+    def _async_wrapper(self, fn):
+        # type: (Callable[P, Awaitable[Any]]) -> Callable[P, Awaitable[Any]]
+        @wraps(fn)
+        async def inner(*args: "P.args", **kwargs: "P.kwargs"):
+            # type: (...) -> R
+            with self:
+                return await fn(*args, **kwargs)
+
+        return inner
+
+    def _sync_wrapper(self, fn):
+        # type: (Callable[P, R]) -> Callable[P, R]
+        @wraps(fn)
+        def inner(*args: "P.args", **kwargs: "P.kwargs"):
+            # type: (...) -> R
+            with self:
+                return fn(*args, **kwargs)
+
+        return inner
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/debug.py b/.venv/lib/python3.12/site-packages/sentry_sdk/debug.py
new file mode 100644
index 00000000..f740d92d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/debug.py
@@ -0,0 +1,41 @@
+import sys
+import logging
+import warnings
+
+from sentry_sdk import get_client
+from sentry_sdk.client import _client_init_debug
+from sentry_sdk.utils import logger
+from logging import LogRecord
+
+
+class _DebugFilter(logging.Filter):
+    def filter(self, record):
+        # type: (LogRecord) -> bool
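+        # Pass records while `sentry_sdk.init` is running with debug enabled
+        # (tracked via the `_client_init_debug` context variable), or when the
+        # active client was configured with `debug=True`.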
+        if _client_init_debug.get(False):
+            return True
+
+        return get_client().options["debug"]
+
+
+def init_debug_support():
+    # type: () -> None
+    if not logger.hasHandlers():
+        configure_logger()
+
+
+def configure_logger():
+    # type: () -> None
+    _handler = logging.StreamHandler(sys.stderr)
+    _handler.setFormatter(logging.Formatter(" [sentry] %(levelname)s: %(message)s"))
+    logger.addHandler(_handler)
+    logger.setLevel(logging.DEBUG)
+    logger.addFilter(_DebugFilter())
+
+
+def configure_debug_hub():
+    # type: () -> None
+    warnings.warn(
+        "configure_debug_hub is deprecated. Please remove calls to it, as it is a no-op.",
+        DeprecationWarning,
+        stacklevel=2,
+    )
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/envelope.py b/.venv/lib/python3.12/site-packages/sentry_sdk/envelope.py
new file mode 100644
index 00000000..044d2820
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/envelope.py
@@ -0,0 +1,361 @@
+import io
+import json
+import mimetypes
+
+from sentry_sdk.session import Session
+from sentry_sdk.utils import json_dumps, capture_internal_exceptions
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+    from typing import Optional
+    from typing import Union
+    from typing import Dict
+    from typing import List
+    from typing import Iterator
+
+    from sentry_sdk._types import Event, EventDataCategory
+
+
+def parse_json(data):
+    # type: (Union[bytes, str]) -> Any
+    # json.loads on older Python 3 versions does not accept bytes, so decode first
+    if isinstance(data, bytes):
+        data = data.decode("utf-8", "replace")
+    return json.loads(data)
+
+
+class Envelope:
+    """
+    Represents a Sentry Envelope. The calling code is responsible for adhering to the constraints
+    documented in the Sentry docs: https://develop.sentry.dev/sdk/envelopes/#data-model. In particular,
+    each envelope may have at most one Item with type "event" or "transaction" (but not both).
+    """
+
+    def __init__(
+        self,
+        headers=None,  # type: Optional[Dict[str, Any]]
+        items=None,  # type: Optional[List[Item]]
+    ):
+        # type: (...) -> None
+        if headers is not None:
+            headers = dict(headers)
+        self.headers = headers or {}
+        if items is None:
+            items = []
+        else:
+            items = list(items)
+        self.items = items
+
+    @property
+    def description(self):
+        # type: (...) -> str
+        return "envelope with %s items (%s)" % (
+            len(self.items),
+            ", ".join(x.data_category for x in self.items),
+        )
+
+    def add_event(
+        self, event  # type: Event
+    ):
+        # type: (...) -> None
+        self.add_item(Item(payload=PayloadRef(json=event), type="event"))
+
+    def add_transaction(
+        self, transaction  # type: Event
+    ):
+        # type: (...) -> None
+        self.add_item(Item(payload=PayloadRef(json=transaction), type="transaction"))
+
+    def add_profile(
+        self, profile  # type: Any
+    ):
+        # type: (...) -> None
+        self.add_item(Item(payload=PayloadRef(json=profile), type="profile"))
+
+    def add_profile_chunk(
+        self, profile_chunk  # type: Any
+    ):
+        # type: (...) -> None
+        self.add_item(
+            Item(
+                payload=PayloadRef(json=profile_chunk),
+                type="profile_chunk",
+                headers={"platform": profile_chunk.get("platform", "python")},
+            )
+        )
+
+    def add_checkin(
+        self, checkin  # type: Any
+    ):
+        # type: (...) -> None
+        self.add_item(Item(payload=PayloadRef(json=checkin), type="check_in"))
+
+    def add_session(
+        self, session  # type: Union[Session, Any]
+    ):
+        # type: (...) -> None
+        if isinstance(session, Session):
+            session = session.to_json()
+        self.add_item(Item(payload=PayloadRef(json=session), type="session"))
+
+    def add_sessions(
+        self, sessions  # type: Any
+    ):
+        # type: (...) -> None
+        self.add_item(Item(payload=PayloadRef(json=sessions), type="sessions"))
+
+    def add_log(
+        self, log  # type: Any
+    ):
+        # type: (...) -> None
+        self.add_item(Item(payload=PayloadRef(json=log), type="otel_log"))
+
+    def add_item(
+        self, item  # type: Item
+    ):
+        # type: (...) -> None
+        self.items.append(item)
+
+    def get_event(self):
+        # type: (...) -> Optional[Event]
+        for item in self.items:
+            event = item.get_event()
+            if event is not None:
+                return event
+        return None
+
+    def get_transaction_event(self):
+        # type: (...) -> Optional[Event]
+        for item in self.items:
+            event = item.get_transaction_event()
+            if event is not None:
+                return event
+        return None
+
+    def __iter__(self):
+        # type: (...) -> Iterator[Item]
+        return iter(self.items)
+
+    def serialize_into(
+        self, f  # type: Any
+    ):
+        # type: (...) -> None
+        f.write(json_dumps(self.headers))
+        f.write(b"\n")
+        for item in self.items:
+            item.serialize_into(f)
+
+    def serialize(self):
+        # type: (...) -> bytes
+        out = io.BytesIO()
+        self.serialize_into(out)
+        return out.getvalue()
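+
+    # Wire-format sketch (illustrative): the envelope headers go out as one
+    # JSON line, then each item contributes a JSON item-headers line followed
+    # by its raw payload bytes and a trailing newline:
+    #
+    #     env = Envelope()
+    #     env.add_event({"message": "hello"})
+    #     data = env.serialize()  # headers line, item headers line, payload, "\n"-separated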
+
+    @classmethod
+    def deserialize_from(
+        cls, f  # type: Any
+    ):
+        # type: (...) -> Envelope
+        headers = parse_json(f.readline())
+        items = []
+        while True:
+            item = Item.deserialize_from(f)
+            if item is None:
+                break
+            items.append(item)
+        return cls(headers=headers, items=items)
+
+    @classmethod
+    def deserialize(
+        cls, bytes  # type: bytes
+    ):
+        # type: (...) -> Envelope
+        return cls.deserialize_from(io.BytesIO(bytes))
+
+    def __repr__(self):
+        # type: (...) -> str
+        return "<Envelope headers=%r items=%r>" % (self.headers, self.items)
+
+
+class PayloadRef:
+    def __init__(
+        self,
+        bytes=None,  # type: Optional[bytes]
+        path=None,  # type: Optional[Union[bytes, str]]
+        json=None,  # type: Optional[Any]
+    ):
+        # type: (...) -> None
+        self.json = json
+        self.bytes = bytes
+        self.path = path
+
+    def get_bytes(self):
+        # type: (...) -> bytes
+        if self.bytes is None:
+            if self.path is not None:
+                with capture_internal_exceptions():
+                    with open(self.path, "rb") as f:
+                        self.bytes = f.read()
+            elif self.json is not None:
+                self.bytes = json_dumps(self.json)
+        return self.bytes or b""
+
+    @property
+    def inferred_content_type(self):
+        # type: (...) -> str
+        if self.json is not None:
+            return "application/json"
+        elif self.path is not None:
+            path = self.path
+            if isinstance(path, bytes):
+                path = path.decode("utf-8", "replace")
+            ty = mimetypes.guess_type(path)[0]
+            if ty:
+                return ty
+        return "application/octet-stream"
+
+    def __repr__(self):
+        # type: (...) -> str
+        return "<Payload %r>" % (self.inferred_content_type,)
+
+
+class Item:
+    def __init__(
+        self,
+        payload,  # type: Union[bytes, str, PayloadRef]
+        headers=None,  # type: Optional[Dict[str, Any]]
+        type=None,  # type: Optional[str]
+        content_type=None,  # type: Optional[str]
+        filename=None,  # type: Optional[str]
+    ):
+        if headers is None:
+            headers = {}
+        else:
+            headers = dict(headers)
+        self.headers = headers
+        if isinstance(payload, bytes):
+            payload = PayloadRef(bytes=payload)
+        elif isinstance(payload, str):
+            payload = PayloadRef(bytes=payload.encode("utf-8"))
+
+        if filename is not None:
+            headers["filename"] = filename
+        if type is not None:
+            headers["type"] = type
+        if content_type is not None:
+            headers["content_type"] = content_type
+        elif "content_type" not in headers:
+            headers["content_type"] = payload.inferred_content_type
+
+        self.payload = payload
+
+    def __repr__(self):
+        # type: (...) -> str
+        return "<Item headers=%r payload=%r data_category=%r>" % (
+            self.headers,
+            self.payload,
+            self.data_category,
+        )
+
+    @property
+    def type(self):
+        # type: (...) -> Optional[str]
+        return self.headers.get("type")
+
+    @property
+    def data_category(self):
+        # type: (...) -> EventDataCategory
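+        # Map the item type to the data category used for rate limiting and
+        # client reports; unrecognized types fall back to "default".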
+        ty = self.headers.get("type")
+        if ty == "session" or ty == "sessions":
+            return "session"
+        elif ty == "attachment":
+            return "attachment"
+        elif ty == "transaction":
+            return "transaction"
+        elif ty == "event":
+            return "error"
+        elif ty == "otel_log":
+            return "log"
+        elif ty == "client_report":
+            return "internal"
+        elif ty == "profile":
+            return "profile"
+        elif ty == "profile_chunk":
+            return "profile_chunk"
+        elif ty == "statsd":
+            return "metric_bucket"
+        elif ty == "check_in":
+            return "monitor"
+        else:
+            return "default"
+
+    def get_bytes(self):
+        # type: (...) -> bytes
+        return self.payload.get_bytes()
+
+    def get_event(self):
+        # type: (...) -> Optional[Event]
+        """
+        Returns an error event if there is one.
+        """
+        if self.type == "event" and self.payload.json is not None:
+            return self.payload.json
+        return None
+
+    def get_transaction_event(self):
+        # type: (...) -> Optional[Event]
+        if self.type == "transaction" and self.payload.json is not None:
+            return self.payload.json
+        return None
+
+    def serialize_into(
+        self, f  # type: Any
+    ):
+        # type: (...) -> None
+        headers = dict(self.headers)
+        bytes = self.get_bytes()
+        headers["length"] = len(bytes)
+        f.write(json_dumps(headers))
+        f.write(b"\n")
+        f.write(bytes)
+        f.write(b"\n")
+
+    def serialize(self):
+        # type: (...) -> bytes
+        out = io.BytesIO()
+        self.serialize_into(out)
+        return out.getvalue()
+
+    @classmethod
+    def deserialize_from(
+        cls, f  # type: Any
+    ):
+        # type: (...) -> Optional[Item]
+        line = f.readline().rstrip()
+        if not line:
+            return None
+        headers = parse_json(line)
+        length = headers.get("length")
+        if length is not None:
+            payload = f.read(length)
+            f.readline()
+        else:
+            # if no length was specified we need to read up to the end of line
+            # and remove it (if it is present, i.e. not the very last char in an eof terminated envelope)
+            payload = f.readline().rstrip(b"\n")
+        if headers.get("type") in ("event", "transaction", "metric_buckets"):
+            rv = cls(headers=headers, payload=PayloadRef(json=parse_json(payload)))
+        else:
+            rv = cls(headers=headers, payload=payload)
+        return rv
+
+    @classmethod
+    def deserialize(
+        cls, bytes  # type: bytes
+    ):
+        # type: (...) -> Optional[Item]
+        return cls.deserialize_from(io.BytesIO(bytes))
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/feature_flags.py b/.venv/lib/python3.12/site-packages/sentry_sdk/feature_flags.py
new file mode 100644
index 00000000..a0b13383
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/feature_flags.py
@@ -0,0 +1,68 @@
+import copy
+import sentry_sdk
+from sentry_sdk._lru_cache import LRUCache
+from threading import Lock
+
+from typing import TYPE_CHECKING, Any
+
+if TYPE_CHECKING:
+    from typing import TypedDict
+
+    FlagData = TypedDict("FlagData", {"flag": str, "result": bool})
+
+
+DEFAULT_FLAG_CAPACITY = 100
+
+
+class FlagBuffer:
+
+    def __init__(self, capacity):
+        # type: (int) -> None
+        self.capacity = capacity
+        self.lock = Lock()
+
+        # Buffer is private. The name is mangled to discourage use. If you use this attribute
+        # directly you're on your own!
+        self.__buffer = LRUCache(capacity)
+
+    def clear(self):
+        # type: () -> None
+        self.__buffer = LRUCache(self.capacity)
+
+    def __deepcopy__(self, memo):
+        # type: (dict[int, Any]) -> FlagBuffer
+        with self.lock:
+            buffer = FlagBuffer(self.capacity)
+            buffer.__buffer = copy.deepcopy(self.__buffer, memo)
+            return buffer
+
+    def get(self):
+        # type: () -> list[FlagData]
+        with self.lock:
+            return [
+                {"flag": key, "result": value} for key, value in self.__buffer.get_all()
+            ]
+
+    def set(self, flag, result):
+        # type: (str, bool) -> None
+        if isinstance(result, FlagBuffer):
+            # If someone were to insert `self` into `self` this would create a circular dependency
+            # on the lock. This is of course a deadlock. However, this is far outside the expected
+            # usage of this class. We guard against it here for completeness and to document this
+            # expected failure mode.
+            raise ValueError(
+                "FlagBuffer instances can not be inserted into the dictionary."
+            )
+
+        with self.lock:
+            self.__buffer.set(flag, result)
+
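+# Editor's note -- behavior sketch, assuming the underlying LRUCache evicts the
+# least-recently-set entry once `capacity` is exceeded and that get_all()
+# yields entries oldest-first:
+#
+#     buffer = FlagBuffer(capacity=2)
+#     buffer.set("a", True)
+#     buffer.set("b", False)
+#     buffer.set("c", True)   # evicts "a"
+#     assert buffer.get() == [{"flag": "b", "result": False},
+#                             {"flag": "c", "result": True}]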
+
+def add_feature_flag(flag, result):
+    # type: (str, bool) -> None
+    """
+    Records a flag and its value to be sent on subsequent error events.
+    We recommend you do this on flag evaluations. Flags are buffered per Sentry scope.
+    """
+    flags = sentry_sdk.get_current_scope().flags
+    flags.set(flag, result)
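+
+
+# Editor's note -- usage sketch, not part of the SDK (the flag name and the
+# `my_flag_provider` helper are hypothetical):
+#
+#     import sentry_sdk
+#     from sentry_sdk.feature_flags import add_feature_flag
+#
+#     sentry_sdk.init(dsn="...")
+#     enabled = my_flag_provider.is_enabled("new-checkout")
+#     add_feature_flag("new-checkout", enabled)
+#     # flags buffered on the current scope ride along on later error events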
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/hub.py b/.venv/lib/python3.12/site-packages/sentry_sdk/hub.py
new file mode 100644
index 00000000..7fda9202
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/hub.py
@@ -0,0 +1,739 @@
+import warnings
+from contextlib import contextmanager
+
+from sentry_sdk import (
+    get_client,
+    get_global_scope,
+    get_isolation_scope,
+    get_current_scope,
+)
+from sentry_sdk._compat import with_metaclass
+from sentry_sdk.consts import INSTRUMENTER
+from sentry_sdk.scope import _ScopeManager
+from sentry_sdk.client import Client
+from sentry_sdk.tracing import (
+    NoOpSpan,
+    Span,
+    Transaction,
+)
+
+from sentry_sdk.utils import (
+    logger,
+    ContextVar,
+)
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+    from typing import Callable
+    from typing import ContextManager
+    from typing import Dict
+    from typing import Generator
+    from typing import List
+    from typing import Optional
+    from typing import overload
+    from typing import Tuple
+    from typing import Type
+    from typing import TypeVar
+    from typing import Union
+
+    from typing_extensions import Unpack
+
+    from sentry_sdk.scope import Scope
+    from sentry_sdk.client import BaseClient
+    from sentry_sdk.integrations import Integration
+    from sentry_sdk._types import (
+        Event,
+        Hint,
+        Breadcrumb,
+        BreadcrumbHint,
+        ExcInfo,
+        LogLevelStr,
+        SamplingContext,
+    )
+    from sentry_sdk.tracing import TransactionKwargs
+
+    T = TypeVar("T")
+
+else:
+
+    def overload(x):
+        # type: (T) -> T
+        return x
+
+
+class SentryHubDeprecationWarning(DeprecationWarning):
+    """
+    A custom deprecation warning to inform users that the Hub is deprecated.
+    """
+
+    _MESSAGE = (
+        "`sentry_sdk.Hub` is deprecated and will be removed in a future major release. "
+        "Please consult our 1.x to 2.x migration guide for details on how to migrate "
+        "`Hub` usage to the new API: "
+        "https://docs.sentry.io/platforms/python/migration/1.x-to-2.x"
+    )
+
+    def __init__(self, *_):
+        # type: (*object) -> None
+        super().__init__(self._MESSAGE)
+
+
+@contextmanager
+def _suppress_hub_deprecation_warning():
+    # type: () -> Generator[None, None, None]
+    """Utility function to suppress deprecation warnings for the Hub."""
+    with warnings.catch_warnings():
+        warnings.filterwarnings("ignore", category=SentryHubDeprecationWarning)
+        yield
+
+
+_local = ContextVar("sentry_current_hub")
+
+
+class HubMeta(type):
+    @property
+    def current(cls):
+        # type: () -> Hub
+        """Returns the current instance of the hub."""
+        warnings.warn(SentryHubDeprecationWarning(), stacklevel=2)
+        rv = _local.get(None)
+        if rv is None:
+            with _suppress_hub_deprecation_warning():
+                # This will raise a deprecation warning; suppress it since we already warned above.
+                rv = Hub(GLOBAL_HUB)
+            _local.set(rv)
+        return rv
+
+    @property
+    def main(cls):
+        # type: () -> Hub
+        """Returns the main instance of the hub."""
+        warnings.warn(SentryHubDeprecationWarning(), stacklevel=2)
+        return GLOBAL_HUB
+
+
+class Hub(with_metaclass(HubMeta)):  # type: ignore
+    """
+    .. deprecated:: 2.0.0
+        The Hub is deprecated. Its functionality will be merged into :py:class:`sentry_sdk.scope.Scope`.
+
+    The hub wraps the concurrency management of the SDK.  Each thread has
+    its own hub but the hub might transfer with the flow of execution if
+    context vars are available.
+
+    If the hub is used with a with statement it's temporarily activated.
+    """
+
+    _stack = None  # type: List[Tuple[Optional[Client], Scope]]
+    _scope = None  # type: Optional[Scope]
+
+    # Mypy doesn't pick up on the metaclass.
+
+    if TYPE_CHECKING:
+        current = None  # type: Hub
+        main = None  # type: Hub
+
+    def __init__(
+        self,
+        client_or_hub=None,  # type: Optional[Union[Hub, Client]]
+        scope=None,  # type: Optional[Any]
+    ):
+        # type: (...) -> None
+        warnings.warn(SentryHubDeprecationWarning(), stacklevel=2)
+
+        current_scope = None
+
+        if isinstance(client_or_hub, Hub):
+            client = get_client()
+            if scope is None:
+                # hub cloning is going on; use forks of the isolation and current scopes for the context manager
+                scope = get_isolation_scope().fork()
+                current_scope = get_current_scope().fork()
+        else:
+            client = client_or_hub  # type: ignore
+            get_global_scope().set_client(client)
+
+        if scope is None:  # no Hub cloning is going on
+            # use the current isolation scope for the context manager
+            scope = get_isolation_scope()
+            current_scope = get_current_scope()
+
+        if current_scope is None:
+            # use the current "current scope" for the context manager
+            current_scope = get_current_scope()
+
+        self._stack = [(client, scope)]  # type: ignore
+        self._last_event_id = None  # type: Optional[str]
+        self._old_hubs = []  # type: List[Hub]
+
+        self._old_current_scopes = []  # type: List[Scope]
+        self._old_isolation_scopes = []  # type: List[Scope]
+        self._current_scope = current_scope  # type: Scope
+        self._scope = scope  # type: Scope
+
+    def __enter__(self):
+        # type: () -> Hub
+        self._old_hubs.append(Hub.current)
+        _local.set(self)
+
+        current_scope = get_current_scope()
+        self._old_current_scopes.append(current_scope)
+        scope._current_scope.set(self._current_scope)
+
+        isolation_scope = get_isolation_scope()
+        self._old_isolation_scopes.append(isolation_scope)
+        scope._isolation_scope.set(self._scope)
+
+        return self
+
+    def __exit__(
+        self,
+        exc_type,  # type: Optional[type]
+        exc_value,  # type: Optional[BaseException]
+        tb,  # type: Optional[Any]
+    ):
+        # type: (...) -> None
+        old = self._old_hubs.pop()
+        _local.set(old)
+
+        old_current_scope = self._old_current_scopes.pop()
+        scope._current_scope.set(old_current_scope)
+
+        old_isolation_scope = self._old_isolation_scopes.pop()
+        scope._isolation_scope.set(old_isolation_scope)
+
+    def run(
+        self, callback  # type: Callable[[], T]
+    ):
+        # type: (...) -> T
+        """
+        .. deprecated:: 2.0.0
+            This function is deprecated and will be removed in a future release.
+
+        Runs a callback in the context of the hub.  Alternatively the
+        with statement can be used on the hub directly.
+        """
+        with self:
+            return callback()
+
+    def get_integration(
+        self, name_or_class  # type: Union[str, Type[Integration]]
+    ):
+        # type: (...) -> Any
+        """
+        .. deprecated:: 2.0.0
+            This function is deprecated and will be removed in a future release.
+            Please use :py:meth:`sentry_sdk.client._Client.get_integration` instead.
+
+        Returns the integration for this hub by name or class.  If there
+        is no client bound or the client does not have that integration
+        then `None` is returned.
+
+        If the return value is not `None` the hub is guaranteed to have a
+        client attached.
+        """
+        return get_client().get_integration(name_or_class)
+
+    @property
+    def client(self):
+        # type: () -> Optional[BaseClient]
+        """
+        .. deprecated:: 2.0.0
+            This property is deprecated and will be removed in a future release.
+            Please use :py:func:`sentry_sdk.api.get_client` instead.
+
+        Returns the current client on the hub.
+        """
+        client = get_client()
+
+        if not client.is_active():
+            return None
+
+        return client
+
+    @property
+    def scope(self):
+        # type: () -> Scope
+        """
+        .. deprecated:: 2.0.0
+            This property is deprecated and will be removed in a future release.
+
+        Returns the current scope on the hub.
+        """
+        return get_isolation_scope()
+
+    def last_event_id(self):
+        # type: () -> Optional[str]
+        """
+        Returns the last event ID.
+
+        .. deprecated:: 1.40.5
+            This function is deprecated and will be removed in a future release. The functions `capture_event`, `capture_message`, and `capture_exception` return the event ID directly.
+        """
+        logger.warning(
+            "Deprecated: last_event_id is deprecated. This will be removed in the future. The functions `capture_event`, `capture_message`, and `capture_exception` return the event ID directly."
+        )
+        return self._last_event_id
+
+    def bind_client(
+        self, new  # type: Optional[BaseClient]
+    ):
+        # type: (...) -> None
+        """
+        .. deprecated:: 2.0.0
+            This function is deprecated and will be removed in a future release.
+            Please use :py:meth:`sentry_sdk.Scope.set_client` instead.
+
+        Binds a new client to the hub.
+        """
+        get_global_scope().set_client(new)
+
+    def capture_event(self, event, hint=None, scope=None, **scope_kwargs):
+        # type: (Event, Optional[Hint], Optional[Scope], Any) -> Optional[str]
+        """
+        .. deprecated:: 2.0.0
+            This function is deprecated and will be removed in a future release.
+            Please use :py:meth:`sentry_sdk.Scope.capture_event` instead.
+
+        Captures an event.
+
+        Alias of :py:meth:`sentry_sdk.Scope.capture_event`.
+
+        :param event: A ready-made event that can be directly sent to Sentry.
+
+        :param hint: Contains metadata about the event that can be read from `before_send`, such as the original exception object or a HTTP request object.
+
+        :param scope: An optional :py:class:`sentry_sdk.Scope` to apply to events.
+            The `scope` and `scope_kwargs` parameters are mutually exclusive.
+
+        :param scope_kwargs: Optional data to apply to event.
+            For supported `**scope_kwargs` see :py:meth:`sentry_sdk.Scope.update_from_kwargs`.
+            The `scope` and `scope_kwargs` parameters are mutually exclusive.
+        """
+        last_event_id = get_current_scope().capture_event(
+            event, hint, scope=scope, **scope_kwargs
+        )
+
+        is_transaction = event.get("type") == "transaction"
+        if last_event_id is not None and not is_transaction:
+            self._last_event_id = last_event_id
+
+        return last_event_id
+
+    def capture_message(self, message, level=None, scope=None, **scope_kwargs):
+        # type: (str, Optional[LogLevelStr], Optional[Scope], Any) -> Optional[str]
+        """
+        .. deprecated:: 2.0.0
+            This function is deprecated and will be removed in a future release.
+            Please use :py:meth:`sentry_sdk.Scope.capture_message` instead.
+
+        Captures a message.
+
+        Alias of :py:meth:`sentry_sdk.Scope.capture_message`.
+
+        :param message: The string to send as the message to Sentry.
+
+        :param level: If no level is provided, the default level is `info`.
+
+        :param scope: An optional :py:class:`sentry_sdk.Scope` to apply to events.
+            The `scope` and `scope_kwargs` parameters are mutually exclusive.
+
+        :param scope_kwargs: Optional data to apply to event.
+            For supported `**scope_kwargs` see :py:meth:`sentry_sdk.Scope.update_from_kwargs`.
+            The `scope` and `scope_kwargs` parameters are mutually exclusive.
+
+        :returns: An `event_id` if the SDK decided to send the event (see :py:meth:`sentry_sdk.client._Client.capture_event`).
+        """
+        last_event_id = get_current_scope().capture_message(
+            message, level=level, scope=scope, **scope_kwargs
+        )
+
+        if last_event_id is not None:
+            self._last_event_id = last_event_id
+
+        return last_event_id
+
+    def capture_exception(self, error=None, scope=None, **scope_kwargs):
+        # type: (Optional[Union[BaseException, ExcInfo]], Optional[Scope], Any) -> Optional[str]
+        """
+        .. deprecated:: 2.0.0
+            This function is deprecated and will be removed in a future release.
+            Please use :py:meth:`sentry_sdk.Scope.capture_exception` instead.
+
+        Captures an exception.
+
+        Alias of :py:meth:`sentry_sdk.Scope.capture_exception`.
+
+        :param error: An exception to capture. If `None`, `sys.exc_info()` will be used.
+
+        :param scope: An optional :py:class:`sentry_sdk.Scope` to apply to events.
+            The `scope` and `scope_kwargs` parameters are mutually exclusive.
+
+        :param scope_kwargs: Optional data to apply to event.
+            For supported `**scope_kwargs` see :py:meth:`sentry_sdk.Scope.update_from_kwargs`.
+            The `scope` and `scope_kwargs` parameters are mutually exclusive.
+
+        :returns: An `event_id` if the SDK decided to send the event (see :py:meth:`sentry_sdk.client._Client.capture_event`).
+        """
+        last_event_id = get_current_scope().capture_exception(
+            error, scope=scope, **scope_kwargs
+        )
+
+        if last_event_id is not None:
+            self._last_event_id = last_event_id
+
+        return last_event_id
+
+    def add_breadcrumb(self, crumb=None, hint=None, **kwargs):
+        # type: (Optional[Breadcrumb], Optional[BreadcrumbHint], Any) -> None
+        """
+        .. deprecated:: 2.0.0
+            This function is deprecated and will be removed in a future release.
+            Please use :py:meth:`sentry_sdk.Scope.add_breadcrumb` instead.
+
+        Adds a breadcrumb.
+
+        :param crumb: Dictionary with the data as the sentry v7/v8 protocol expects.
+
+        :param hint: An optional value that can be used by `before_breadcrumb`
+            to customize the breadcrumbs that are emitted.
+        """
+        get_isolation_scope().add_breadcrumb(crumb, hint, **kwargs)
+
+    def start_span(self, instrumenter=INSTRUMENTER.SENTRY, **kwargs):
+        # type: (str, Any) -> Span
+        """
+        .. deprecated:: 2.0.0
+            This function is deprecated and will be removed in a future release.
+            Please use :py:meth:`sentry_sdk.Scope.start_span` instead.
+
+        Start a span whose parent is the currently active span or transaction, if any.
+
+        The return value is a :py:class:`sentry_sdk.tracing.Span` instance,
+        typically used as a context manager to start and stop timing in a `with`
+        block.
+
+        Only spans contained in a transaction are sent to Sentry. Most
+        integrations start a transaction at the appropriate time, for example
+        for every incoming HTTP request. Use
+        :py:meth:`sentry_sdk.start_transaction` to start a new transaction when
+        one is not already in progress.
+
+        For supported `**kwargs` see :py:class:`sentry_sdk.tracing.Span`.
+        """
+        scope = get_current_scope()
+        return scope.start_span(instrumenter=instrumenter, **kwargs)
+
+    def start_transaction(
+        self,
+        transaction=None,
+        instrumenter=INSTRUMENTER.SENTRY,
+        custom_sampling_context=None,
+        **kwargs
+    ):
+        # type: (Optional[Transaction], str, Optional[SamplingContext], Unpack[TransactionKwargs]) -> Union[Transaction, NoOpSpan]
+        """
+        .. deprecated:: 2.0.0
+            This function is deprecated and will be removed in a future release.
+            Please use :py:meth:`sentry_sdk.Scope.start_transaction` instead.
+
+        Start and return a transaction.
+
+        Start an existing transaction if given, otherwise create and start a new
+        transaction with kwargs.
+
+        This is the entry point to manual tracing instrumentation.
+
+        A tree structure can be built by adding child spans to the transaction,
+        and child spans to other spans. To start a new child span within the
+        transaction or any span, call the respective `.start_child()` method.
+
+        Every child span must be finished before the transaction is finished,
+        otherwise the unfinished spans are discarded.
+
+        When used as context managers, spans and transactions are automatically
+        finished at the end of the `with` block. If not using context managers,
+        call the `.finish()` method.
+
+        When the transaction is finished, it will be sent to Sentry with all its
+        finished child spans.
+
+        For supported `**kwargs` see :py:class:`sentry_sdk.tracing.Transaction`.
+        """
+        scope = get_current_scope()
+
+        # For backwards compatibility, we allow passing the scope as the hub.
+        # We need a major release to make this nice. (if someone searches the code: deprecated)
+        # Type checking disabled for this line because deprecated keys are not allowed in the type signature.
+        kwargs["hub"] = scope  # type: ignore
+
+        return scope.start_transaction(
+            transaction, instrumenter, custom_sampling_context, **kwargs
+        )
+
+    def continue_trace(self, environ_or_headers, op=None, name=None, source=None):
+        # type: (Dict[str, Any], Optional[str], Optional[str], Optional[str]) -> Transaction
+        """
+        .. deprecated:: 2.0.0
+            This function is deprecated and will be removed in a future release.
+            Please use :py:meth:`sentry_sdk.Scope.continue_trace` instead.
+
+        Sets the propagation context from environment or headers and returns a transaction.
+        """
+        return get_isolation_scope().continue_trace(
+            environ_or_headers=environ_or_headers, op=op, name=name, source=source
+        )
+
+    @overload
+    def push_scope(
+        self, callback=None  # type: Optional[None]
+    ):
+        # type: (...) -> ContextManager[Scope]
+        pass
+
+    @overload
+    def push_scope(  # noqa: F811
+        self, callback  # type: Callable[[Scope], None]
+    ):
+        # type: (...) -> None
+        pass
+
+    def push_scope(  # noqa
+        self,
+        callback=None,  # type: Optional[Callable[[Scope], None]]
+        continue_trace=True,  # type: bool
+    ):
+        # type: (...) -> Optional[ContextManager[Scope]]
+        """
+        .. deprecated:: 2.0.0
+            This function is deprecated and will be removed in a future release.
+
+        Pushes a new layer on the scope stack.
+
+        :param callback: If provided, this method pushes a scope, calls
+            `callback`, and pops the scope again.
+
+        :returns: If no `callback` is provided, a context manager that should
+            be used to pop the scope again.
+        """
+        if callback is not None:
+            with self.push_scope() as scope:
+                callback(scope)
+            return None
+
+        return _ScopeManager(self)
+
+    def pop_scope_unsafe(self):
+        # type: () -> Tuple[Optional[Client], Scope]
+        """
+        .. deprecated:: 2.0.0
+            This function is deprecated and will be removed in a future release.
+
+        Pops a scope layer from the stack.
+
+        Try to use the context manager :py:meth:`push_scope` instead.
+        """
+        rv = self._stack.pop()
+        assert self._stack, "stack must have at least one layer"
+        return rv
+
+    @overload
+    def configure_scope(
+        self, callback=None  # type: Optional[None]
+    ):
+        # type: (...) -> ContextManager[Scope]
+        pass
+
+    @overload
+    def configure_scope(  # noqa: F811
+        self, callback  # type: Callable[[Scope], None]
+    ):
+        # type: (...) -> None
+        pass
+
+    def configure_scope(  # noqa
+        self,
+        callback=None,  # type: Optional[Callable[[Scope], None]]
+        continue_trace=True,  # type: bool
+    ):
+        # type: (...) -> Optional[ContextManager[Scope]]
+        """
+        .. deprecated:: 2.0.0
+            This function is deprecated and will be removed in a future release.
+
+        Reconfigures the scope.
+
+        :param callback: If provided, call the callback with the current scope.
+
+        :returns: If no callback is provided, returns a context manager that returns the scope.
+        """
+        scope = get_isolation_scope()
+
+        if continue_trace:
+            scope.generate_propagation_context()
+
+        if callback is not None:
+            # TODO: used to return None when client is None. Check if this changes behavior.
+            callback(scope)
+
+            return None
+
+        @contextmanager
+        def inner():
+            # type: () -> Generator[Scope, None, None]
+            yield scope
+
+        return inner()
+
+    def start_session(
+        self, session_mode="application"  # type: str
+    ):
+        # type: (...) -> None
+        """
+        .. deprecated:: 2.0.0
+            This function is deprecated and will be removed in a future release.
+            Please use :py:meth:`sentry_sdk.Scope.start_session` instead.
+
+        Starts a new session.
+        """
+        get_isolation_scope().start_session(
+            session_mode=session_mode,
+        )
+
+    def end_session(self):
+        # type: (...) -> None
+        """
+        .. deprecated:: 2.0.0
+            This function is deprecated and will be removed in a future release.
+            Please use :py:meth:`sentry_sdk.Scope.end_session` instead.
+
+        Ends the current session if there is one.
+        """
+        get_isolation_scope().end_session()
+
+    def stop_auto_session_tracking(self):
+        # type: (...) -> None
+        """
+        .. deprecated:: 2.0.0
+            This function is deprecated and will be removed in a future release.
+            Please use :py:meth:`sentry_sdk.Scope.stop_auto_session_tracking` instead.
+
+        Stops automatic session tracking.
+
+        This temporarily disables session tracking for the current scope when called.
+        To resume session tracking call `resume_auto_session_tracking`.
+        """
+        get_isolation_scope().stop_auto_session_tracking()
+
+    def resume_auto_session_tracking(self):
+        # type: (...) -> None
+        """
+        .. deprecated:: 2.0.0
+            This function is deprecated and will be removed in a future release.
+            Please use :py:meth:`sentry_sdk.Scope.resume_auto_session_tracking` instead.
+
+        Resumes automatic session tracking for the current scope if it was
+        disabled earlier.  This requires that automatic session tracking is
+        enabled in general.
+        """
+        get_isolation_scope().resume_auto_session_tracking()
+
+    def flush(
+        self,
+        timeout=None,  # type: Optional[float]
+        callback=None,  # type: Optional[Callable[[int, float], None]]
+    ):
+        # type: (...) -> None
+        """
+        .. deprecated:: 2.0.0
+            This function is deprecated and will be removed in a future release.
+            Please use :py:meth:`sentry_sdk.client._Client.flush` instead.
+
+        Alias for :py:meth:`sentry_sdk.client._Client.flush`
+        """
+        return get_client().flush(timeout=timeout, callback=callback)
+
+    def get_traceparent(self):
+        # type: () -> Optional[str]
+        """
+        .. deprecated:: 2.0.0
+            This function is deprecated and will be removed in a future release.
+            Please use :py:meth:`sentry_sdk.Scope.get_traceparent` instead.
+
+        Returns the traceparent either from the active span or from the scope.
+        """
+        current_scope = get_current_scope()
+        traceparent = current_scope.get_traceparent()
+
+        if traceparent is None:
+            isolation_scope = get_isolation_scope()
+            traceparent = isolation_scope.get_traceparent()
+
+        return traceparent
+
+    def get_baggage(self):
+        # type: () -> Optional[str]
+        """
+        .. deprecated:: 2.0.0
+            This function is deprecated and will be removed in a future release.
+            Please use :py:meth:`sentry_sdk.Scope.get_baggage` instead.
+
+        Returns Baggage either from the active span or from the scope.
+        """
+        current_scope = get_current_scope()
+        baggage = current_scope.get_baggage()
+
+        if baggage is None:
+            isolation_scope = get_isolation_scope()
+            baggage = isolation_scope.get_baggage()
+
+        if baggage is not None:
+            return baggage.serialize()
+
+        return None
+
+    def iter_trace_propagation_headers(self, span=None):
+        # type: (Optional[Span]) -> Generator[Tuple[str, str], None, None]
+        """
+        .. deprecated:: 2.0.0
+            This function is deprecated and will be removed in a future release.
+            Please use :py:meth:`sentry_sdk.Scope.iter_trace_propagation_headers` instead.
+
+        Return HTTP headers which allow propagation of trace data. Data taken
+        from the span representing the request, if available, or the current
+        span on the scope if not.
+        """
+        return get_current_scope().iter_trace_propagation_headers(
+            span=span,
+        )
+
+    def trace_propagation_meta(self, span=None):
+        # type: (Optional[Span]) -> str
+        """
+        .. deprecated:: 2.0.0
+            This function is deprecated and will be removed in a future release.
+            Please use :py:meth:`sentry_sdk.Scope.trace_propagation_meta` instead.
+
+        Return meta tags which should be injected into HTML templates
+        to allow propagation of trace information.
+        """
+        if span is not None:
+            logger.warning(
+                "The parameter `span` in trace_propagation_meta() is deprecated and will be removed in the future."
+            )
+
+        return get_current_scope().trace_propagation_meta(
+            span=span,
+        )
+
+
+with _suppress_hub_deprecation_warning():
+    # Suppress deprecation warning for the Hub here, since we still always
+    # import this module.
+    GLOBAL_HUB = Hub()
+_local.set(GLOBAL_HUB)
+
+
+# Circular imports
+from sentry_sdk import scope
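+
+
+# Editor's note -- migration sketch, not part of the SDK. The deprecated Hub
+# methods above delegate to the scope/client APIs, so 1.x-style code such as:
+#
+#     from sentry_sdk import Hub
+#     Hub.current.capture_message("hello")
+#
+# maps in 2.x to the equivalent top-level API:
+#
+#     import sentry_sdk
+#     sentry_sdk.capture_message("hello")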
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/__init__.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/__init__.py
new file mode 100644
index 00000000..9bff2647
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/__init__.py
@@ -0,0 +1,293 @@
+from abc import ABC, abstractmethod
+from threading import Lock
+
+from sentry_sdk.utils import logger
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from collections.abc import Sequence
+    from typing import Callable
+    from typing import Dict
+    from typing import Iterator
+    from typing import List
+    from typing import Optional
+    from typing import Set
+    from typing import Type
+    from typing import Union
+
+
+_DEFAULT_FAILED_REQUEST_STATUS_CODES = frozenset(range(500, 600))
+
+
+_installer_lock = Lock()
+
+# Set of all integration identifiers we have attempted to install
+_processed_integrations = set()  # type: Set[str]
+
+# Set of all integration identifiers we have actually installed
+_installed_integrations = set()  # type: Set[str]
+
+
+def _generate_default_integrations_iterator(
+    integrations,  # type: List[str]
+    auto_enabling_integrations,  # type: List[str]
+):
+    # type: (...) -> Callable[[bool], Iterator[Type[Integration]]]
+
+    def iter_default_integrations(with_auto_enabling_integrations):
+        # type: (bool) -> Iterator[Type[Integration]]
+        """Returns an iterator of the default integration classes:"""
+        from importlib import import_module
+
+        if with_auto_enabling_integrations:
+            all_import_strings = integrations + auto_enabling_integrations
+        else:
+            all_import_strings = integrations
+
+        for import_string in all_import_strings:
+            try:
+                module, cls = import_string.rsplit(".", 1)
+                yield getattr(import_module(module), cls)
+            except (DidNotEnable, SyntaxError) as e:
+                logger.debug(
+                    "Did not import default integration %s: %s", import_string, e
+                )
+
+    if isinstance(iter_default_integrations.__doc__, str):
+        for import_string in integrations:
+            iter_default_integrations.__doc__ += "\n- `{}`".format(import_string)
+
+    return iter_default_integrations
+
+
+_DEFAULT_INTEGRATIONS = [
+    # stdlib/base runtime integrations
+    "sentry_sdk.integrations.argv.ArgvIntegration",
+    "sentry_sdk.integrations.atexit.AtexitIntegration",
+    "sentry_sdk.integrations.dedupe.DedupeIntegration",
+    "sentry_sdk.integrations.excepthook.ExcepthookIntegration",
+    "sentry_sdk.integrations.logging.LoggingIntegration",
+    "sentry_sdk.integrations.modules.ModulesIntegration",
+    "sentry_sdk.integrations.stdlib.StdlibIntegration",
+    "sentry_sdk.integrations.threading.ThreadingIntegration",
+]
+
+_AUTO_ENABLING_INTEGRATIONS = [
+    "sentry_sdk.integrations.aiohttp.AioHttpIntegration",
+    "sentry_sdk.integrations.anthropic.AnthropicIntegration",
+    "sentry_sdk.integrations.ariadne.AriadneIntegration",
+    "sentry_sdk.integrations.arq.ArqIntegration",
+    "sentry_sdk.integrations.asyncpg.AsyncPGIntegration",
+    "sentry_sdk.integrations.boto3.Boto3Integration",
+    "sentry_sdk.integrations.bottle.BottleIntegration",
+    "sentry_sdk.integrations.celery.CeleryIntegration",
+    "sentry_sdk.integrations.chalice.ChaliceIntegration",
+    "sentry_sdk.integrations.clickhouse_driver.ClickhouseDriverIntegration",
+    "sentry_sdk.integrations.cohere.CohereIntegration",
+    "sentry_sdk.integrations.django.DjangoIntegration",
+    "sentry_sdk.integrations.falcon.FalconIntegration",
+    "sentry_sdk.integrations.fastapi.FastApiIntegration",
+    "sentry_sdk.integrations.flask.FlaskIntegration",
+    "sentry_sdk.integrations.gql.GQLIntegration",
+    "sentry_sdk.integrations.graphene.GrapheneIntegration",
+    "sentry_sdk.integrations.httpx.HttpxIntegration",
+    "sentry_sdk.integrations.huey.HueyIntegration",
+    "sentry_sdk.integrations.huggingface_hub.HuggingfaceHubIntegration",
+    "sentry_sdk.integrations.langchain.LangchainIntegration",
+    "sentry_sdk.integrations.litestar.LitestarIntegration",
+    "sentry_sdk.integrations.loguru.LoguruIntegration",
+    "sentry_sdk.integrations.openai.OpenAIIntegration",
+    "sentry_sdk.integrations.pymongo.PyMongoIntegration",
+    "sentry_sdk.integrations.pyramid.PyramidIntegration",
+    "sentry_sdk.integrations.quart.QuartIntegration",
+    "sentry_sdk.integrations.redis.RedisIntegration",
+    "sentry_sdk.integrations.rq.RqIntegration",
+    "sentry_sdk.integrations.sanic.SanicIntegration",
+    "sentry_sdk.integrations.sqlalchemy.SqlalchemyIntegration",
+    "sentry_sdk.integrations.starlette.StarletteIntegration",
+    "sentry_sdk.integrations.starlite.StarliteIntegration",
+    "sentry_sdk.integrations.strawberry.StrawberryIntegration",
+    "sentry_sdk.integrations.tornado.TornadoIntegration",
+]
+
+iter_default_integrations = _generate_default_integrations_iterator(
+    integrations=_DEFAULT_INTEGRATIONS,
+    auto_enabling_integrations=_AUTO_ENABLING_INTEGRATIONS,
+)
+
+del _generate_default_integrations_iterator
+
+
+_MIN_VERSIONS = {
+    "aiohttp": (3, 4),
+    "anthropic": (0, 16),
+    "ariadne": (0, 20),
+    "arq": (0, 23),
+    "asyncpg": (0, 23),
+    "beam": (2, 12),
+    "boto3": (1, 12),  # botocore
+    "bottle": (0, 12),
+    "celery": (4, 4, 7),
+    "chalice": (1, 16, 0),
+    "clickhouse_driver": (0, 2, 0),
+    "django": (1, 8),
+    "dramatiq": (1, 9),
+    "falcon": (1, 4),
+    "fastapi": (0, 79, 0),
+    "flask": (1, 1, 4),
+    "gql": (3, 4, 1),
+    "graphene": (3, 3),
+    "grpc": (1, 32, 0),  # grpcio
+    "huggingface_hub": (0, 22),
+    "langchain": (0, 0, 210),
+    "launchdarkly": (9, 8, 0),
+    "loguru": (0, 7, 0),
+    "openai": (1, 0, 0),
+    "openfeature": (0, 7, 1),
+    "quart": (0, 16, 0),
+    "ray": (2, 7, 0),
+    "requests": (2, 0, 0),
+    "rq": (0, 6),
+    "sanic": (0, 8),
+    "sqlalchemy": (1, 2),
+    "starlette": (0, 16),
+    "starlite": (1, 48),
+    "statsig": (0, 55, 3),
+    "strawberry": (0, 209, 5),
+    "tornado": (6, 0),
+    "typer": (0, 15),
+    "unleash": (6, 0, 1),
+}
+
+
+def setup_integrations(
+    integrations,
+    with_defaults=True,
+    with_auto_enabling_integrations=False,
+    disabled_integrations=None,
+):
+    # type: (Sequence[Integration], bool, bool, Optional[Sequence[Union[type[Integration], Integration]]]) -> Dict[str, Integration]
+    """
+    Given a list of integration instances, this installs them all.
+
+    When `with_defaults` is set to `True` all default integrations are added
+    unless they were already provided before.
+
+    `disabled_integrations` takes precedence over `with_defaults` and
+    `with_auto_enabling_integrations`.
+    """
+    integrations = dict(
+        (integration.identifier, integration) for integration in integrations or ()
+    )
+
+    logger.debug("Setting up integrations (with default = %s)", with_defaults)
+
+    # Integrations that will not be enabled
+    disabled_integrations = [
+        integration if isinstance(integration, type) else type(integration)
+        for integration in disabled_integrations or []
+    ]
+
+    # Integrations that are not explicitly set up by the user.
+    used_as_default_integration = set()
+
+    if with_defaults:
+        for integration_cls in iter_default_integrations(
+            with_auto_enabling_integrations
+        ):
+            if integration_cls.identifier not in integrations:
+                instance = integration_cls()
+                integrations[instance.identifier] = instance
+                used_as_default_integration.add(instance.identifier)
+
+    for identifier, integration in integrations.items():
+        with _installer_lock:
+            if identifier not in _processed_integrations:
+                if type(integration) in disabled_integrations:
+                    logger.debug("Ignoring integration %s", identifier)
+                else:
+                    logger.debug(
+                        "Setting up previously not enabled integration %s", identifier
+                    )
+                    try:
+                        type(integration).setup_once()
+                    except DidNotEnable as e:
+                        if identifier not in used_as_default_integration:
+                            raise
+
+                        logger.debug(
+                            "Did not enable default integration %s: %s", identifier, e
+                        )
+                    else:
+                        _installed_integrations.add(identifier)
+
+                _processed_integrations.add(identifier)
+
+    integrations = {
+        identifier: integration
+        for identifier, integration in integrations.items()
+        if identifier in _installed_integrations
+    }
+
+    for identifier in integrations:
+        logger.debug("Enabling integration %s", identifier)
+
+    return integrations
+
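+# Editor's note -- illustration only; this function is normally invoked by the
+# client during sentry_sdk.init(). A hypothetical direct call with one explicit
+# integration plus auto-enabling defaults:
+#
+#     from sentry_sdk.integrations.flask import FlaskIntegration
+#
+#     enabled = setup_integrations(
+#         [FlaskIntegration()],
+#         with_defaults=True,
+#         with_auto_enabling_integrations=True,
+#     )
+#     # `enabled` maps identifier -> instance for every installed integration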
+
+def _check_minimum_version(integration, version, package=None):
+    # type: (type[Integration], Optional[tuple[int, ...]], Optional[str]) -> None
+    package = package or integration.identifier
+
+    if version is None:
+        raise DidNotEnable(f"Unparsable {package} version.")
+
+    min_version = _MIN_VERSIONS.get(integration.identifier)
+    if min_version is None:
+        return
+
+    if version < min_version:
+        raise DidNotEnable(
+            f"Integration only supports {package} {'.'.join(map(str, min_version))} or newer."
+        )
+
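+# Editor's note -- usage sketch with hypothetical values. An integration's
+# setup_once() typically parses the package version and delegates the check:
+#
+#     from sentry_sdk.utils import parse_version
+#     from sentry_sdk.integrations.bottle import BottleIntegration
+#
+#     version = parse_version("0.11.9")   # -> (0, 11, 9)
+#     _check_minimum_version(BottleIntegration, version)
+#     # raises DidNotEnable: bottle's minimum in _MIN_VERSIONS is (0, 12)
+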
+
+class DidNotEnable(Exception):  # noqa: N818
+    """
+    The integration could not be enabled due to a trivial user error like
+    `flask` not being installed for the `FlaskIntegration`.
+
+    This exception is silently swallowed for default integrations, but reraised
+    for explicitly enabled integrations.
+    """
+
+
+class Integration(ABC):
+    """Baseclass for all integrations.
+
+    To accept options for an integration, implement your own constructor that
+    saves those options on `self`.
+    """
+
+    install = None
+    """Legacy method, do not implement."""
+
+    identifier = None  # type: str
+    """String unique ID of integration type"""
+
+    @staticmethod
+    @abstractmethod
+    def setup_once():
+        # type: () -> None
+        """
+        Initialize the integration.
+
+        This function is only called once, ever. Configuration is not available
+        at this point, so the only thing to do here is to hook into exception
+        handlers, and perhaps do monkeypatches.
+
+        Inside those hooks `Integration.current` can be used to access the
+        instance again.
+        """
+        pass
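+
+
+# Editor's note -- minimal sketch of a user-defined integration following the
+# contract above (all names hypothetical):
+#
+#     class MyLibIntegration(Integration):
+#         identifier = "mylib"
+#
+#         @staticmethod
+#         def setup_once():
+#             # monkeypatch mylib's entry points here; called once per process
+#             ...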
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/_asgi_common.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/_asgi_common.py
new file mode 100644
index 00000000..c16bbbcf
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/_asgi_common.py
@@ -0,0 +1,108 @@
+import urllib
+
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.integrations._wsgi_common import _filter_headers
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+    from typing import Dict
+    from typing import Optional
+    from typing import Union
+    from typing_extensions import Literal
+
+    from sentry_sdk.utils import AnnotatedValue
+
+
+def _get_headers(asgi_scope):
+    # type: (Any) -> Dict[str, str]
+    """
+    Extract headers from the ASGI scope, in the format that the Sentry protocol expects.
+    """
+    headers = {}  # type: Dict[str, str]
+    for raw_key, raw_value in asgi_scope["headers"]:
+        key = raw_key.decode("latin-1")
+        value = raw_value.decode("latin-1")
+        if key in headers:
+            headers[key] = headers[key] + ", " + value
+        else:
+            headers[key] = value
+
+    return headers
+
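+# Editor's note -- behavior sketch: repeated header names are merged into one
+# comma-separated value instead of being overwritten.
+#
+#     _get_headers({"headers": [(b"x-tag", b"a"), (b"x-tag", b"b")]})
+#     # -> {"x-tag": "a, b"}
+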
+
+def _get_url(asgi_scope, default_scheme, host):
+    # type: (Dict[str, Any], Literal["ws", "http"], Optional[Union[AnnotatedValue, str]]) -> str
+    """
+    Extract URL from the ASGI scope, without also including the querystring.
+    """
+    scheme = asgi_scope.get("scheme", default_scheme)
+
+    server = asgi_scope.get("server", None)
+    path = asgi_scope.get("root_path", "") + asgi_scope.get("path", "")
+
+    if host:
+        return "%s://%s%s" % (scheme, host, path)
+
+    if server is not None:
+        host, port = server
+        default_port = {"http": 80, "https": 443, "ws": 80, "wss": 443}.get(scheme)
+        if port != default_port:
+            return "%s://%s:%s%s" % (scheme, host, port, path)
+        return "%s://%s%s" % (scheme, host, path)
+    return path
+
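+# Editor's note -- behavior sketch (hypothetical scope values): default ports
+# are elided from the reconstructed URL.
+#
+#     scope = {"scheme": "https", "server": ("example.org", 443),
+#              "root_path": "", "path": "/health"}
+#     _get_url(scope, "http", None)   # -> "https://example.org/health"
+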
+
+def _get_query(asgi_scope):
+    # type: (Any) -> Any
+    """
+    Extract querystring from the ASGI scope, in the format that the Sentry protocol expects.
+    """
+    qs = asgi_scope.get("query_string")
+    if not qs:
+        return None
+    return urllib.parse.unquote(qs.decode("latin-1"))
+
+
+def _get_ip(asgi_scope):
+    # type: (Any) -> str
+    """
+    Extract IP Address from the ASGI scope based on request headers with fallback to scope client.
+    """
+    headers = _get_headers(asgi_scope)
+    try:
+        return headers["x-forwarded-for"].split(",")[0].strip()
+    except (KeyError, IndexError):
+        pass
+
+    try:
+        return headers["x-real-ip"]
+    except KeyError:
+        pass
+
+    return asgi_scope.get("client")[0]
+
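+# Editor's note -- precedence sketch (documentation addresses): x-forwarded-for
+# wins over x-real-ip, which wins over the scope's client tuple.
+#
+#     _get_ip({"headers": [(b"x-forwarded-for", b"203.0.113.5, 10.0.0.1")],
+#              "client": ("10.0.0.2", 54321)})
+#     # -> "203.0.113.5"
+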
+
+def _get_request_data(asgi_scope):
+    # type: (Any) -> Dict[str, Any]
+    """
+    Returns data related to the HTTP request from the ASGI scope.
+    """
+    request_data = {}  # type: Dict[str, Any]
+    ty = asgi_scope["type"]
+    if ty in ("http", "websocket"):
+        request_data["method"] = asgi_scope.get("method")
+
+        request_data["headers"] = headers = _filter_headers(_get_headers(asgi_scope))
+        request_data["query_string"] = _get_query(asgi_scope)
+
+        request_data["url"] = _get_url(
+            asgi_scope, "http" if ty == "http" else "ws", headers.get("host")
+        )
+
+    client = asgi_scope.get("client")
+    if client and should_send_default_pii():
+        request_data["env"] = {"REMOTE_ADDR": _get_ip(asgi_scope)}
+
+    return request_data
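+
+
+# Editor's note -- composition sketch (hypothetical scope): for an HTTP scope,
+# the helpers above combine into the Sentry request payload.
+#
+#     _get_request_data({"type": "http", "method": "GET", "scheme": "http",
+#                        "server": ("example.org", 80), "root_path": "",
+#                        "path": "/search", "query_string": b"q=1",
+#                        "headers": [(b"host", b"example.org")]})
+#     # -> {"method": "GET", "headers": {"host": "example.org"},
+#     #     "query_string": "q=1", "url": "http://example.org/search"}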
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/_wsgi_common.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/_wsgi_common.py
new file mode 100644
index 00000000..48bc4328
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/_wsgi_common.py
@@ -0,0 +1,271 @@
+from contextlib import contextmanager
+import json
+from copy import deepcopy
+
+import sentry_sdk
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.utils import AnnotatedValue, logger
+
+try:
+    from django.http.request import RawPostDataException
+except ImportError:
+    RawPostDataException = None
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+    from typing import Dict
+    from typing import Iterator
+    from typing import Mapping
+    from typing import MutableMapping
+    from typing import Optional
+    from typing import Union
+    from sentry_sdk._types import Event, HttpStatusCodeRange
+
+
+SENSITIVE_ENV_KEYS = (
+    "REMOTE_ADDR",
+    "HTTP_X_FORWARDED_FOR",
+    "HTTP_SET_COOKIE",
+    "HTTP_COOKIE",
+    "HTTP_AUTHORIZATION",
+    "HTTP_X_API_KEY",
+    "HTTP_X_FORWARDED_FOR",
+    "HTTP_X_REAL_IP",
+)
+
+SENSITIVE_HEADERS = tuple(
+    x[len("HTTP_") :] for x in SENSITIVE_ENV_KEYS if x.startswith("HTTP_")
+)
+
+DEFAULT_HTTP_METHODS_TO_CAPTURE = (
+    "CONNECT",
+    "DELETE",
+    "GET",
+    # "HEAD",  # do not capture HEAD requests by default
+    # "OPTIONS",  # do not capture OPTIONS requests by default
+    "PATCH",
+    "POST",
+    "PUT",
+    "TRACE",
+)
+
+
+# This noop context manager can be replaced with "from contextlib import nullcontext" when we drop Python 3.6 support
+@contextmanager
+def nullcontext():
+    # type: () -> Iterator[None]
+    yield
+
+
+def request_body_within_bounds(client, content_length):
+    # type: (Optional[sentry_sdk.client.BaseClient], int) -> bool
+    if client is None:
+        return False
+
+    bodies = client.options["max_request_body_size"]
+    return not (
+        bodies == "never"
+        or (bodies == "small" and content_length > 10**3)
+        or (bodies == "medium" and content_length > 10**4)
+    )
+
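+# Editor's note -- threshold sketch: "small" caps request bodies at 10**3 bytes
+# and "medium" at 10**4. Given an active client configured with
+# max_request_body_size="small":
+#
+#     request_body_within_bounds(client, 500)     # -> True
+#     request_body_within_bounds(client, 5_000)   # -> False
+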
+
+class RequestExtractor:
+    """
+    Base class for request extraction.
+    """
+
+    # It does not make sense to make this class an ABC because it is not used
+    # for typing, only so that child classes can inherit common methods from
+    # it. Only some child classes implement all methods that raise
+    # NotImplementedError in this class.
+
+    def __init__(self, request):
+        # type: (Any) -> None
+        self.request = request
+
+    def extract_into_event(self, event):
+        # type: (Event) -> None
+        client = sentry_sdk.get_client()
+        if not client.is_active():
+            return
+
+        data = None  # type: Optional[Union[AnnotatedValue, Dict[str, Any]]]
+
+        content_length = self.content_length()
+        request_info = event.get("request", {})
+
+        if should_send_default_pii():
+            request_info["cookies"] = dict(self.cookies())
+
+        if not request_body_within_bounds(client, content_length):
+            data = AnnotatedValue.removed_because_over_size_limit()
+        else:
+            # First read the raw body data
+            # It is important to read this first because if it is Django
+            # it will cache the body and then we can read the cached version
+            # again in parsed_body() (or json() or wherever).
+            raw_data = None
+            try:
+                raw_data = self.raw_data()
+            except (RawPostDataException, ValueError):
+                # If Django REST framework is used, it has already read the body
+                # for us, so reading it here will fail. We can ignore this.
+                pass
+
+            parsed_body = self.parsed_body()
+            if parsed_body is not None:
+                data = parsed_body
+            elif raw_data:
+                data = AnnotatedValue.removed_because_raw_data()
+            else:
+                data = None
+
+        if data is not None:
+            request_info["data"] = data
+
+        event["request"] = deepcopy(request_info)
+
+    def content_length(self):
+        # type: () -> int
+        try:
+            return int(self.env().get("CONTENT_LENGTH", 0))
+        except ValueError:
+            return 0
+
+    def cookies(self):
+        # type: () -> MutableMapping[str, Any]
+        raise NotImplementedError()
+
+    def raw_data(self):
+        # type: () -> Optional[Union[str, bytes]]
+        raise NotImplementedError()
+
+    def form(self):
+        # type: () -> Optional[Dict[str, Any]]
+        raise NotImplementedError()
+
+    def parsed_body(self):
+        # type: () -> Optional[Dict[str, Any]]
+        try:
+            form = self.form()
+        except Exception:
+            form = None
+        try:
+            files = self.files()
+        except Exception:
+            files = None
+
+        if form or files:
+            data = {}
+            if form:
+                data = dict(form.items())
+            if files:
+                for key in files.keys():
+                    data[key] = AnnotatedValue.removed_because_raw_data()
+
+            return data
+
+        return self.json()
+
+    def is_json(self):
+        # type: () -> bool
+        return _is_json_content_type(self.env().get("CONTENT_TYPE"))
+
+    def json(self):
+        # type: () -> Optional[Any]
+        try:
+            if not self.is_json():
+                return None
+
+            try:
+                raw_data = self.raw_data()
+            except (RawPostDataException, ValueError):
+                # The body might have already been read, in which case this will
+                # fail
+                raw_data = None
+
+            if raw_data is None:
+                return None
+
+            if isinstance(raw_data, str):
+                return json.loads(raw_data)
+            else:
+                return json.loads(raw_data.decode("utf-8"))
+        except ValueError:
+            pass
+
+        return None
+
+    def files(self):
+        # type: () -> Optional[Dict[str, Any]]
+        raise NotImplementedError()
+
+    def size_of_file(self, file):
+        # type: (Any) -> int
+        raise NotImplementedError()
+
+    def env(self):
+        # type: () -> Dict[str, Any]
+        raise NotImplementedError()
+
+
+def _is_json_content_type(ct):
+    # type: (Optional[str]) -> bool
+    mt = (ct or "").split(";", 1)[0]
+    return mt == "application/json" or (
+        mt.startswith("application/") and mt.endswith("+json")
+    )
+
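+# Editor's note -- behavior sketch:
+#
+#     _is_json_content_type("application/json; charset=utf-8")   # -> True
+#     _is_json_content_type("application/vnd.api+json")          # -> True
+#     _is_json_content_type("text/json")                         # -> False
+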
+
+def _filter_headers(headers):
+    # type: (Mapping[str, str]) -> Mapping[str, Union[AnnotatedValue, str]]
+    if should_send_default_pii():
+        return headers
+
+    return {
+        k: (
+            v
+            if k.upper().replace("-", "_") not in SENSITIVE_HEADERS
+            else AnnotatedValue.removed_because_over_size_limit()
+        )
+        for k, v in headers.items()
+    }
+
+
+def _in_http_status_code_range(code, code_ranges):
+    # type: (object, list[HttpStatusCodeRange]) -> bool
+    for target in code_ranges:
+        if isinstance(target, int):
+            if code == target:
+                return True
+            continue
+
+        try:
+            if code in target:
+                return True
+        except TypeError:
+            logger.warning(
+                "failed_request_status_codes has to be a list of integers or containers"
+            )
+
+    return False
+
+
+class HttpCodeRangeContainer:
+    """
+    Wrapper to make it possible to use list[HttpStatusCodeRange] as a Container[int].
+    Used for backwards compatibility with the old `failed_request_status_codes` option.
+    """
+
+    def __init__(self, code_ranges):
+        # type: (list[HttpStatusCodeRange]) -> None
+        self._code_ranges = code_ranges
+
+    def __contains__(self, item):
+        # type: (object) -> bool
+        return _in_http_status_code_range(item, self._code_ranges)
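+
+
+# Editor's note -- usage sketch: mixed int and range entries behave as a single
+# Container[int].
+#
+#     codes = HttpCodeRangeContainer([404, range(500, 600)])
+#     404 in codes   # -> True
+#     503 in codes   # -> True
+#     200 in codes   # -> False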
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/aiohttp.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/aiohttp.py
new file mode 100644
index 00000000..ad3202bf
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/aiohttp.py
@@ -0,0 +1,357 @@
+import sys
+import weakref
+from functools import wraps
+
+import sentry_sdk
+from sentry_sdk.api import continue_trace
+from sentry_sdk.consts import OP, SPANSTATUS, SPANDATA
+from sentry_sdk.integrations import (
+    _DEFAULT_FAILED_REQUEST_STATUS_CODES,
+    _check_minimum_version,
+    Integration,
+    DidNotEnable,
+)
+from sentry_sdk.integrations.logging import ignore_logger
+from sentry_sdk.sessions import track_session
+from sentry_sdk.integrations._wsgi_common import (
+    _filter_headers,
+    request_body_within_bounds,
+)
+from sentry_sdk.tracing import (
+    BAGGAGE_HEADER_NAME,
+    SOURCE_FOR_STYLE,
+    TransactionSource,
+)
+from sentry_sdk.tracing_utils import should_propagate_trace
+from sentry_sdk.utils import (
+    capture_internal_exceptions,
+    ensure_integration_enabled,
+    event_from_exception,
+    logger,
+    parse_url,
+    parse_version,
+    reraise,
+    transaction_from_function,
+    HAS_REAL_CONTEXTVARS,
+    CONTEXTVARS_ERROR_MESSAGE,
+    SENSITIVE_DATA_SUBSTITUTE,
+    AnnotatedValue,
+)
+
+try:
+    import asyncio
+
+    from aiohttp import __version__ as AIOHTTP_VERSION
+    from aiohttp import ClientSession, TraceConfig
+    from aiohttp.web import Application, HTTPException, UrlDispatcher
+except ImportError:
+    raise DidNotEnable("AIOHTTP not installed")
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from aiohttp.web_request import Request
+    from aiohttp.web_urldispatcher import UrlMappingMatchInfo
+    from aiohttp import TraceRequestStartParams, TraceRequestEndParams
+
+    from collections.abc import Set
+    from types import SimpleNamespace
+    from typing import Any
+    from typing import Optional
+    from typing import Tuple
+    from typing import Union
+
+    from sentry_sdk.utils import ExcInfo
+    from sentry_sdk._types import Event, EventProcessor
+
+
+TRANSACTION_STYLE_VALUES = ("handler_name", "method_and_path_pattern")
+
+
+class AioHttpIntegration(Integration):
+    identifier = "aiohttp"
+    origin = f"auto.http.{identifier}"
+
+    def __init__(
+        self,
+        transaction_style="handler_name",  # type: str
+        *,
+        failed_request_status_codes=_DEFAULT_FAILED_REQUEST_STATUS_CODES,  # type: Set[int]
+    ):
+        # type: (...) -> None
+        if transaction_style not in TRANSACTION_STYLE_VALUES:
+            raise ValueError(
+                "Invalid value for transaction_style: %s (must be in %s)"
+                % (transaction_style, TRANSACTION_STYLE_VALUES)
+            )
+        self.transaction_style = transaction_style
+        self._failed_request_status_codes = failed_request_status_codes
+
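+    # Editor's note -- configuration sketch (hypothetical DSN):
+    #
+    #     sentry_sdk.init(
+    #         dsn="...",
+    #         integrations=[
+    #             AioHttpIntegration(
+    #                 transaction_style="method_and_path_pattern",
+    #                 failed_request_status_codes={403, *range(500, 600)},
+    #             )
+    #         ],
+    #     )
+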
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+
+        version = parse_version(AIOHTTP_VERSION)
+        _check_minimum_version(AioHttpIntegration, version)
+
+        if not HAS_REAL_CONTEXTVARS:
+            # We better have contextvars or we're going to leak state between
+            # requests.
+            raise DidNotEnable(
+                "The aiohttp integration for Sentry requires Python 3.7+ "
+                " or aiocontextvars package." + CONTEXTVARS_ERROR_MESSAGE
+            )
+
+        ignore_logger("aiohttp.server")
+
+        old_handle = Application._handle
+
+        async def sentry_app_handle(self, request, *args, **kwargs):
+            # type: (Any, Request, *Any, **Any) -> Any
+            integration = sentry_sdk.get_client().get_integration(AioHttpIntegration)
+            if integration is None:
+                return await old_handle(self, request, *args, **kwargs)
+
+            weak_request = weakref.ref(request)
+
+            with sentry_sdk.isolation_scope() as scope:
+                with track_session(scope, session_mode="request"):
+                    # Scope data will not leak between requests because aiohttp
+                    # creates a task to wrap each request.
+                    scope.generate_propagation_context()
+                    scope.clear_breadcrumbs()
+                    scope.add_event_processor(_make_request_processor(weak_request))
+
+                    headers = dict(request.headers)
+                    transaction = continue_trace(
+                        headers,
+                        op=OP.HTTP_SERVER,
+                        # If this transaction name makes it to the UI, AIOHTTP's
+                        # URL resolver did not find a route or died trying.
+                        name="generic AIOHTTP request",
+                        source=TransactionSource.ROUTE,
+                        origin=AioHttpIntegration.origin,
+                    )
+                    with sentry_sdk.start_transaction(
+                        transaction,
+                        custom_sampling_context={"aiohttp_request": request},
+                    ):
+                        try:
+                            response = await old_handle(self, request)
+                        except HTTPException as e:
+                            transaction.set_http_status(e.status_code)
+
+                            if (
+                                e.status_code
+                                in integration._failed_request_status_codes
+                            ):
+                                _capture_exception()
+
+                            raise
+                        except (asyncio.CancelledError, ConnectionResetError):
+                            transaction.set_status(SPANSTATUS.CANCELLED)
+                            raise
+                        except Exception:
+                            # This will probably map to a 500 but seems like we
+                            # have no way to tell. Do not set span status.
+                            reraise(*_capture_exception())
+
+                        try:
+                            # A valid response handler will return a valid response with a status. But, if the handler
+                            # returns an invalid response (e.g. None), the line below will raise an AttributeError.
+                            # Even though this is likely invalid, we need to handle this case to ensure we don't break
+                            # the application.
+                            response_status = response.status
+                        except AttributeError:
+                            pass
+                        else:
+                            transaction.set_http_status(response_status)
+
+                        return response
+
+        Application._handle = sentry_app_handle
+
+        old_urldispatcher_resolve = UrlDispatcher.resolve
+
+        @wraps(old_urldispatcher_resolve)
+        async def sentry_urldispatcher_resolve(self, request):
+            # type: (UrlDispatcher, Request) -> UrlMappingMatchInfo
+            rv = await old_urldispatcher_resolve(self, request)
+
+            integration = sentry_sdk.get_client().get_integration(AioHttpIntegration)
+            if integration is None:
+                return rv
+
+            name = None
+
+            try:
+                if integration.transaction_style == "handler_name":
+                    name = transaction_from_function(rv.handler)
+                elif integration.transaction_style == "method_and_path_pattern":
+                    route_info = rv.get_info()
+                    pattern = route_info.get("path") or route_info.get("formatter")
+                    name = "{} {}".format(request.method, pattern)
+            except Exception:
+                pass
+
+            if name is not None:
+                sentry_sdk.get_current_scope().set_transaction_name(
+                    name,
+                    source=SOURCE_FOR_STYLE[integration.transaction_style],
+                )
+
+            return rv
+
+        UrlDispatcher.resolve = sentry_urldispatcher_resolve
+
+        old_client_session_init = ClientSession.__init__
+
+        @ensure_integration_enabled(AioHttpIntegration, old_client_session_init)
+        def init(*args, **kwargs):
+            # type: (Any, Any) -> None
+            client_trace_configs = list(kwargs.get("trace_configs") or ())
+            trace_config = create_trace_config()
+            client_trace_configs.append(trace_config)
+
+            kwargs["trace_configs"] = client_trace_configs
+            return old_client_session_init(*args, **kwargs)
+
+        ClientSession.__init__ = init
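+
+# Usage sketch (illustrative, placeholder DSN): enabling the integration with
+# path-pattern transaction names and treating all 5xx responses as failures.
+#
+#     import sentry_sdk
+#     from sentry_sdk.integrations.aiohttp import AioHttpIntegration
+#
+#     sentry_sdk.init(
+#         dsn="https://<key>@<org>.ingest.sentry.io/<project>",  # placeholder
+#         integrations=[
+#             AioHttpIntegration(
+#                 transaction_style="method_and_path_pattern",
+#                 failed_request_status_codes={*range(500, 600)},
+#             )
+#         ],
+#     )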
+
+
+def create_trace_config():
+    # type: () -> TraceConfig
+
+    async def on_request_start(session, trace_config_ctx, params):
+        # type: (ClientSession, SimpleNamespace, TraceRequestStartParams) -> None
+        if sentry_sdk.get_client().get_integration(AioHttpIntegration) is None:
+            return
+
+        method = params.method.upper()
+
+        parsed_url = None
+        with capture_internal_exceptions():
+            parsed_url = parse_url(str(params.url), sanitize=False)
+
+        span = sentry_sdk.start_span(
+            op=OP.HTTP_CLIENT,
+            name="%s %s"
+            % (method, parsed_url.url if parsed_url else SENSITIVE_DATA_SUBSTITUTE),
+            origin=AioHttpIntegration.origin,
+        )
+        span.set_data(SPANDATA.HTTP_METHOD, method)
+        if parsed_url is not None:
+            span.set_data("url", parsed_url.url)
+            span.set_data(SPANDATA.HTTP_QUERY, parsed_url.query)
+            span.set_data(SPANDATA.HTTP_FRAGMENT, parsed_url.fragment)
+
+        client = sentry_sdk.get_client()
+
+        if should_propagate_trace(client, str(params.url)):
+            for (
+                key,
+                value,
+            ) in sentry_sdk.get_current_scope().iter_trace_propagation_headers(
+                span=span
+            ):
+                logger.debug(
+                    "[Tracing] Adding `{key}` header {value} to outgoing request to {url}.".format(
+                        key=key, value=value, url=params.url
+                    )
+                )
+                if key == BAGGAGE_HEADER_NAME and params.headers.get(
+                    BAGGAGE_HEADER_NAME
+                ):
+                    # do not overwrite any existing baggage, just append to it
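+                    # (illustrative: an existing "custom=1" baggage header
+                    # becomes "custom=1,sentry-trace_id=...")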
+                    params.headers[key] += "," + value
+                else:
+                    params.headers[key] = value
+
+        trace_config_ctx.span = span
+
+    async def on_request_end(session, trace_config_ctx, params):
+        # type: (ClientSession, SimpleNamespace, TraceRequestEndParams) -> None
+        if trace_config_ctx.span is None:
+            return
+
+        span = trace_config_ctx.span
+        span.set_http_status(int(params.response.status))
+        span.set_data("reason", params.response.reason)
+        span.finish()
+
+    trace_config = TraceConfig()
+
+    trace_config.on_request_start.append(on_request_start)
+    trace_config.on_request_end.append(on_request_end)
+
+    return trace_config
+
+
+def _make_request_processor(weak_request):
+    # type: (weakref.ReferenceType[Request]) -> EventProcessor
+    def aiohttp_processor(
+        event,  # type: Event
+        hint,  # type: dict[str, Tuple[type, BaseException, Any]]
+    ):
+        # type: (...) -> Event
+        request = weak_request()
+        if request is None:
+            return event
+
+        with capture_internal_exceptions():
+            request_info = event.setdefault("request", {})
+
+            request_info["url"] = "%s://%s%s" % (
+                request.scheme,
+                request.host,
+                request.path,
+            )
+
+            request_info["query_string"] = request.query_string
+            request_info["method"] = request.method
+            request_info["env"] = {"REMOTE_ADDR": request.remote}
+            request_info["headers"] = _filter_headers(dict(request.headers))
+
+            # Just attach the raw data here, if it is available and within bounds.
+            # Unfortunately there's no way to get structured data from aiohttp
+            # without awaiting a coroutine.
+            request_info["data"] = get_aiohttp_request_data(request)
+
+        return event
+
+    return aiohttp_processor
+
+
+def _capture_exception():
+    # type: () -> ExcInfo
+    exc_info = sys.exc_info()
+    event, hint = event_from_exception(
+        exc_info,
+        client_options=sentry_sdk.get_client().options,
+        mechanism={"type": "aiohttp", "handled": False},
+    )
+    sentry_sdk.capture_event(event, hint=hint)
+    return exc_info
+
+
+BODY_NOT_READ_MESSAGE = "[Can't show request body due to implementation details.]"
+
+
+def get_aiohttp_request_data(request):
+    # type: (Request) -> Union[Optional[str], AnnotatedValue]
+    bytes_body = request._read_bytes
+
+    if bytes_body is not None:
+        # we have a body to show
+        if not request_body_within_bounds(sentry_sdk.get_client(), len(bytes_body)):
+            return AnnotatedValue.removed_because_over_size_limit()
+
+        encoding = request.charset or "utf-8"
+        return bytes_body.decode(encoding, "replace")
+
+    if request.can_read_body:
+        # body exists but we can't show it
+        return BODY_NOT_READ_MESSAGE
+
+    # request has no body
+    return None
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/anthropic.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/anthropic.py
new file mode 100644
index 00000000..4cb54309
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/anthropic.py
@@ -0,0 +1,288 @@
+from functools import wraps
+from typing import TYPE_CHECKING
+
+import sentry_sdk
+from sentry_sdk.ai.monitoring import record_token_usage
+from sentry_sdk.consts import OP, SPANDATA
+from sentry_sdk.integrations import _check_minimum_version, DidNotEnable, Integration
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.utils import (
+    capture_internal_exceptions,
+    event_from_exception,
+    package_version,
+)
+
+try:
+    from anthropic.resources import AsyncMessages, Messages
+
+    if TYPE_CHECKING:
+        from anthropic.types import MessageStreamEvent
+except ImportError:
+    raise DidNotEnable("Anthropic not installed")
+
+if TYPE_CHECKING:
+    from typing import Any, AsyncIterator, Iterator
+    from sentry_sdk.tracing import Span
+
+
+class AnthropicIntegration(Integration):
+    identifier = "anthropic"
+    origin = f"auto.ai.{identifier}"
+
+    def __init__(self, include_prompts=True):
+        # type: (AnthropicIntegration, bool) -> None
+        self.include_prompts = include_prompts
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        version = package_version("anthropic")
+        _check_minimum_version(AnthropicIntegration, version)
+
+        Messages.create = _wrap_message_create(Messages.create)
+        AsyncMessages.create = _wrap_message_create_async(AsyncMessages.create)
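+
+# Usage sketch (illustrative, placeholder DSN): prompt and response data is
+# only attached when send_default_pii is enabled and include_prompts is True.
+#
+#     sentry_sdk.init(
+#         dsn="https://<key>@<org>.ingest.sentry.io/<project>",  # placeholder
+#         send_default_pii=True,
+#         integrations=[AnthropicIntegration(include_prompts=True)],
+#     )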
+
+
+def _capture_exception(exc):
+    # type: (Any) -> None
+    event, hint = event_from_exception(
+        exc,
+        client_options=sentry_sdk.get_client().options,
+        mechanism={"type": "anthropic", "handled": False},
+    )
+    sentry_sdk.capture_event(event, hint=hint)
+
+
+def _calculate_token_usage(result, span):
+    # type: (Any, Span) -> None
+    input_tokens = 0
+    output_tokens = 0
+    if hasattr(result, "usage"):
+        usage = result.usage
+        if hasattr(usage, "input_tokens") and isinstance(usage.input_tokens, int):
+            input_tokens = usage.input_tokens
+        if hasattr(usage, "output_tokens") and isinstance(usage.output_tokens, int):
+            output_tokens = usage.output_tokens
+
+    total_tokens = input_tokens + output_tokens
+    record_token_usage(span, input_tokens, output_tokens, total_tokens)
+
+
+def _get_responses(content):
+    # type: (list[Any]) -> list[dict[str, Any]]
+    """
+    Get a JSON-serializable list of text responses from Anthropic content blocks.
+    """
+    responses = []
+    for item in content:
+        if hasattr(item, "text"):
+            responses.append(
+                {
+                    "type": item.type,
+                    "text": item.text,
+                }
+            )
+    return responses
+
+
+def _collect_ai_data(event, input_tokens, output_tokens, content_blocks):
+    # type: (MessageStreamEvent, int, int, list[str]) -> tuple[int, int, list[str]]
+    """
+    Count token usage and collect content blocks from the AI streaming response.
+    """
+    with capture_internal_exceptions():
+        if hasattr(event, "type"):
+            if event.type == "message_start":
+                usage = event.message.usage
+                input_tokens += usage.input_tokens
+                output_tokens += usage.output_tokens
+            elif event.type == "content_block_start":
+                pass
+            elif event.type == "content_block_delta":
+                if hasattr(event.delta, "text"):
+                    content_blocks.append(event.delta.text)
+                elif hasattr(event.delta, "partial_json"):
+                    content_blocks.append(event.delta.partial_json)
+            elif event.type == "content_block_stop":
+                pass
+            elif event.type == "message_delta":
+                output_tokens += event.usage.output_tokens
+
+    return input_tokens, output_tokens, content_blocks
+
+
+def _add_ai_data_to_span(
+    span, integration, input_tokens, output_tokens, content_blocks
+):
+    # type: (Span, AnthropicIntegration, int, int, list[str]) -> None
+    """
+    Add token usage and content blocks from the AI streaming response to the span.
+    """
+    with capture_internal_exceptions():
+        if should_send_default_pii() and integration.include_prompts:
+            complete_message = "".join(content_blocks)
+            span.set_data(
+                SPANDATA.AI_RESPONSES,
+                [{"type": "text", "text": complete_message}],
+            )
+        total_tokens = input_tokens + output_tokens
+        record_token_usage(span, input_tokens, output_tokens, total_tokens)
+        span.set_data(SPANDATA.AI_STREAMING, True)
+
+
+def _sentry_patched_create_common(f, *args, **kwargs):
+    # type: (Any, *Any, **Any) -> Any
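+    # This function is a generator driven by the sync/async wrappers below:
+    # it yields (f, args, kwargs) so the caller can invoke the original
+    # function (awaiting it when async), then receives the result back via
+    # gen.send(result), attaches span data, and finishes the span.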
+    integration = kwargs.pop("integration")
+    if integration is None:
+        return f(*args, **kwargs)
+
+    if "messages" not in kwargs:
+        return f(*args, **kwargs)
+
+    try:
+        iter(kwargs["messages"])
+    except TypeError:
+        return f(*args, **kwargs)
+
+    span = sentry_sdk.start_span(
+        op=OP.ANTHROPIC_MESSAGES_CREATE,
+        description="Anthropic messages create",
+        origin=AnthropicIntegration.origin,
+    )
+    span.__enter__()
+
+    result = yield f, args, kwargs
+
+    # add data to span and finish it
+    messages = list(kwargs["messages"])
+    model = kwargs.get("model")
+
+    with capture_internal_exceptions():
+        span.set_data(SPANDATA.AI_MODEL_ID, model)
+        span.set_data(SPANDATA.AI_STREAMING, False)
+
+        if should_send_default_pii() and integration.include_prompts:
+            span.set_data(SPANDATA.AI_INPUT_MESSAGES, messages)
+
+        if hasattr(result, "content"):
+            if should_send_default_pii() and integration.include_prompts:
+                span.set_data(SPANDATA.AI_RESPONSES, _get_responses(result.content))
+            _calculate_token_usage(result, span)
+            span.__exit__(None, None, None)
+
+        # Streaming response
+        elif hasattr(result, "_iterator"):
+            old_iterator = result._iterator
+
+            def new_iterator():
+                # type: () -> Iterator[MessageStreamEvent]
+                input_tokens = 0
+                output_tokens = 0
+                content_blocks = []  # type: list[str]
+
+                for event in old_iterator:
+                    input_tokens, output_tokens, content_blocks = _collect_ai_data(
+                        event, input_tokens, output_tokens, content_blocks
+                    )
+                    if event.type != "message_stop":
+                        yield event
+
+                _add_ai_data_to_span(
+                    span, integration, input_tokens, output_tokens, content_blocks
+                )
+                span.__exit__(None, None, None)
+
+            async def new_iterator_async():
+                # type: () -> AsyncIterator[MessageStreamEvent]
+                input_tokens = 0
+                output_tokens = 0
+                content_blocks = []  # type: list[str]
+
+                async for event in old_iterator:
+                    input_tokens, output_tokens, content_blocks = _collect_ai_data(
+                        event, input_tokens, output_tokens, content_blocks
+                    )
+                    if event.type != "message_stop":
+                        yield event
+
+                _add_ai_data_to_span(
+                    span, integration, input_tokens, output_tokens, content_blocks
+                )
+                span.__exit__(None, None, None)
+
+            if str(type(result._iterator)) == "<class 'async_generator'>":
+                result._iterator = new_iterator_async()
+            else:
+                result._iterator = new_iterator()
+
+        else:
+            span.set_data("unknown_response", True)
+            span.__exit__(None, None, None)
+
+    return result
+
+
+def _wrap_message_create(f):
+    # type: (Any) -> Any
+    def _execute_sync(f, *args, **kwargs):
+        # type: (Any, *Any, **Any) -> Any
+        gen = _sentry_patched_create_common(f, *args, **kwargs)
+
+        try:
+            f, args, kwargs = next(gen)
+        except StopIteration as e:
+            return e.value
+
+        try:
+            try:
+                result = f(*args, **kwargs)
+            except Exception as exc:
+                _capture_exception(exc)
+                raise exc from None
+
+            return gen.send(result)
+        except StopIteration as e:
+            return e.value
+
+    @wraps(f)
+    def _sentry_patched_create_sync(*args, **kwargs):
+        # type: (*Any, **Any) -> Any
+        integration = sentry_sdk.get_client().get_integration(AnthropicIntegration)
+        kwargs["integration"] = integration
+
+        return _execute_sync(f, *args, **kwargs)
+
+    return _sentry_patched_create_sync
+
+
+def _wrap_message_create_async(f):
+    # type: (Any) -> Any
+    async def _execute_async(f, *args, **kwargs):
+        # type: (Any, *Any, **Any) -> Any
+        gen = _sentry_patched_create_common(f, *args, **kwargs)
+
+        try:
+            f, args, kwargs = next(gen)
+        except StopIteration as e:
+            return await e.value
+
+        try:
+            try:
+                result = await f(*args, **kwargs)
+            except Exception as exc:
+                _capture_exception(exc)
+                raise exc from None
+
+            return gen.send(result)
+        except StopIteration as e:
+            return e.value
+
+    @wraps(f)
+    async def _sentry_patched_create_async(*args, **kwargs):
+        # type: (*Any, **Any) -> Any
+        integration = sentry_sdk.get_client().get_integration(AnthropicIntegration)
+        kwargs["integration"] = integration
+
+        return await _execute_async(f, *args, **kwargs)
+
+    return _sentry_patched_create_async
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/argv.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/argv.py
new file mode 100644
index 00000000..315feefb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/argv.py
@@ -0,0 +1,31 @@
+import sys
+
+import sentry_sdk
+from sentry_sdk.integrations import Integration
+from sentry_sdk.scope import add_global_event_processor
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Optional
+
+    from sentry_sdk._types import Event, Hint
+
+
+class ArgvIntegration(Integration):
+    identifier = "argv"
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        @add_global_event_processor
+        def processor(event, hint):
+            # type: (Event, Optional[Hint]) -> Optional[Event]
+            if sentry_sdk.get_client().get_integration(ArgvIntegration) is not None:
+                extra = event.setdefault("extra", {})
+                # If some event processor decided to set extra to e.g. an
+                # `int`, don't crash here.
+                if isinstance(extra, dict):
+                    extra["sys.argv"] = sys.argv
+
+            return event
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/ariadne.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/ariadne.py
new file mode 100644
index 00000000..1a95bc01
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/ariadne.py
@@ -0,0 +1,161 @@
+from importlib import import_module
+
+import sentry_sdk
+from sentry_sdk import get_client, capture_event
+from sentry_sdk.integrations import _check_minimum_version, DidNotEnable, Integration
+from sentry_sdk.integrations.logging import ignore_logger
+from sentry_sdk.integrations._wsgi_common import request_body_within_bounds
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.utils import (
+    capture_internal_exceptions,
+    ensure_integration_enabled,
+    event_from_exception,
+    package_version,
+)
+
+try:
+    # importing like this is necessary due to name shadowing in ariadne
+    # (ariadne.graphql is also a function)
+    ariadne_graphql = import_module("ariadne.graphql")
+except ImportError:
+    raise DidNotEnable("ariadne is not installed")
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any, Dict, List, Optional
+    from ariadne.types import GraphQLError, GraphQLResult, GraphQLSchema, QueryParser  # type: ignore
+    from graphql.language.ast import DocumentNode
+    from sentry_sdk._types import Event, EventProcessor
+
+
+class AriadneIntegration(Integration):
+    identifier = "ariadne"
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        version = package_version("ariadne")
+        _check_minimum_version(AriadneIntegration, version)
+
+        ignore_logger("ariadne")
+
+        _patch_graphql()
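+
+# Usage sketch (illustrative): once enabled, GraphQL errors raised while
+# ariadne handles a query are captured as unhandled events.
+#
+#     sentry_sdk.init(integrations=[AriadneIntegration()])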
+
+
+def _patch_graphql():
+    # type: () -> None
+    old_parse_query = ariadne_graphql.parse_query
+    old_handle_errors = ariadne_graphql.handle_graphql_errors
+    old_handle_query_result = ariadne_graphql.handle_query_result
+
+    @ensure_integration_enabled(AriadneIntegration, old_parse_query)
+    def _sentry_patched_parse_query(context_value, query_parser, data):
+        # type: (Optional[Any], Optional[QueryParser], Any) -> DocumentNode
+        event_processor = _make_request_event_processor(data)
+        sentry_sdk.get_isolation_scope().add_event_processor(event_processor)
+
+        result = old_parse_query(context_value, query_parser, data)
+        return result
+
+    @ensure_integration_enabled(AriadneIntegration, old_handle_errors)
+    def _sentry_patched_handle_graphql_errors(errors, *args, **kwargs):
+        # type: (List[GraphQLError], Any, Any) -> GraphQLResult
+        result = old_handle_errors(errors, *args, **kwargs)
+
+        event_processor = _make_response_event_processor(result[1])
+        sentry_sdk.get_isolation_scope().add_event_processor(event_processor)
+
+        client = get_client()
+        if client.is_active():
+            with capture_internal_exceptions():
+                for error in errors:
+                    event, hint = event_from_exception(
+                        error,
+                        client_options=client.options,
+                        mechanism={
+                            "type": AriadneIntegration.identifier,
+                            "handled": False,
+                        },
+                    )
+                    capture_event(event, hint=hint)
+
+        return result
+
+    @ensure_integration_enabled(AriadneIntegration, old_handle_query_result)
+    def _sentry_patched_handle_query_result(result, *args, **kwargs):
+        # type: (Any, Any, Any) -> GraphQLResult
+        query_result = old_handle_query_result(result, *args, **kwargs)
+
+        event_processor = _make_response_event_processor(query_result[1])
+        sentry_sdk.get_isolation_scope().add_event_processor(event_processor)
+
+        client = get_client()
+        if client.is_active():
+            with capture_internal_exceptions():
+                for error in result.errors or []:
+                    event, hint = event_from_exception(
+                        error,
+                        client_options=client.options,
+                        mechanism={
+                            "type": AriadneIntegration.identifier,
+                            "handled": False,
+                        },
+                    )
+                    capture_event(event, hint=hint)
+
+        return query_result
+
+    ariadne_graphql.parse_query = _sentry_patched_parse_query  # type: ignore
+    ariadne_graphql.handle_graphql_errors = _sentry_patched_handle_graphql_errors  # type: ignore
+    ariadne_graphql.handle_query_result = _sentry_patched_handle_query_result  # type: ignore
+
+
+def _make_request_event_processor(data):
+    # type: (Any) -> EventProcessor
+    """Add request data and api_target to events."""
+
+    def inner(event, hint):
+        # type: (Event, dict[str, Any]) -> Event
+        if not isinstance(data, dict):
+            return event
+
+        with capture_internal_exceptions():
+            try:
+                content_length = int(
+                    (data.get("headers") or {}).get("Content-Length", 0)
+                )
+            except (TypeError, ValueError):
+                return event
+
+            if should_send_default_pii() and request_body_within_bounds(
+                get_client(), content_length
+            ):
+                request_info = event.setdefault("request", {})
+                request_info["api_target"] = "graphql"
+                request_info["data"] = data
+
+            elif event.get("request", {}).get("data"):
+                del event["request"]["data"]
+
+        return event
+
+    return inner
+
+
+def _make_response_event_processor(response):
+    # type: (Dict[str, Any]) -> EventProcessor
+    """Add response data to the event's response context."""
+
+    def inner(event, hint):
+        # type: (Event, dict[str, Any]) -> Event
+        with capture_internal_exceptions():
+            if should_send_default_pii() and response.get("errors"):
+                contexts = event.setdefault("contexts", {})
+                contexts["response"] = {
+                    "data": response,
+                }
+
+        return event
+
+    return inner
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/arq.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/arq.py
new file mode 100644
index 00000000..1ea8e32f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/arq.py
@@ -0,0 +1,246 @@
+import sys
+
+import sentry_sdk
+from sentry_sdk.consts import OP, SPANSTATUS
+from sentry_sdk.integrations import _check_minimum_version, DidNotEnable, Integration
+from sentry_sdk.integrations.logging import ignore_logger
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.tracing import Transaction, TransactionSource
+from sentry_sdk.utils import (
+    capture_internal_exceptions,
+    ensure_integration_enabled,
+    event_from_exception,
+    SENSITIVE_DATA_SUBSTITUTE,
+    parse_version,
+    reraise,
+)
+
+try:
+    import arq.worker
+    from arq.version import VERSION as ARQ_VERSION
+    from arq.connections import ArqRedis
+    from arq.worker import JobExecutionFailed, Retry, RetryJob, Worker
+except ImportError:
+    raise DidNotEnable("Arq is not installed")
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any, Dict, Optional, Union
+
+    from sentry_sdk._types import EventProcessor, Event, ExcInfo, Hint
+
+    from arq.cron import CronJob
+    from arq.jobs import Job
+    from arq.typing import WorkerCoroutine
+    from arq.worker import Function
+
+ARQ_CONTROL_FLOW_EXCEPTIONS = (JobExecutionFailed, Retry, RetryJob)
+
+
+class ArqIntegration(Integration):
+    identifier = "arq"
+    origin = f"auto.queue.{identifier}"
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+
+        try:
+            if isinstance(ARQ_VERSION, str):
+                version = parse_version(ARQ_VERSION)
+            else:
+                version = ARQ_VERSION.version[:2]
+
+        except (TypeError, ValueError):
+            version = None
+
+        _check_minimum_version(ArqIntegration, version)
+
+        patch_enqueue_job()
+        patch_run_job()
+        patch_create_worker()
+
+        ignore_logger("arq.worker")
+
+
+def patch_enqueue_job():
+    # type: () -> None
+    old_enqueue_job = ArqRedis.enqueue_job
+    original_kwdefaults = old_enqueue_job.__kwdefaults__
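+    # The wrapper below is not wrapped with functools.wraps, so the original
+    # keyword-only defaults are saved here and reassigned to the wrapper
+    # afterwards, in case callers introspect enqueue_job.__kwdefaults__.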
+
+    async def _sentry_enqueue_job(self, function, *args, **kwargs):
+        # type: (ArqRedis, str, *Any, **Any) -> Optional[Job]
+        integration = sentry_sdk.get_client().get_integration(ArqIntegration)
+        if integration is None:
+            return await old_enqueue_job(self, function, *args, **kwargs)
+
+        with sentry_sdk.start_span(
+            op=OP.QUEUE_SUBMIT_ARQ, name=function, origin=ArqIntegration.origin
+        ):
+            return await old_enqueue_job(self, function, *args, **kwargs)
+
+    _sentry_enqueue_job.__kwdefaults__ = original_kwdefaults
+    ArqRedis.enqueue_job = _sentry_enqueue_job
+
+
+def patch_run_job():
+    # type: () -> None
+    old_run_job = Worker.run_job
+
+    async def _sentry_run_job(self, job_id, score):
+        # type: (Worker, str, int) -> None
+        integration = sentry_sdk.get_client().get_integration(ArqIntegration)
+        if integration is None:
+            return await old_run_job(self, job_id, score)
+
+        with sentry_sdk.isolation_scope() as scope:
+            scope._name = "arq"
+            scope.clear_breadcrumbs()
+
+            transaction = Transaction(
+                name="unknown arq task",
+                status="ok",
+                op=OP.QUEUE_TASK_ARQ,
+                source=TransactionSource.TASK,
+                origin=ArqIntegration.origin,
+            )
+
+            with sentry_sdk.start_transaction(transaction):
+                return await old_run_job(self, job_id, score)
+
+    Worker.run_job = _sentry_run_job
+
+
+def _capture_exception(exc_info):
+    # type: (ExcInfo) -> None
+    scope = sentry_sdk.get_current_scope()
+
+    if scope.transaction is not None:
+        if exc_info[0] in ARQ_CONTROL_FLOW_EXCEPTIONS:
+            scope.transaction.set_status(SPANSTATUS.ABORTED)
+            return
+
+        scope.transaction.set_status(SPANSTATUS.INTERNAL_ERROR)
+
+    event, hint = event_from_exception(
+        exc_info,
+        client_options=sentry_sdk.get_client().options,
+        mechanism={"type": ArqIntegration.identifier, "handled": False},
+    )
+    sentry_sdk.capture_event(event, hint=hint)
+
+
+def _make_event_processor(ctx, *args, **kwargs):
+    # type: (Dict[Any, Any], *Any, **Any) -> EventProcessor
+    def event_processor(event, hint):
+        # type: (Event, Hint) -> Optional[Event]
+
+        with capture_internal_exceptions():
+            scope = sentry_sdk.get_current_scope()
+            if scope.transaction is not None:
+                scope.transaction.name = ctx["job_name"]
+                event["transaction"] = ctx["job_name"]
+
+            tags = event.setdefault("tags", {})
+            tags["arq_task_id"] = ctx["job_id"]
+            tags["arq_task_retry"] = ctx["job_try"] > 1
+            extra = event.setdefault("extra", {})
+            extra["arq-job"] = {
+                "task": ctx["job_name"],
+                "args": (
+                    args if should_send_default_pii() else SENSITIVE_DATA_SUBSTITUTE
+                ),
+                "kwargs": (
+                    kwargs if should_send_default_pii() else SENSITIVE_DATA_SUBSTITUTE
+                ),
+                "retry": ctx["job_try"],
+            }
+
+        return event
+
+    return event_processor
+
+
+def _wrap_coroutine(name, coroutine):
+    # type: (str, WorkerCoroutine) -> WorkerCoroutine
+
+    async def _sentry_coroutine(ctx, *args, **kwargs):
+        # type: (Dict[Any, Any], *Any, **Any) -> Any
+        integration = sentry_sdk.get_client().get_integration(ArqIntegration)
+        if integration is None:
+            return await coroutine(ctx, *args, **kwargs)
+
+        sentry_sdk.get_isolation_scope().add_event_processor(
+            _make_event_processor({**ctx, "job_name": name}, *args, **kwargs)
+        )
+
+        try:
+            result = await coroutine(ctx, *args, **kwargs)
+        except Exception:
+            exc_info = sys.exc_info()
+            _capture_exception(exc_info)
+            reraise(*exc_info)
+
+        return result
+
+    return _sentry_coroutine
+
+
+def patch_create_worker():
+    # type: () -> None
+    old_create_worker = arq.worker.create_worker
+
+    @ensure_integration_enabled(ArqIntegration, old_create_worker)
+    def _sentry_create_worker(*args, **kwargs):
+        # type: (*Any, **Any) -> Worker
+        settings_cls = args[0]
+
+        if isinstance(settings_cls, dict):
+            if "functions" in settings_cls:
+                settings_cls["functions"] = [
+                    _get_arq_function(func)
+                    for func in settings_cls.get("functions", [])
+                ]
+            if "cron_jobs" in settings_cls:
+                settings_cls["cron_jobs"] = [
+                    _get_arq_cron_job(cron_job)
+                    for cron_job in settings_cls.get("cron_jobs", [])
+                ]
+
+        if hasattr(settings_cls, "functions"):
+            settings_cls.functions = [
+                _get_arq_function(func) for func in settings_cls.functions
+            ]
+        if hasattr(settings_cls, "cron_jobs"):
+            settings_cls.cron_jobs = [
+                _get_arq_cron_job(cron_job) for cron_job in settings_cls.cron_jobs
+            ]
+
+        if "functions" in kwargs:
+            kwargs["functions"] = [
+                _get_arq_function(func) for func in kwargs.get("functions", [])
+            ]
+        if "cron_jobs" in kwargs:
+            kwargs["cron_jobs"] = [
+                _get_arq_cron_job(cron_job) for cron_job in kwargs.get("cron_jobs", [])
+            ]
+
+        return old_create_worker(*args, **kwargs)
+
+    arq.worker.create_worker = _sentry_create_worker
+
+
+def _get_arq_function(func):
+    # type: (Union[str, Function, WorkerCoroutine]) -> Function
+    arq_func = arq.worker.func(func)
+    arq_func.coroutine = _wrap_coroutine(arq_func.name, arq_func.coroutine)
+
+    return arq_func
+
+
+def _get_arq_cron_job(cron_job):
+    # type: (CronJob) -> CronJob
+    cron_job.coroutine = _wrap_coroutine(cron_job.name, cron_job.coroutine)
+
+    return cron_job
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/asgi.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/asgi.py
new file mode 100644
index 00000000..3569336a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/asgi.py
@@ -0,0 +1,337 @@
+"""
+An ASGI middleware.
+
+Based on Tom Christie's `sentry-asgi <https://github.com/encode/sentry-asgi>`.
+"""
+
+import asyncio
+import inspect
+from copy import deepcopy
+from functools import partial
+
+import sentry_sdk
+from sentry_sdk.api import continue_trace
+from sentry_sdk.consts import OP
+
+from sentry_sdk.integrations._asgi_common import (
+    _get_headers,
+    _get_request_data,
+    _get_url,
+)
+from sentry_sdk.integrations._wsgi_common import (
+    DEFAULT_HTTP_METHODS_TO_CAPTURE,
+    nullcontext,
+)
+from sentry_sdk.sessions import track_session
+from sentry_sdk.tracing import (
+    SOURCE_FOR_STYLE,
+    TransactionSource,
+)
+from sentry_sdk.utils import (
+    ContextVar,
+    event_from_exception,
+    HAS_REAL_CONTEXTVARS,
+    CONTEXTVARS_ERROR_MESSAGE,
+    logger,
+    transaction_from_function,
+    _get_installed_modules,
+)
+from sentry_sdk.tracing import Transaction
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+    from typing import Callable
+    from typing import Dict
+    from typing import Optional
+    from typing import Tuple
+
+    from sentry_sdk._types import Event, Hint
+
+
+_asgi_middleware_applied = ContextVar("sentry_asgi_middleware_applied")
+
+_DEFAULT_TRANSACTION_NAME = "generic ASGI request"
+
+TRANSACTION_STYLE_VALUES = ("endpoint", "url")
+
+
+def _capture_exception(exc, mechanism_type="asgi"):
+    # type: (Any, str) -> None
+
+    event, hint = event_from_exception(
+        exc,
+        client_options=sentry_sdk.get_client().options,
+        mechanism={"type": mechanism_type, "handled": False},
+    )
+    sentry_sdk.capture_event(event, hint=hint)
+
+
+def _looks_like_asgi3(app):
+    # type: (Any) -> bool
+    """
+    Try to figure out if an application object supports ASGI3.
+
+    This is how uvicorn figures out the application version as well.
+    """
+    if inspect.isclass(app):
+        return hasattr(app, "__await__")
+    elif inspect.isfunction(app):
+        return asyncio.iscoroutinefunction(app)
+    else:
+        call = getattr(app, "__call__", None)  # noqa
+        return asyncio.iscoroutinefunction(call)
+
+
+class SentryAsgiMiddleware:
+    __slots__ = (
+        "app",
+        "__call__",
+        "transaction_style",
+        "mechanism_type",
+        "span_origin",
+        "http_methods_to_capture",
+    )
+
+    def __init__(
+        self,
+        app,  # type: Any
+        unsafe_context_data=False,  # type: bool
+        transaction_style="endpoint",  # type: str
+        mechanism_type="asgi",  # type: str
+        span_origin="manual",  # type: str
+        http_methods_to_capture=DEFAULT_HTTP_METHODS_TO_CAPTURE,  # type: Tuple[str, ...]
+    ):
+        # type: (...) -> None
+        """
+        Instrument an ASGI application with Sentry. Attaches HTTP/websocket
+        request data to sent events and provides basic handling for exceptions
+        bubbling up through the middleware.
+
+        :param unsafe_context_data: Disable errors when a proper contextvars installation could not be found. We do not recommend changing this from the default.
+        """
+        if not unsafe_context_data and not HAS_REAL_CONTEXTVARS:
+            # We better have contextvars or we're going to leak state between
+            # requests.
+            raise RuntimeError(
+                "The ASGI middleware for Sentry requires Python 3.7+ "
+                "or the aiocontextvars package." + CONTEXTVARS_ERROR_MESSAGE
+            )
+        if transaction_style not in TRANSACTION_STYLE_VALUES:
+            raise ValueError(
+                "Invalid value for transaction_style: %s (must be in %s)"
+                % (transaction_style, TRANSACTION_STYLE_VALUES)
+            )
+
+        asgi_middleware_while_using_starlette_or_fastapi = (
+            mechanism_type == "asgi" and "starlette" in _get_installed_modules()
+        )
+        if asgi_middleware_while_using_starlette_or_fastapi:
+            logger.warning(
+                "The Sentry Python SDK can now automatically support ASGI frameworks like Starlette and FastAPI. "
+                "Please remove 'SentryAsgiMiddleware' from your project. "
+                "See https://docs.sentry.io/platforms/python/guides/asgi/ for more information."
+            )
+
+        self.transaction_style = transaction_style
+        self.mechanism_type = mechanism_type
+        self.span_origin = span_origin
+        self.app = app
+        self.http_methods_to_capture = http_methods_to_capture
+
+        if _looks_like_asgi3(app):
+            self.__call__ = self._run_asgi3  # type: Callable[..., Any]
+        else:
+            self.__call__ = self._run_asgi2
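+
+    # Usage sketch (illustrative; "app" is any ASGI 2/3 application):
+    #
+    #     app = SentryAsgiMiddleware(app, transaction_style="url")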
+
+    def _run_asgi2(self, scope):
+        # type: (Any) -> Any
+        async def inner(receive, send):
+            # type: (Any, Any) -> Any
+            return await self._run_app(scope, receive, send, asgi_version=2)
+
+        return inner
+
+    async def _run_asgi3(self, scope, receive, send):
+        # type: (Any, Any, Any) -> Any
+        return await self._run_app(scope, receive, send, asgi_version=3)
+
+    async def _run_app(self, scope, receive, send, asgi_version):
+        # type: (Any, Any, Any, int) -> Any
+        is_recursive_asgi_middleware = _asgi_middleware_applied.get(False)
+        is_lifespan = scope["type"] == "lifespan"
+        if is_recursive_asgi_middleware or is_lifespan:
+            try:
+                if asgi_version == 2:
+                    return await self.app(scope)(receive, send)
+                else:
+                    return await self.app(scope, receive, send)
+
+            except Exception as exc:
+                _capture_exception(exc, mechanism_type=self.mechanism_type)
+                raise exc from None
+
+        _asgi_middleware_applied.set(True)
+        try:
+            with sentry_sdk.isolation_scope() as sentry_scope:
+                with track_session(sentry_scope, session_mode="request"):
+                    sentry_scope.clear_breadcrumbs()
+                    sentry_scope._name = "asgi"
+                    processor = partial(self.event_processor, asgi_scope=scope)
+                    sentry_scope.add_event_processor(processor)
+
+                    ty = scope["type"]
+                    (
+                        transaction_name,
+                        transaction_source,
+                    ) = self._get_transaction_name_and_source(
+                        self.transaction_style,
+                        scope,
+                    )
+
+                    method = scope.get("method", "").upper()
+                    transaction = None
+                    if method in self.http_methods_to_capture:
+                        if ty in ("http", "websocket"):
+                            transaction = continue_trace(
+                                _get_headers(scope),
+                                op="{}.server".format(ty),
+                                name=transaction_name,
+                                source=transaction_source,
+                                origin=self.span_origin,
+                            )
+                            logger.debug(
+                                "[ASGI] Created transaction (continuing trace): %s",
+                                transaction,
+                            )
+                        else:
+                            transaction = Transaction(
+                                op=OP.HTTP_SERVER,
+                                name=transaction_name,
+                                source=transaction_source,
+                                origin=self.span_origin,
+                            )
+                            logger.debug(
+                                "[ASGI] Created transaction (new): %s", transaction
+                            )
+
+                        transaction.set_tag("asgi.type", ty)
+                        logger.debug(
+                            "[ASGI] Set transaction name and source on transaction: '%s' / '%s'",
+                            transaction.name,
+                            transaction.source,
+                        )
+
+                    with (
+                        sentry_sdk.start_transaction(
+                            transaction,
+                            custom_sampling_context={"asgi_scope": scope},
+                        )
+                        if transaction is not None
+                        else nullcontext()
+                    ):
+                        logger.debug("[ASGI] Started transaction: %s", transaction)
+                        try:
+
+                            async def _sentry_wrapped_send(event):
+                                # type: (Dict[str, Any]) -> Any
+                                if transaction is not None:
+                                    is_http_response = (
+                                        event.get("type") == "http.response.start"
+                                        and "status" in event
+                                    )
+                                    if is_http_response:
+                                        transaction.set_http_status(event["status"])
+
+                                return await send(event)
+
+                            if asgi_version == 2:
+                                return await self.app(scope)(
+                                    receive, _sentry_wrapped_send
+                                )
+                            else:
+                                return await self.app(
+                                    scope, receive, _sentry_wrapped_send
+                                )
+                        except Exception as exc:
+                            _capture_exception(exc, mechanism_type=self.mechanism_type)
+                            raise exc from None
+        finally:
+            _asgi_middleware_applied.set(False)
+
+    def event_processor(self, event, hint, asgi_scope):
+        # type: (Event, Hint, Any) -> Optional[Event]
+        request_data = event.get("request", {})
+        request_data.update(_get_request_data(asgi_scope))
+        event["request"] = deepcopy(request_data)
+
+        # Only set transaction name if not already set by Starlette or FastAPI (or other frameworks)
+        transaction = event.get("transaction")
+        transaction_source = (event.get("transaction_info") or {}).get("source")
+        already_set = (
+            transaction is not None
+            and transaction != _DEFAULT_TRANSACTION_NAME
+            and transaction_source
+            in [
+                TransactionSource.COMPONENT,
+                TransactionSource.ROUTE,
+                TransactionSource.CUSTOM,
+            ]
+        )
+        if not already_set:
+            name, source = self._get_transaction_name_and_source(
+                self.transaction_style, asgi_scope
+            )
+            event["transaction"] = name
+            event["transaction_info"] = {"source": source}
+
+            logger.debug(
+                "[ASGI] Set transaction name and source in event_processor: '%s' / '%s'",
+                event["transaction"],
+                event["transaction_info"]["source"],
+            )
+
+        return event
+
+    # Helper functions.
+    #
+    # Note: Those functions are not public API. If you want to mutate request
+    # data to your liking it's recommended to use the `before_send` callback
+    # for that.
+
+    def _get_transaction_name_and_source(self, transaction_style, asgi_scope):
+        # type: (str, Any) -> Tuple[str, str]
+        name = None
+        source = SOURCE_FOR_STYLE[transaction_style]
+        ty = asgi_scope.get("type")
+
+        if transaction_style == "endpoint":
+            endpoint = asgi_scope.get("endpoint")
+            # Web frameworks like Starlette mutate the ASGI env once routing is
+            # done, which is sometime after the request has started. If we have
+            # an endpoint, overwrite our generic transaction name.
+            if endpoint:
+                name = transaction_from_function(endpoint) or ""
+            else:
+                name = _get_url(asgi_scope, "http" if ty == "http" else "ws", host=None)
+                source = TransactionSource.URL
+
+        elif transaction_style == "url":
+            # FastAPI includes the route object in the scope to let Sentry extract the
+            # path from it for the transaction name
+            route = asgi_scope.get("route")
+            if route:
+                path = getattr(route, "path", None)
+                if path is not None:
+                    name = path
+            else:
+                name = _get_url(asgi_scope, "http" if ty == "http" else "ws", host=None)
+                source = TransactionSource.URL
+
+        if name is None:
+            name = _DEFAULT_TRANSACTION_NAME
+            source = TransactionSource.ROUTE
+
+        return name, source
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/asyncio.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/asyncio.py
new file mode 100644
index 00000000..9326c16e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/asyncio.py
@@ -0,0 +1,144 @@
+import sys
+import signal
+
+import sentry_sdk
+from sentry_sdk.consts import OP
+from sentry_sdk.integrations import Integration, DidNotEnable
+from sentry_sdk.utils import event_from_exception, logger, reraise
+
+try:
+    import asyncio
+    from asyncio.tasks import Task
+except ImportError:
+    raise DidNotEnable("asyncio not available")
+
+from typing import cast, TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+    from collections.abc import Coroutine
+
+    from sentry_sdk._types import ExcInfo
+
+
+def get_name(coro):
+    # type: (Any) -> str
+    return (
+        getattr(coro, "__qualname__", None)
+        or getattr(coro, "__name__", None)
+        or "coroutine without __name__"
+    )
+
+
+def patch_asyncio():
+    # type: () -> None
+    orig_task_factory = None
+    try:
+        loop = asyncio.get_running_loop()
+        orig_task_factory = loop.get_task_factory()
+
+        # Add a shutdown handler to log a helpful message
+        def shutdown_handler():
+            # type: () -> None
+            logger.info(
+                "AsyncIO is shutting down. If you see 'Task was destroyed but it is pending!' "
+                "errors with '_task_with_sentry_span_creation', these are normal during shutdown "
+                "and not a problem with your code or Sentry."
+            )
+
+        try:
+            loop.add_signal_handler(signal.SIGINT, shutdown_handler)
+            loop.add_signal_handler(signal.SIGTERM, shutdown_handler)
+        except (NotImplementedError, AttributeError):
+            # Signal handlers might not be supported on all platforms
+            pass
+
+        def _sentry_task_factory(loop, coro, **kwargs):
+            # type: (asyncio.AbstractEventLoop, Coroutine[Any, Any, Any], Any) -> asyncio.Future[Any]
+
+            async def _task_with_sentry_span_creation():
+                # type: () -> Any
+                result = None
+
+                with sentry_sdk.isolation_scope():
+                    with sentry_sdk.start_span(
+                        op=OP.FUNCTION,
+                        name=get_name(coro),
+                        origin=AsyncioIntegration.origin,
+                    ):
+                        try:
+                            result = await coro
+                        except Exception:
+                            reraise(*_capture_exception())
+
+                return result
+
+            task = None
+
+            # Try to use the user-set task factory (if there is one)
+            if orig_task_factory:
+                task = orig_task_factory(
+                    loop, _task_with_sentry_span_creation(), **kwargs
+                )
+
+            if task is None:
+                # The default task factory in `asyncio` does not have its own function
+                # but is just a couple of lines in `asyncio.base_events.create_task()`.
+                # Those lines are copied here.
+
+                # WARNING:
+                # If the default behavior of the task creation in asyncio changes,
+                # this will break!
+                task = Task(_task_with_sentry_span_creation(), loop=loop, **kwargs)
+                if task._source_traceback:  # type: ignore
+                    del task._source_traceback[-1]  # type: ignore
+
+            # Set the task name to include the original coroutine's name
+            try:
+                cast("asyncio.Task[Any]", task).set_name(
+                    f"{get_name(coro)} (Sentry-wrapped)"
+                )
+            except AttributeError:
+                # set_name might not be available in all Python versions
+                pass
+
+            return task
+
+        loop.set_task_factory(_sentry_task_factory)  # type: ignore
+
+    except RuntimeError:
+        # When there is no running loop, we have nothing to patch.
+        logger.warning(
+            "There is no running asyncio loop so there is nothing Sentry can patch. "
+            "Please make sure you call sentry_sdk.init() within a running "
+            "asyncio loop for the AsyncioIntegration to work. "
+            "See https://docs.sentry.io/platforms/python/integrations/asyncio/"
+        )
+
+
+def _capture_exception():
+    # type: () -> ExcInfo
+    exc_info = sys.exc_info()
+
+    client = sentry_sdk.get_client()
+
+    integration = client.get_integration(AsyncioIntegration)
+    if integration is not None:
+        event, hint = event_from_exception(
+            exc_info,
+            client_options=client.options,
+            mechanism={"type": "asyncio", "handled": False},
+        )
+        sentry_sdk.capture_event(event, hint=hint)
+
+    return exc_info
+
+
+class AsyncioIntegration(Integration):
+    identifier = "asyncio"
+    origin = f"auto.function.{identifier}"
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        patch_asyncio()
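+
+
+# Usage sketch (illustrative): as patch_asyncio() warns, the integration must
+# be initialized from within a running event loop so the Sentry task factory
+# can be installed:
+#
+#     async def main():
+#         sentry_sdk.init(integrations=[AsyncioIntegration()])
+#         ...
+#
+#     asyncio.run(main())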
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/asyncpg.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/asyncpg.py
new file mode 100644
index 00000000..b6b53f46
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/asyncpg.py
@@ -0,0 +1,208 @@
+from __future__ import annotations
+import contextlib
+from typing import Any, TypeVar, Callable, Awaitable, Iterator
+
+import sentry_sdk
+from sentry_sdk.consts import OP, SPANDATA
+from sentry_sdk.integrations import _check_minimum_version, Integration, DidNotEnable
+from sentry_sdk.tracing import Span
+from sentry_sdk.tracing_utils import add_query_source, record_sql_queries
+from sentry_sdk.utils import (
+    ensure_integration_enabled,
+    parse_version,
+    capture_internal_exceptions,
+)
+
+try:
+    import asyncpg  # type: ignore[import-not-found]
+    from asyncpg.cursor import BaseCursor  # type: ignore
+
+except ImportError:
+    raise DidNotEnable("asyncpg not installed.")
+
+
+class AsyncPGIntegration(Integration):
+    identifier = "asyncpg"
+    origin = f"auto.db.{identifier}"
+    _record_params = False
+
+    def __init__(self, *, record_params: bool = False):
+        AsyncPGIntegration._record_params = record_params
+
+    @staticmethod
+    def setup_once() -> None:
+        # asyncpg.__version__ is a string containing the semantic version in the form of "<major>.<minor>.<patch>"
+        asyncpg_version = parse_version(asyncpg.__version__)
+        _check_minimum_version(AsyncPGIntegration, asyncpg_version)
+
+        asyncpg.Connection.execute = _wrap_execute(
+            asyncpg.Connection.execute,
+        )
+
+        asyncpg.Connection._execute = _wrap_connection_method(
+            asyncpg.Connection._execute
+        )
+        asyncpg.Connection._executemany = _wrap_connection_method(
+            asyncpg.Connection._executemany, executemany=True
+        )
+        asyncpg.Connection.cursor = _wrap_cursor_creation(asyncpg.Connection.cursor)
+        asyncpg.Connection.prepare = _wrap_connection_method(asyncpg.Connection.prepare)
+        asyncpg.connect_utils._connect_addr = _wrap_connect_addr(
+            asyncpg.connect_utils._connect_addr
+        )
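+
+# Usage sketch (illustrative): record_params controls whether query parameters
+# are attached to recorded queries (disabled by default).
+#
+#     sentry_sdk.init(integrations=[AsyncPGIntegration(record_params=True)])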
+
+
+T = TypeVar("T")
+
+
+def _wrap_execute(f: Callable[..., Awaitable[T]]) -> Callable[..., Awaitable[T]]:
+    async def _inner(*args: Any, **kwargs: Any) -> T:
+        if sentry_sdk.get_client().get_integration(AsyncPGIntegration) is None:
+            return await f(*args, **kwargs)
+
+        # Avoid recording calls to _execute twice.
+        # Calls to Connection.execute with args also call
+        # Connection._execute, which is recorded separately.
+        # (args[0] is the connection object, args[1] is the query.)
+        if len(args) > 2:
+            return await f(*args, **kwargs)
+
+        query = args[1]
+        with record_sql_queries(
+            cursor=None,
+            query=query,
+            params_list=None,
+            paramstyle=None,
+            executemany=False,
+            span_origin=AsyncPGIntegration.origin,
+        ) as span:
+            res = await f(*args, **kwargs)
+
+        with capture_internal_exceptions():
+            add_query_source(span)
+
+        return res
+
+    return _inner
+
+
+SubCursor = TypeVar("SubCursor", bound=BaseCursor)
+
+
+@contextlib.contextmanager
+def _record(
+    cursor: SubCursor | None,
+    query: str,
+    params_list: tuple[Any, ...] | None,
+    *,
+    executemany: bool = False,
+) -> Iterator[Span]:
+    integration = sentry_sdk.get_client().get_integration(AsyncPGIntegration)
+    if integration is not None and not integration._record_params:
+        params_list = None
+
+    param_style = "pyformat" if params_list else None
+
+    with record_sql_queries(
+        cursor=cursor,
+        query=query,
+        params_list=params_list,
+        paramstyle=param_style,
+        executemany=executemany,
+        record_cursor_repr=cursor is not None,
+        span_origin=AsyncPGIntegration.origin,
+    ) as span:
+        yield span
+
+
+def _wrap_connection_method(
+    f: Callable[..., Awaitable[T]], *, executemany: bool = False
+) -> Callable[..., Awaitable[T]]:
+    async def _inner(*args: Any, **kwargs: Any) -> T:
+        if sentry_sdk.get_client().get_integration(AsyncPGIntegration) is None:
+            return await f(*args, **kwargs)
+        query = args[1]
+        params_list = args[2] if len(args) > 2 else None
+        with _record(None, query, params_list, executemany=executemany) as span:
+            _set_db_data(span, args[0])
+            res = await f(*args, **kwargs)
+
+        return res
+
+    return _inner
+
+
+def _wrap_cursor_creation(f: Callable[..., T]) -> Callable[..., T]:
+    @ensure_integration_enabled(AsyncPGIntegration, f)
+    def _inner(*args: Any, **kwargs: Any) -> T:  # noqa: N807
+        query = args[1]
+        params_list = args[2] if len(args) > 2 else None
+
+        with _record(
+            None,
+            query,
+            params_list,
+            executemany=False,
+        ) as span:
+            _set_db_data(span, args[0])
+            res = f(*args, **kwargs)
+            span.set_data("db.cursor", res)
+
+        return res
+
+    return _inner
+
+
+def _wrap_connect_addr(f: Callable[..., Awaitable[T]]) -> Callable[..., Awaitable[T]]:
+    async def _inner(*args: Any, **kwargs: Any) -> T:
+        if sentry_sdk.get_client().get_integration(AsyncPGIntegration) is None:
+            return await f(*args, **kwargs)
+
+        user = kwargs["params"].user
+        database = kwargs["params"].database
+
+        with sentry_sdk.start_span(
+            op=OP.DB,
+            name="connect",
+            origin=AsyncPGIntegration.origin,
+        ) as span:
+            span.set_data(SPANDATA.DB_SYSTEM, "postgresql")
+            addr = kwargs.get("addr")
+            if addr:
+                try:
+                    span.set_data(SPANDATA.SERVER_ADDRESS, addr[0])
+                    span.set_data(SPANDATA.SERVER_PORT, addr[1])
+                except IndexError:
+                    pass
+            span.set_data(SPANDATA.DB_NAME, database)
+            span.set_data(SPANDATA.DB_USER, user)
+
+            with capture_internal_exceptions():
+                sentry_sdk.add_breadcrumb(
+                    message="connect", category="query", data=span._data
+                )
+            res = await f(*args, **kwargs)
+
+        return res
+
+    return _inner
+
+
+def _set_db_data(span: Span, conn: Any) -> None:
+    span.set_data(SPANDATA.DB_SYSTEM, "postgresql")
+
+    addr = conn._addr
+    if addr:
+        try:
+            span.set_data(SPANDATA.SERVER_ADDRESS, addr[0])
+            span.set_data(SPANDATA.SERVER_PORT, addr[1])
+        except IndexError:
+            pass
+
+    database = conn._params.database
+    if database:
+        span.set_data(SPANDATA.DB_NAME, database)
+
+    user = conn._params.user
+    if user:
+        span.set_data(SPANDATA.DB_USER, user)
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/atexit.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/atexit.py
new file mode 100644
index 00000000..dfc6d08e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/atexit.py
@@ -0,0 +1,57 @@
+import os
+import sys
+import atexit
+
+import sentry_sdk
+from sentry_sdk.utils import logger
+from sentry_sdk.integrations import Integration
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+    from typing import Optional
+
+
+def default_callback(pending, timeout):
+    # type: (int, int) -> None
+    """This is the default shutdown callback that is set on the options.
+    It prints out a message to stderr that informs the user that some events
+    are still pending and the process is waiting for them to flush out.
+    """
+
+    def echo(msg):
+        # type: (str) -> None
+        sys.stderr.write(msg + "\n")
+
+    echo("Sentry is attempting to send %i pending events" % pending)
+    echo("Waiting up to %s seconds" % timeout)
+    echo("Press Ctrl-%s to quit" % (os.name == "nt" and "Break" or "C"))
+    sys.stderr.flush()
+
+
+class AtexitIntegration(Integration):
+    identifier = "atexit"
+
+    def __init__(self, callback=None):
+        # type: (Optional[Any]) -> None
+        if callback is None:
+            callback = default_callback
+        self.callback = callback
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        @atexit.register
+        def _shutdown():
+            # type: () -> None
+            client = sentry_sdk.get_client()
+            integration = client.get_integration(AtexitIntegration)
+
+            if integration is None:
+                return
+
+            logger.debug("atexit: got shutdown signal")
+            logger.debug("atexit: shutting down client")
+            sentry_sdk.get_isolation_scope().end_session()
+
+            client.close(callback=integration.callback)
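+
+
+# Illustrative usage (a sketch, not part of this module): a custom shutdown
+# callback replaces the default stderr message. The callback receives the
+# number of pending events and the flush timeout in seconds.
+#
+#     import sentry_sdk
+#     from sentry_sdk.integrations.atexit import AtexitIntegration
+#
+#     def quiet_callback(pending, timeout):
+#         pass  # suppress the default "Sentry is attempting..." output
+#
+#     sentry_sdk.init(integrations=[AtexitIntegration(callback=quiet_callback)])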
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/aws_lambda.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/aws_lambda.py
new file mode 100644
index 00000000..4990fd6e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/aws_lambda.py
@@ -0,0 +1,499 @@
+import functools
+import json
+import re
+import sys
+from copy import deepcopy
+from datetime import datetime, timedelta, timezone
+from os import environ
+
+import sentry_sdk
+from sentry_sdk.api import continue_trace
+from sentry_sdk.consts import OP
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.tracing import TransactionSource
+from sentry_sdk.utils import (
+    AnnotatedValue,
+    capture_internal_exceptions,
+    ensure_integration_enabled,
+    event_from_exception,
+    logger,
+    TimeoutThread,
+    reraise,
+)
+from sentry_sdk.integrations import Integration
+from sentry_sdk.integrations._wsgi_common import _filter_headers
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+    from typing import TypeVar
+    from typing import Callable
+    from typing import Optional
+
+    from sentry_sdk._types import EventProcessor, Event, Hint
+
+    F = TypeVar("F", bound=Callable[..., Any])
+
+# Constants
+TIMEOUT_WARNING_BUFFER = 1500  # Buffer time required to send timeout warning to Sentry
+MILLIS_TO_SECONDS = 1000.0
+
+
+def _wrap_init_error(init_error):
+    # type: (F) -> F
+    @ensure_integration_enabled(AwsLambdaIntegration, init_error)
+    def sentry_init_error(*args, **kwargs):
+        # type: (*Any, **Any) -> Any
+        client = sentry_sdk.get_client()
+
+        with capture_internal_exceptions():
+            sentry_sdk.get_isolation_scope().clear_breadcrumbs()
+
+            exc_info = sys.exc_info()
+            if exc_info and all(exc_info):
+                sentry_event, hint = event_from_exception(
+                    exc_info,
+                    client_options=client.options,
+                    mechanism={"type": "aws_lambda", "handled": False},
+                )
+                sentry_sdk.capture_event(sentry_event, hint=hint)
+
+            else:
+                # Fall back to AWS Lambda's JSON representation of the error
+                error_info = args[1]
+                if isinstance(error_info, str):
+                    error_info = json.loads(error_info)
+                sentry_event = _event_from_error_json(error_info)
+                sentry_sdk.capture_event(sentry_event)
+
+        return init_error(*args, **kwargs)
+
+    return sentry_init_error  # type: ignore
+
+
+def _wrap_handler(handler):
+    # type: (F) -> F
+    @functools.wraps(handler)
+    def sentry_handler(aws_event, aws_context, *args, **kwargs):
+        # type: (Any, Any, *Any, **Any) -> Any
+
+        # Per https://docs.aws.amazon.com/lambda/latest/dg/python-handler.html,
+        # `event` here is *likely* a dictionary, but also might be a number of
+        # other types (str, int, float, None).
+        #
+        # In some cases, it is a list (if the user is batch-invoking their
+        # function, for example), in which case we'll use the first entry as a
+        # representative from which to try pulling request data. (Presumably it
+        # will be the same for all events in the list, since they're all hitting
+        # the lambda in the same request.)
+
+        client = sentry_sdk.get_client()
+        integration = client.get_integration(AwsLambdaIntegration)
+
+        if integration is None:
+            return handler(aws_event, aws_context, *args, **kwargs)
+
+        if isinstance(aws_event, list) and len(aws_event) >= 1:
+            request_data = aws_event[0]
+            batch_size = len(aws_event)
+        else:
+            request_data = aws_event
+            batch_size = 1
+
+        if not isinstance(request_data, dict):
+            # If we're not dealing with a dictionary, we won't be able to get
+            # headers, path, HTTP method, etc. in any case, so it's fine that
+            # this is empty.
+            request_data = {}
+
+        configured_time = aws_context.get_remaining_time_in_millis()
+
+        with sentry_sdk.isolation_scope() as scope:
+            timeout_thread = None
+            with capture_internal_exceptions():
+                scope.clear_breadcrumbs()
+                scope.add_event_processor(
+                    _make_request_event_processor(
+                        request_data, aws_context, configured_time
+                    )
+                )
+                scope.set_tag(
+                    "aws_region", aws_context.invoked_function_arn.split(":")[3]
+                )
+                if batch_size > 1:
+                    scope.set_tag("batch_request", True)
+                    scope.set_tag("batch_size", batch_size)
+
+                # Start the timeout thread only if the configured time is
+                # greater than the timeout warning buffer and the
+                # timeout_warning parameter is set to True.
+                if (
+                    integration.timeout_warning
+                    and configured_time > TIMEOUT_WARNING_BUFFER
+                ):
+                    waiting_time = (
+                        configured_time - TIMEOUT_WARNING_BUFFER
+                    ) / MILLIS_TO_SECONDS
+
+                    timeout_thread = TimeoutThread(
+                        waiting_time,
+                        configured_time / MILLIS_TO_SECONDS,
+                    )
+
+                    # Start the thread that raises the timeout warning exception
+                    timeout_thread.start()
+
+            headers = request_data.get("headers", {})
+            # Some AWS services (e.g. EventBridge) set headers to a list
+            # or None, so we must ensure it is a dict.
+            if not isinstance(headers, dict):
+                headers = {}
+
+            transaction = continue_trace(
+                headers,
+                op=OP.FUNCTION_AWS,
+                name=aws_context.function_name,
+                source=TransactionSource.COMPONENT,
+                origin=AwsLambdaIntegration.origin,
+            )
+            with sentry_sdk.start_transaction(
+                transaction,
+                custom_sampling_context={
+                    "aws_event": aws_event,
+                    "aws_context": aws_context,
+                },
+            ):
+                try:
+                    return handler(aws_event, aws_context, *args, **kwargs)
+                except Exception:
+                    exc_info = sys.exc_info()
+                    sentry_event, hint = event_from_exception(
+                        exc_info,
+                        client_options=client.options,
+                        mechanism={"type": "aws_lambda", "handled": False},
+                    )
+                    sentry_sdk.capture_event(sentry_event, hint=hint)
+                    reraise(*exc_info)
+                finally:
+                    if timeout_thread:
+                        timeout_thread.stop()
+
+    return sentry_handler  # type: ignore
+
+
+def _drain_queue():
+    # type: () -> None
+    with capture_internal_exceptions():
+        client = sentry_sdk.get_client()
+        integration = client.get_integration(AwsLambdaIntegration)
+        if integration is not None:
+            # Flush out the event queue before AWS kills the
+            # process.
+            client.flush()
+
+
+class AwsLambdaIntegration(Integration):
+    identifier = "aws_lambda"
+    origin = f"auto.function.{identifier}"
+
+    def __init__(self, timeout_warning=False):
+        # type: (bool) -> None
+        self.timeout_warning = timeout_warning
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+
+        lambda_bootstrap = get_lambda_bootstrap()
+        if not lambda_bootstrap:
+            logger.warning(
+                "Not running in AWS Lambda environment, "
+                "AwsLambdaIntegration disabled (could not find bootstrap module)"
+            )
+            return
+
+        if not hasattr(lambda_bootstrap, "handle_event_request"):
+            logger.warning(
+                "Not running in AWS Lambda environment, "
+                "AwsLambdaIntegration disabled (could not find handle_event_request)"
+            )
+            return
+
+        pre_37 = hasattr(lambda_bootstrap, "handle_http_request")  # Python 3.6
+
+        if pre_37:
+            old_handle_event_request = lambda_bootstrap.handle_event_request
+
+            def sentry_handle_event_request(request_handler, *args, **kwargs):
+                # type: (Any, *Any, **Any) -> Any
+                request_handler = _wrap_handler(request_handler)
+                return old_handle_event_request(request_handler, *args, **kwargs)
+
+            lambda_bootstrap.handle_event_request = sentry_handle_event_request
+
+            old_handle_http_request = lambda_bootstrap.handle_http_request
+
+            def sentry_handle_http_request(request_handler, *args, **kwargs):
+                # type: (Any, *Any, **Any) -> Any
+                request_handler = _wrap_handler(request_handler)
+                return old_handle_http_request(request_handler, *args, **kwargs)
+
+            lambda_bootstrap.handle_http_request = sentry_handle_http_request
+
+            # Patch to_json to drain the queue. This should work even when the
+            # SDK is initialized inside the handler.
+
+            old_to_json = lambda_bootstrap.to_json
+
+            def sentry_to_json(*args, **kwargs):
+                # type: (*Any, **Any) -> Any
+                _drain_queue()
+                return old_to_json(*args, **kwargs)
+
+            lambda_bootstrap.to_json = sentry_to_json
+        else:
+            lambda_bootstrap.LambdaRuntimeClient.post_init_error = _wrap_init_error(
+                lambda_bootstrap.LambdaRuntimeClient.post_init_error
+            )
+
+            old_handle_event_request = lambda_bootstrap.handle_event_request
+
+            def sentry_handle_event_request(  # type: ignore
+                lambda_runtime_client, request_handler, *args, **kwargs
+            ):
+                request_handler = _wrap_handler(request_handler)
+                return old_handle_event_request(
+                    lambda_runtime_client, request_handler, *args, **kwargs
+                )
+
+            lambda_bootstrap.handle_event_request = sentry_handle_event_request
+
+            # Patch the runtime client to drain the queue. This should work
+            # even when the SDK is initialized inside the handler.
+
+            def _wrap_post_function(f):
+                # type: (F) -> F
+                def inner(*args, **kwargs):
+                    # type: (*Any, **Any) -> Any
+                    _drain_queue()
+                    return f(*args, **kwargs)
+
+                return inner  # type: ignore
+
+            lambda_bootstrap.LambdaRuntimeClient.post_invocation_result = (
+                _wrap_post_function(
+                    lambda_bootstrap.LambdaRuntimeClient.post_invocation_result
+                )
+            )
+            lambda_bootstrap.LambdaRuntimeClient.post_invocation_error = (
+                _wrap_post_function(
+                    lambda_bootstrap.LambdaRuntimeClient.post_invocation_error
+                )
+            )
+
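+# Illustrative usage in a handler module (a sketch, not part of this module;
+# the DSN is a placeholder). With timeout_warning=True an event is reported
+# shortly before the function would hit its configured timeout.
+#
+#     import sentry_sdk
+#     from sentry_sdk.integrations.aws_lambda import AwsLambdaIntegration
+#
+#     sentry_sdk.init(
+#         dsn="https://<key>@<org>.ingest.sentry.io/<project>",
+#         integrations=[AwsLambdaIntegration(timeout_warning=True)],
+#     )
+#
+#     def handler(event, context):
+#         return {"statusCode": 200}
+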
+
+def get_lambda_bootstrap():
+    # type: () -> Optional[Any]
+
+    # Python 3.7: If the bootstrap module is *already imported*, it is the
+    # one we actually want to use (no idea what's in __main__)
+    #
+    # Python 3.8: bootstrap is also importable, but will be the same file
+    # as __main__ imported under a different name:
+    #
+    #     sys.modules['__main__'].__file__ == sys.modules['bootstrap'].__file__
+    #     sys.modules['__main__'] is not sys.modules['bootstrap']
+    #
+    # Python 3.9: bootstrap is in __main__.awslambdaricmain
+    #
+    # On container builds using the `aws-lambda-python-runtime-interface-client`
+    # (awslambdaric) module, bootstrap is located in sys.modules['__main__'].bootstrap
+    #
+    # Such a setup would then make all monkeypatches useless.
+    if "bootstrap" in sys.modules:
+        return sys.modules["bootstrap"]
+    elif "__main__" in sys.modules:
+        module = sys.modules["__main__"]
+        # python3.9 runtime
+        if hasattr(module, "awslambdaricmain") and hasattr(
+            module.awslambdaricmain, "bootstrap"
+        ):
+            return module.awslambdaricmain.bootstrap
+        elif hasattr(module, "bootstrap"):
+            # awslambdaric python module in container builds
+            return module.bootstrap
+
+        # python3.8 runtime
+        return module
+    else:
+        return None
+
+
+def _make_request_event_processor(aws_event, aws_context, configured_timeout):
+    # type: (Any, Any, Any) -> EventProcessor
+    start_time = datetime.now(timezone.utc)
+
+    def event_processor(sentry_event, hint, start_time=start_time):
+        # type: (Event, Hint, datetime) -> Optional[Event]
+        remaining_time_in_millis = aws_context.get_remaining_time_in_millis()
+        exec_duration = configured_timeout - remaining_time_in_millis
+
+        extra = sentry_event.setdefault("extra", {})
+        extra["lambda"] = {
+            "function_name": aws_context.function_name,
+            "function_version": aws_context.function_version,
+            "invoked_function_arn": aws_context.invoked_function_arn,
+            "aws_request_id": aws_context.aws_request_id,
+            "execution_duration_in_millis": exec_duration,
+            "remaining_time_in_millis": remaining_time_in_milis,
+        }
+
+        extra["cloudwatch logs"] = {
+            "url": _get_cloudwatch_logs_url(aws_context, start_time),
+            "log_group": aws_context.log_group_name,
+            "log_stream": aws_context.log_stream_name,
+        }
+
+        request = sentry_event.get("request", {})
+
+        if "httpMethod" in aws_event:
+            request["method"] = aws_event["httpMethod"]
+
+        request["url"] = _get_url(aws_event, aws_context)
+
+        if "queryStringParameters" in aws_event:
+            request["query_string"] = aws_event["queryStringParameters"]
+
+        if "headers" in aws_event:
+            request["headers"] = _filter_headers(aws_event["headers"])
+
+        if should_send_default_pii():
+            user_info = sentry_event.setdefault("user", {})
+
+            identity = aws_event.get("identity")
+            if identity is None:
+                identity = {}
+
+            user_id = identity.get("userArn")
+            if user_id is not None:
+                user_info.setdefault("id", user_id)
+
+            ip = identity.get("sourceIp")
+            if ip is not None:
+                user_info.setdefault("ip_address", ip)
+
+            if "body" in aws_event:
+                request["data"] = aws_event.get("body", "")
+        else:
+            if aws_event.get("body", None):
+                # Unfortunately there is no way to get a structured body from
+                # the AWS event, so every body is unstructured to us.
+                request["data"] = AnnotatedValue.removed_because_raw_data()
+
+        sentry_event["request"] = deepcopy(request)
+
+        return sentry_event
+
+    return event_processor
+
+
+def _get_url(aws_event, aws_context):
+    # type: (Any, Any) -> str
+    path = aws_event.get("path", None)
+
+    headers = aws_event.get("headers")
+    if headers is None:
+        headers = {}
+
+    host = headers.get("Host", None)
+    proto = headers.get("X-Forwarded-Proto", None)
+    if proto and host and path:
+        return "{}://{}{}".format(proto, host, path)
+    return "awslambda:///{}".format(aws_context.function_name)
+
+
+def _get_cloudwatch_logs_url(aws_context, start_time):
+    # type: (Any, datetime) -> str
+    """
+    Generates a CloudWatchLogs console URL based on the context object
+
+    Arguments:
+        aws_context {Any} -- context from lambda handler
+
+    Returns:
+        str -- AWS Console URL to logs.
+    """
+    formatstring = "%Y-%m-%dT%H:%M:%SZ"
+    region = environ.get("AWS_REGION", "")
+
+    url = (
+        "https://console.{domain}/cloudwatch/home?region={region}"
+        "#logEventViewer:group={log_group};stream={log_stream}"
+        ";start={start_time};end={end_time}"
+    ).format(
+        domain="amazonaws.cn" if region.startswith("cn-") else "aws.amazon.com",
+        region=region,
+        log_group=aws_context.log_group_name,
+        log_stream=aws_context.log_stream_name,
+        start_time=(start_time - timedelta(seconds=1)).strftime(formatstring),
+        end_time=(datetime.now(timezone.utc) + timedelta(seconds=2)).strftime(
+            formatstring
+        ),
+    )
+
+    return url
+
+
+def _parse_formatted_traceback(formatted_tb):
+    # type: (list[str]) -> list[dict[str, Any]]
+    frames = []
+    for frame in formatted_tb:
+        match = re.match(r'File "(.+)", line (\d+), in (.+)', frame.strip())
+        if match:
+            file_name, line_number, func_name = match.groups()
+            line_number = int(line_number)
+            frames.append(
+                {
+                    "filename": file_name,
+                    "function": func_name,
+                    "lineno": line_number,
+                    "vars": None,
+                    "pre_context": None,
+                    "context_line": None,
+                    "post_context": None,
+                }
+            )
+    return frames
+
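+# Example (illustrative, not from the original source): a formatted traceback
+# line such as
+#     '  File "/var/task/handler.py", line 12, in handler'
+# is parsed into
+#     {"filename": "/var/task/handler.py", "function": "handler", "lineno": 12, ...}
+# with the remaining keys set to None.
+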
+
+def _event_from_error_json(error_json):
+    # type: (dict[str, Any]) -> Event
+    """
+    Converts the error JSON from AWS Lambda into a Sentry error event.
+    This is not a full-fledged event, but it is better than nothing.
+
+    This is an example of where AWS creates the error JSON:
+    https://github.com/aws/aws-lambda-python-runtime-interface-client/blob/2.2.1/awslambdaric/bootstrap.py#L479
+    """
+    event = {
+        "level": "error",
+        "exception": {
+            "values": [
+                {
+                    "type": error_json.get("errorType"),
+                    "value": error_json.get("errorMessage"),
+                    "stacktrace": {
+                        "frames": _parse_formatted_traceback(
+                            error_json.get("stackTrace", [])
+                        ),
+                    },
+                    "mechanism": {
+                        "type": "aws_lambda",
+                        "handled": False,
+                    },
+                }
+            ],
+        },
+    }  # type: Event
+
+    return event
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/beam.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/beam.py
new file mode 100644
index 00000000..a2e4553f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/beam.py
@@ -0,0 +1,176 @@
+import sys
+import types
+from functools import wraps
+
+import sentry_sdk
+from sentry_sdk.integrations import Integration
+from sentry_sdk.integrations.logging import ignore_logger
+from sentry_sdk.utils import (
+    capture_internal_exceptions,
+    ensure_integration_enabled,
+    event_from_exception,
+    reraise,
+)
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+    from typing import Iterator
+    from typing import TypeVar
+    from typing import Callable
+
+    from sentry_sdk._types import ExcInfo
+
+    T = TypeVar("T")
+    F = TypeVar("F", bound=Callable[..., Any])
+
+
+WRAPPED_FUNC = "_wrapped_{}_"
+INSPECT_FUNC = "_inspect_{}"  # Required format per apache_beam/transforms/core.py
+USED_FUNC = "_sentry_used_"
+
+
+class BeamIntegration(Integration):
+    identifier = "beam"
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        from apache_beam.transforms.core import DoFn, ParDo  # type: ignore
+
+        ignore_logger("root")
+        ignore_logger("bundle_processor.create")
+
+        function_patches = ["process", "start_bundle", "finish_bundle", "setup"]
+        for func_name in function_patches:
+            setattr(
+                DoFn,
+                INSPECT_FUNC.format(func_name),
+                _wrap_inspect_call(DoFn, func_name),
+            )
+
+        old_init = ParDo.__init__
+
+        def sentry_init_pardo(self, fn, *args, **kwargs):
+            # type: (ParDo, Any, *Any, **Any) -> Any
+            # Do not monkey patch init twice
+            if not getattr(self, "_sentry_is_patched", False):
+                for func_name in function_patches:
+                    if not hasattr(fn, func_name):
+                        continue
+                    wrapped_func = WRAPPED_FUNC.format(func_name)
+
+                    # Check whether the inspect wrapper is set while the
+                    # process function is not, to avoid monkey patching the
+                    # function twice. Also check whether the function is part
+                    # of the object, for backwards compatibility.
+                    process_func = getattr(fn, func_name)
+                    inspect_func = getattr(fn, INSPECT_FUNC.format(func_name))
+                    if not getattr(inspect_func, USED_FUNC, False) and not getattr(
+                        process_func, USED_FUNC, False
+                    ):
+                        setattr(fn, wrapped_func, process_func)
+                        setattr(fn, func_name, _wrap_task_call(process_func))
+
+                self._sentry_is_patched = True
+            old_init(self, fn, *args, **kwargs)
+
+        ParDo.__init__ = sentry_init_pardo
+
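+# Illustrative usage (a sketch, not part of this module): the integration
+# patches DoFn/ParDo when setup_once() runs, so existing pipelines need no
+# changes beyond init().
+#
+#     import sentry_sdk
+#     from sentry_sdk.integrations.beam import BeamIntegration
+#
+#     sentry_sdk.init(integrations=[BeamIntegration()])
+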
+
+def _wrap_inspect_call(cls, func_name):
+    # type: (Any, Any) -> Any
+
+    if not hasattr(cls, func_name):
+        return None
+
+    def _inspect(self):
+        # type: (Any) -> Any
+        """
+        Inspect function overrides the way Beam gets argspec.
+        """
+        wrapped_func = WRAPPED_FUNC.format(func_name)
+        if hasattr(self, wrapped_func):
+            process_func = getattr(self, wrapped_func)
+        else:
+            process_func = getattr(self, func_name)
+            setattr(self, func_name, _wrap_task_call(process_func))
+            setattr(self, wrapped_func, process_func)
+
+        # getfullargspec is deprecated in more recent beam versions and get_function_args_defaults
+        # (which uses Signatures internally) should be used instead.
+        try:
+            from apache_beam.transforms.core import get_function_args_defaults
+
+            return get_function_args_defaults(process_func)
+        except ImportError:
+            from apache_beam.typehints.decorators import getfullargspec  # type: ignore
+
+            return getfullargspec(process_func)
+
+    setattr(_inspect, USED_FUNC, True)
+    return _inspect
+
+
+def _wrap_task_call(func):
+    # type: (F) -> F
+    """
+    Wrap the task call in a try/except block to capture exceptions.
+    """
+
+    @wraps(func)
+    def _inner(*args, **kwargs):
+        # type: (*Any, **Any) -> Any
+        try:
+            gen = func(*args, **kwargs)
+        except Exception:
+            raise_exception()
+
+        if not isinstance(gen, types.GeneratorType):
+            return gen
+        return _wrap_generator_call(gen)
+
+    setattr(_inner, USED_FUNC, True)
+    return _inner  # type: ignore
+
+
+@ensure_integration_enabled(BeamIntegration)
+def _capture_exception(exc_info):
+    # type: (ExcInfo) -> None
+    """
+    Send Beam exception to Sentry.
+    """
+    client = sentry_sdk.get_client()
+
+    event, hint = event_from_exception(
+        exc_info,
+        client_options=client.options,
+        mechanism={"type": "beam", "handled": False},
+    )
+    sentry_sdk.capture_event(event, hint=hint)
+
+
+def raise_exception():
+    # type: () -> None
+    """
+    Raise an exception.
+    """
+    exc_info = sys.exc_info()
+    with capture_internal_exceptions():
+        _capture_exception(exc_info)
+    reraise(*exc_info)
+
+
+def _wrap_generator_call(gen):
+    # type: (Iterator[T]) -> Iterator[T]
+    """
+    Wrap the generator to handle any failures.
+    """
+    while True:
+        try:
+            yield next(gen)
+        except StopIteration:
+            break
+        except Exception:
+            raise_exception()
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/boto3.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/boto3.py
new file mode 100644
index 00000000..0207341f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/boto3.py
@@ -0,0 +1,137 @@
+from functools import partial
+
+import sentry_sdk
+from sentry_sdk.consts import OP, SPANDATA
+from sentry_sdk.integrations import _check_minimum_version, Integration, DidNotEnable
+from sentry_sdk.tracing import Span
+from sentry_sdk.utils import (
+    capture_internal_exceptions,
+    ensure_integration_enabled,
+    parse_url,
+    parse_version,
+)
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+    from typing import Dict
+    from typing import Optional
+    from typing import Type
+
+try:
+    from botocore import __version__ as BOTOCORE_VERSION  # type: ignore
+    from botocore.client import BaseClient  # type: ignore
+    from botocore.response import StreamingBody  # type: ignore
+    from botocore.awsrequest import AWSRequest  # type: ignore
+except ImportError:
+    raise DidNotEnable("botocore is not installed")
+
+
+class Boto3Integration(Integration):
+    identifier = "boto3"
+    origin = f"auto.http.{identifier}"
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        version = parse_version(BOTOCORE_VERSION)
+        _check_minimum_version(Boto3Integration, version, "botocore")
+
+        orig_init = BaseClient.__init__
+
+        def sentry_patched_init(self, *args, **kwargs):
+            # type: (Type[BaseClient], *Any, **Any) -> None
+            orig_init(self, *args, **kwargs)
+            meta = self.meta
+            service_id = meta.service_model.service_id.hyphenize()
+            meta.events.register(
+                "request-created",
+                partial(_sentry_request_created, service_id=service_id),
+            )
+            meta.events.register("after-call", _sentry_after_call)
+            meta.events.register("after-call-error", _sentry_after_call_error)
+
+        BaseClient.__init__ = sentry_patched_init
+
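+# Illustrative usage (a sketch, not part of this module): clients created
+# after init() are patched via BaseClient.__init__, so each botocore request
+# emits an http.client span.
+#
+#     import boto3
+#     import sentry_sdk
+#     from sentry_sdk.integrations.boto3 import Boto3Integration
+#
+#     sentry_sdk.init(integrations=[Boto3Integration()])
+#     s3 = boto3.client("s3")  # calls on this client are now traced
+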
+
+@ensure_integration_enabled(Boto3Integration)
+def _sentry_request_created(service_id, request, operation_name, **kwargs):
+    # type: (str, AWSRequest, str, **Any) -> None
+    description = "aws.%s.%s" % (service_id, operation_name)
+    span = sentry_sdk.start_span(
+        op=OP.HTTP_CLIENT,
+        name=description,
+        origin=Boto3Integration.origin,
+    )
+
+    with capture_internal_exceptions():
+        parsed_url = parse_url(request.url, sanitize=False)
+        span.set_data("aws.request.url", parsed_url.url)
+        span.set_data(SPANDATA.HTTP_QUERY, parsed_url.query)
+        span.set_data(SPANDATA.HTTP_FRAGMENT, parsed_url.fragment)
+
+    span.set_tag("aws.service_id", service_id)
+    span.set_tag("aws.operation_name", operation_name)
+    span.set_data(SPANDATA.HTTP_METHOD, request.method)
+
+    # We enter the span manually so that subsequent HTTP calls/retries
+    # are attached to this span.
+    span.__enter__()
+
+    # request.context is an open-ended data structure where we can
+    # add anything useful during the request life cycle.
+    request.context["_sentrysdk_span"] = span
+
+
+def _sentry_after_call(context, parsed, **kwargs):
+    # type: (Dict[str, Any], Dict[str, Any], **Any) -> None
+    span = context.pop("_sentrysdk_span", None)  # type: Optional[Span]
+
+    # Span could be absent if the integration is disabled.
+    if span is None:
+        return
+    span.__exit__(None, None, None)
+
+    body = parsed.get("Body")
+    if not isinstance(body, StreamingBody):
+        return
+
+    streaming_span = span.start_child(
+        op=OP.HTTP_CLIENT_STREAM,
+        name=span.description,
+        origin=Boto3Integration.origin,
+    )
+
+    orig_read = body.read
+    orig_close = body.close
+
+    def sentry_streaming_body_read(*args, **kwargs):
+        # type: (*Any, **Any) -> bytes
+        try:
+            ret = orig_read(*args, **kwargs)
+            if not ret:
+                streaming_span.finish()
+            return ret
+        except Exception:
+            streaming_span.finish()
+            raise
+
+    body.read = sentry_streaming_body_read
+
+    def sentry_streaming_body_close(*args, **kwargs):
+        # type: (*Any, **Any) -> None
+        streaming_span.finish()
+        orig_close(*args, **kwargs)
+
+    body.close = sentry_streaming_body_close
+
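+# Note (illustrative, not from the original source): the streaming span stays
+# open while the caller reads the body; it finishes on the first empty read,
+# on close(), or when read() raises.
+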
+
+def _sentry_after_call_error(context, exception, **kwargs):
+    # type: (Dict[str, Any], Type[BaseException], **Any) -> None
+    span = context.pop("_sentrysdk_span", None)  # type: Optional[Span]
+
+    # Span could be absent if the integration is disabled.
+    if span is None:
+        return
+    span.__exit__(type(exception), exception, None)
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/bottle.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/bottle.py
new file mode 100644
index 00000000..8a9fc412
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/bottle.py
@@ -0,0 +1,221 @@
+import functools
+
+import sentry_sdk
+from sentry_sdk.tracing import SOURCE_FOR_STYLE
+from sentry_sdk.utils import (
+    capture_internal_exceptions,
+    ensure_integration_enabled,
+    event_from_exception,
+    parse_version,
+    transaction_from_function,
+)
+from sentry_sdk.integrations import (
+    Integration,
+    DidNotEnable,
+    _DEFAULT_FAILED_REQUEST_STATUS_CODES,
+    _check_minimum_version,
+)
+from sentry_sdk.integrations.wsgi import SentryWsgiMiddleware
+from sentry_sdk.integrations._wsgi_common import RequestExtractor
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from collections.abc import Set
+
+    from sentry_sdk.integrations.wsgi import _ScopedResponse
+    from typing import Any
+    from typing import Dict
+    from typing import Callable
+    from typing import Optional
+    from bottle import FileUpload, FormsDict, LocalRequest  # type: ignore
+
+    from sentry_sdk._types import EventProcessor, Event
+
+try:
+    from bottle import (
+        Bottle,
+        HTTPResponse,
+        Route,
+        request as bottle_request,
+        __version__ as BOTTLE_VERSION,
+    )
+except ImportError:
+    raise DidNotEnable("Bottle not installed")
+
+
+TRANSACTION_STYLE_VALUES = ("endpoint", "url")
+
+
+class BottleIntegration(Integration):
+    identifier = "bottle"
+    origin = f"auto.http.{identifier}"
+
+    transaction_style = ""
+
+    def __init__(
+        self,
+        transaction_style="endpoint",  # type: str
+        *,
+        failed_request_status_codes=_DEFAULT_FAILED_REQUEST_STATUS_CODES,  # type: Set[int]
+    ):
+        # type: (...) -> None
+
+        if transaction_style not in TRANSACTION_STYLE_VALUES:
+            raise ValueError(
+                "Invalid value for transaction_style: %s (must be in %s)"
+                % (transaction_style, TRANSACTION_STYLE_VALUES)
+            )
+        self.transaction_style = transaction_style
+        self.failed_request_status_codes = failed_request_status_codes
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        version = parse_version(BOTTLE_VERSION)
+        _check_minimum_version(BottleIntegration, version)
+
+        old_app = Bottle.__call__
+
+        @ensure_integration_enabled(BottleIntegration, old_app)
+        def sentry_patched_wsgi_app(self, environ, start_response):
+            # type: (Any, Dict[str, str], Callable[..., Any]) -> _ScopedResponse
+            middleware = SentryWsgiMiddleware(
+                lambda *a, **kw: old_app(self, *a, **kw),
+                span_origin=BottleIntegration.origin,
+            )
+
+            return middleware(environ, start_response)
+
+        Bottle.__call__ = sentry_patched_wsgi_app
+
+        old_handle = Bottle._handle
+
+        @functools.wraps(old_handle)
+        def _patched_handle(self, environ):
+            # type: (Bottle, Dict[str, Any]) -> Any
+            integration = sentry_sdk.get_client().get_integration(BottleIntegration)
+            if integration is None:
+                return old_handle(self, environ)
+
+            scope = sentry_sdk.get_isolation_scope()
+            scope._name = "bottle"
+            scope.add_event_processor(
+                _make_request_event_processor(self, bottle_request, integration)
+            )
+            res = old_handle(self, environ)
+
+            return res
+
+        Bottle._handle = _patched_handle
+
+        old_make_callback = Route._make_callback
+
+        @functools.wraps(old_make_callback)
+        def patched_make_callback(self, *args, **kwargs):
+            # type: (Route, *object, **object) -> Any
+            prepared_callback = old_make_callback(self, *args, **kwargs)
+
+            integration = sentry_sdk.get_client().get_integration(BottleIntegration)
+            if integration is None:
+                return prepared_callback
+
+            def wrapped_callback(*args, **kwargs):
+                # type: (*object, **object) -> Any
+                try:
+                    res = prepared_callback(*args, **kwargs)
+                except Exception as exception:
+                    _capture_exception(exception, handled=False)
+                    raise exception
+
+                if (
+                    isinstance(res, HTTPResponse)
+                    and res.status_code in integration.failed_request_status_codes
+                ):
+                    _capture_exception(res, handled=True)
+
+                return res
+
+            return wrapped_callback
+
+        Route._make_callback = patched_make_callback
+
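+# Illustrative usage (a sketch, not part of this module): transaction_style
+# selects how transactions are named ("endpoint" or "url"), and
+# failed_request_status_codes controls which HTTPResponse statuses are
+# reported as errors.
+#
+#     import sentry_sdk
+#     from sentry_sdk.integrations.bottle import BottleIntegration
+#
+#     sentry_sdk.init(
+#         integrations=[BottleIntegration(transaction_style="url")],
+#     )
+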
+
+class BottleRequestExtractor(RequestExtractor):
+    def env(self):
+        # type: () -> Dict[str, str]
+        return self.request.environ
+
+    def cookies(self):
+        # type: () -> Dict[str, str]
+        return self.request.cookies
+
+    def raw_data(self):
+        # type: () -> bytes
+        return self.request.body.read()
+
+    def form(self):
+        # type: () -> FormsDict
+        if self.is_json():
+            return None
+        return self.request.forms.decode()
+
+    def files(self):
+        # type: () -> Optional[Dict[str, str]]
+        if self.is_json():
+            return None
+
+        return self.request.files
+
+    def size_of_file(self, file):
+        # type: (FileUpload) -> int
+        return file.content_length
+
+
+def _set_transaction_name_and_source(event, transaction_style, request):
+    # type: (Event, str, Any) -> None
+    name = ""
+
+    if transaction_style == "url":
+        try:
+            name = request.route.rule or ""
+        except RuntimeError:
+            pass
+
+    elif transaction_style == "endpoint":
+        try:
+            name = (
+                request.route.name
+                or transaction_from_function(request.route.callback)
+                or ""
+            )
+        except RuntimeError:
+            pass
+
+    event["transaction"] = name
+    event["transaction_info"] = {"source": SOURCE_FOR_STYLE[transaction_style]}
+
+
+def _make_request_event_processor(app, request, integration):
+    # type: (Bottle, LocalRequest, BottleIntegration) -> EventProcessor
+
+    def event_processor(event, hint):
+        # type: (Event, dict[str, Any]) -> Event
+        _set_transaction_name_and_source(event, integration.transaction_style, request)
+
+        with capture_internal_exceptions():
+            BottleRequestExtractor(request).extract_into_event(event)
+
+        return event
+
+    return event_processor
+
+
+def _capture_exception(exception, handled):
+    # type: (BaseException, bool) -> None
+    event, hint = event_from_exception(
+        exception,
+        client_options=sentry_sdk.get_client().options,
+        mechanism={"type": "bottle", "handled": handled},
+    )
+    sentry_sdk.capture_event(event, hint=hint)
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/celery/__init__.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/celery/__init__.py
new file mode 100644
index 00000000..e8811d76
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/celery/__init__.py
@@ -0,0 +1,528 @@
+import sys
+from collections.abc import Mapping
+from functools import wraps
+
+import sentry_sdk
+from sentry_sdk import isolation_scope
+from sentry_sdk.api import continue_trace
+from sentry_sdk.consts import OP, SPANSTATUS, SPANDATA
+from sentry_sdk.integrations import _check_minimum_version, Integration, DidNotEnable
+from sentry_sdk.integrations.celery.beat import (
+    _patch_beat_apply_entry,
+    _patch_redbeat_maybe_due,
+    _setup_celery_beat_signals,
+)
+from sentry_sdk.integrations.celery.utils import _now_seconds_since_epoch
+from sentry_sdk.integrations.logging import ignore_logger
+from sentry_sdk.tracing import BAGGAGE_HEADER_NAME, TransactionSource
+from sentry_sdk.tracing_utils import Baggage
+from sentry_sdk.utils import (
+    capture_internal_exceptions,
+    ensure_integration_enabled,
+    event_from_exception,
+    reraise,
+)
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+    from typing import Callable
+    from typing import List
+    from typing import Optional
+    from typing import TypeVar
+    from typing import Union
+
+    from sentry_sdk._types import EventProcessor, Event, Hint, ExcInfo
+    from sentry_sdk.tracing import Span
+
+    F = TypeVar("F", bound=Callable[..., Any])
+
+
+try:
+    from celery import VERSION as CELERY_VERSION  # type: ignore
+    from celery.app.task import Task  # type: ignore
+    from celery.app.trace import task_has_custom
+    from celery.exceptions import (  # type: ignore
+        Ignore,
+        Reject,
+        Retry,
+        SoftTimeLimitExceeded,
+    )
+    from kombu import Producer  # type: ignore
+except ImportError:
+    raise DidNotEnable("Celery not installed")
+
+
+CELERY_CONTROL_FLOW_EXCEPTIONS = (Retry, Ignore, Reject)
+
+
+class CeleryIntegration(Integration):
+    identifier = "celery"
+    origin = f"auto.queue.{identifier}"
+
+    def __init__(
+        self,
+        propagate_traces=True,
+        monitor_beat_tasks=False,
+        exclude_beat_tasks=None,
+    ):
+        # type: (bool, bool, Optional[List[str]]) -> None
+        self.propagate_traces = propagate_traces
+        self.monitor_beat_tasks = monitor_beat_tasks
+        self.exclude_beat_tasks = exclude_beat_tasks
+
+        _patch_beat_apply_entry()
+        _patch_redbeat_maybe_due()
+        _setup_celery_beat_signals(monitor_beat_tasks)
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        _check_minimum_version(CeleryIntegration, CELERY_VERSION)
+
+        _patch_build_tracer()
+        _patch_task_apply_async()
+        _patch_celery_send_task()
+        _patch_worker_exit()
+        _patch_producer_publish()
+
+        # This logger logs every status of every task that ran on the worker,
+        # meaning that every task's breadcrumbs are full of entries like "Task
+        # <foo> raised unexpected <bar>".
+        ignore_logger("celery.worker.job")
+        ignore_logger("celery.app.trace")
+
+        # This is stdout/stderr redirected to a logger; we can't deal with
+        # this (need event_level=logging.WARN to reproduce).
+        ignore_logger("celery.redirected")
+
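+# Illustrative usage (a sketch, not part of this module): with
+# monitor_beat_tasks=True, Celery Beat schedules are auto-instrumented with
+# Sentry Crons check-ins.
+#
+#     import sentry_sdk
+#     from sentry_sdk.integrations.celery import CeleryIntegration
+#
+#     sentry_sdk.init(
+#         integrations=[CeleryIntegration(monitor_beat_tasks=True)],
+#     )
+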
+
+def _set_status(status):
+    # type: (str) -> None
+    with capture_internal_exceptions():
+        scope = sentry_sdk.get_current_scope()
+        if scope.span is not None:
+            scope.span.set_status(status)
+
+
+def _capture_exception(task, exc_info):
+    # type: (Any, ExcInfo) -> None
+    client = sentry_sdk.get_client()
+    if client.get_integration(CeleryIntegration) is None:
+        return
+
+    if isinstance(exc_info[1], CELERY_CONTROL_FLOW_EXCEPTIONS):
+        # ??? Doesn't map to anything
+        _set_status("aborted")
+        return
+
+    _set_status("internal_error")
+
+    if hasattr(task, "throws") and isinstance(exc_info[1], task.throws):
+        return
+
+    event, hint = event_from_exception(
+        exc_info,
+        client_options=client.options,
+        mechanism={"type": "celery", "handled": False},
+    )
+
+    sentry_sdk.capture_event(event, hint=hint)
+
+
+def _make_event_processor(task, uuid, args, kwargs, request=None):
+    # type: (Any, Any, Any, Any, Optional[Any]) -> EventProcessor
+    def event_processor(event, hint):
+        # type: (Event, Hint) -> Optional[Event]
+
+        with capture_internal_exceptions():
+            tags = event.setdefault("tags", {})
+            tags["celery_task_id"] = uuid
+            extra = event.setdefault("extra", {})
+            extra["celery-job"] = {
+                "task_name": task.name,
+                "args": args,
+                "kwargs": kwargs,
+            }
+
+        if "exc_info" in hint:
+            with capture_internal_exceptions():
+                if issubclass(hint["exc_info"][0], SoftTimeLimitExceeded):
+                    event["fingerprint"] = [
+                        "celery",
+                        "SoftTimeLimitExceeded",
+                        getattr(task, "name", task),
+                    ]
+
+        return event
+
+    return event_processor
+
+
+def _update_celery_task_headers(original_headers, span, monitor_beat_tasks):
+    # type: (dict[str, Any], Optional[Span], bool) -> dict[str, Any]
+    """
+    Updates the headers of the Celery task with the tracing information
+    and, if enabled, Sentry Crons monitoring information for beat tasks.
+    """
+    updated_headers = original_headers.copy()
+    with capture_internal_exceptions():
+        # if span is None (when the task was started by Celery Beat)
+        # this will return the trace headers from the scope.
+        headers = dict(
+            sentry_sdk.get_isolation_scope().iter_trace_propagation_headers(span=span)
+        )
+
+        if monitor_beat_tasks:
+            headers.update(
+                {
+                    "sentry-monitor-start-timestamp-s": "%.9f"
+                    % _now_seconds_since_epoch(),
+                }
+            )
+
+        # Add the time the task was enqueued to the headers
+        # This is used in the consumer to calculate the latency
+        updated_headers.update(
+            {"sentry-task-enqueued-time": _now_seconds_since_epoch()}
+        )
+
+        if headers:
+            existing_baggage = updated_headers.get(BAGGAGE_HEADER_NAME)
+            sentry_baggage = headers.get(BAGGAGE_HEADER_NAME)
+
+            combined_baggage = sentry_baggage or existing_baggage
+            if sentry_baggage and existing_baggage:
+                # Merge incoming and sentry baggage, where the sentry trace information
+                # in the incoming baggage takes precedence and the third-party items
+                # are concatenated.
+                incoming = Baggage.from_incoming_header(existing_baggage)
+                combined = Baggage.from_incoming_header(sentry_baggage)
+                combined.sentry_items.update(incoming.sentry_items)
+                combined.third_party_items = ",".join(
+                    [
+                        x
+                        for x in [
+                            combined.third_party_items,
+                            incoming.third_party_items,
+                        ]
+                        if x is not None and x != ""
+                    ]
+                )
+                combined_baggage = combined.serialize(include_third_party=True)
+
+            updated_headers.update(headers)
+            if combined_baggage:
+                updated_headers[BAGGAGE_HEADER_NAME] = combined_baggage
+
+            # https://github.com/celery/celery/issues/4875
+            #
+            # Need to setdefault the inner headers too since other
+            # tracing tools (dd-trace-py) also employ this exact
+            # workaround and we don't want to break them.
+            updated_headers.setdefault("headers", {}).update(headers)
+            if combined_baggage:
+                updated_headers["headers"][BAGGAGE_HEADER_NAME] = combined_baggage
+
+            # Add the Sentry options potentially added in `sentry_apply_entry`
+            # to the headers (done when auto-instrumenting Celery Beat tasks)
+            for key, value in updated_headers.items():
+                if key.startswith("sentry-"):
+                    updated_headers["headers"][key] = value
+
+    return updated_headers
+
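+# Example (illustrative, not from the original source): if the existing
+# baggage is "other-vendor=1,sentry-trace_id=abc" and the freshly generated
+# Sentry baggage is "sentry-trace_id=def", the sentry items from the incoming
+# baggage take precedence and third-party items are kept, so the merged
+# header carries sentry-trace_id=abc alongside other-vendor=1 (serialization
+# order may differ).
+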
+
+class NoOpMgr:
+    def __enter__(self):
+        # type: () -> None
+        return None
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        # type: (Any, Any, Any) -> None
+        return None
+
+
+def _wrap_task_run(f):
+    # type: (F) -> F
+    @wraps(f)
+    def apply_async(*args, **kwargs):
+        # type: (*Any, **Any) -> Any
+        # Note: kwargs can contain headers=None, so no setdefault!
+        # Unsure which backend though.
+        integration = sentry_sdk.get_client().get_integration(CeleryIntegration)
+        if integration is None:
+            return f(*args, **kwargs)
+
+        kwarg_headers = kwargs.get("headers") or {}
+        propagate_traces = kwarg_headers.pop(
+            "sentry-propagate-traces", integration.propagate_traces
+        )
+
+        if not propagate_traces:
+            return f(*args, **kwargs)
+
+        if isinstance(args[0], Task):
+            task_name = args[0].name  # type: str
+        elif len(args) > 1 and isinstance(args[1], str):
+            task_name = args[1]
+        else:
+            task_name = "<unknown Celery task>"
+
+        task_started_from_beat = sentry_sdk.get_isolation_scope()._name == "celery-beat"
+
+        span_mgr = (
+            sentry_sdk.start_span(
+                op=OP.QUEUE_SUBMIT_CELERY,
+                name=task_name,
+                origin=CeleryIntegration.origin,
+            )
+            if not task_started_from_beat
+            else NoOpMgr()
+        )  # type: Union[Span, NoOpMgr]
+
+        with span_mgr as span:
+            kwargs["headers"] = _update_celery_task_headers(
+                kwarg_headers, span, integration.monitor_beat_tasks
+            )
+            return f(*args, **kwargs)
+
+    return apply_async  # type: ignore
+
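+# Illustrative usage (a sketch; my_task is a hypothetical task): trace
+# propagation can be disabled for a single enqueue by passing the special
+# header, which is popped before the message is sent.
+#
+#     my_task.apply_async(args=(1,), headers={"sentry-propagate-traces": False})
+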
+
+def _wrap_tracer(task, f):
+    # type: (Any, F) -> F
+
+    # Need to wrap tracer for pushing the scope before prerun is sent, and
+    # popping it after postrun is sent.
+    #
+    # This is the reason we don't use signals for hooking in the first place.
+    # Also because in Celery 3, signal dispatch returns early if one handler
+    # crashes.
+    @wraps(f)
+    @ensure_integration_enabled(CeleryIntegration, f)
+    def _inner(*args, **kwargs):
+        # type: (*Any, **Any) -> Any
+        with isolation_scope() as scope:
+            scope._name = "celery"
+            scope.clear_breadcrumbs()
+            scope.add_event_processor(_make_event_processor(task, *args, **kwargs))
+
+            transaction = None
+
+            # Celery task objects are not a thing to be trusted. Even
+            # something such as attribute access can fail.
+            with capture_internal_exceptions():
+                headers = args[3].get("headers") or {}
+                transaction = continue_trace(
+                    headers,
+                    op=OP.QUEUE_TASK_CELERY,
+                    name="unknown celery task",
+                    source=TransactionSource.TASK,
+                    origin=CeleryIntegration.origin,
+                )
+                transaction.name = task.name
+                transaction.set_status(SPANSTATUS.OK)
+
+            if transaction is None:
+                return f(*args, **kwargs)
+
+            with sentry_sdk.start_transaction(
+                transaction,
+                custom_sampling_context={
+                    "celery_job": {
+                        "task": task.name,
+                        # for some reason, args[1] is a list if non-empty but a
+                        # tuple if empty
+                        "args": list(args[1]),
+                        "kwargs": args[2],
+                    }
+                },
+            ):
+                return f(*args, **kwargs)
+
+    return _inner  # type: ignore
+
+
+def _set_messaging_destination_name(task, span):
+    # type: (Any, Span) -> None
+    """Set "messaging.destination.name" tag for span"""
+    with capture_internal_exceptions():
+        delivery_info = task.request.delivery_info
+        if delivery_info:
+            routing_key = delivery_info.get("routing_key")
+            if delivery_info.get("exchange") == "" and routing_key is not None:
+                # Empty exchange indicates the default exchange, meaning the tasks
+                # are sent to the queue with the same name as the routing key.
+                span.set_data(SPANDATA.MESSAGING_DESTINATION_NAME, routing_key)
+
+
+def _wrap_task_call(task, f):
+    # type: (Any, F) -> F
+
+    # Need to wrap task call because the exception is caught before we get to
+    # see it. Also celery's reported stacktrace is untrustworthy.
+
+    # functools.wraps is important here because celery-once looks at this
+    # method's name. @ensure_integration_enabled internally calls functools.wraps,
+    # but if we ever remove the @ensure_integration_enabled decorator, we need
+    # to add @functools.wraps(f) here.
+    # https://github.com/getsentry/sentry-python/issues/421
+    @ensure_integration_enabled(CeleryIntegration, f)
+    def _inner(*args, **kwargs):
+        # type: (*Any, **Any) -> Any
+        try:
+            with sentry_sdk.start_span(
+                op=OP.QUEUE_PROCESS,
+                name=task.name,
+                origin=CeleryIntegration.origin,
+            ) as span:
+                _set_messaging_destination_name(task, span)
+
+                latency = None
+                with capture_internal_exceptions():
+                    if (
+                        task.request.headers is not None
+                        and "sentry-task-enqueued-time" in task.request.headers
+                    ):
+                        latency = _now_seconds_since_epoch() - task.request.headers.pop(
+                            "sentry-task-enqueued-time"
+                        )
+
+                if latency is not None:
+                    span.set_data(SPANDATA.MESSAGING_MESSAGE_RECEIVE_LATENCY, latency)
+
+                with capture_internal_exceptions():
+                    span.set_data(SPANDATA.MESSAGING_MESSAGE_ID, task.request.id)
+
+                with capture_internal_exceptions():
+                    span.set_data(
+                        SPANDATA.MESSAGING_MESSAGE_RETRY_COUNT, task.request.retries
+                    )
+
+                with capture_internal_exceptions():
+                    span.set_data(
+                        SPANDATA.MESSAGING_SYSTEM,
+                        task.app.connection().transport.driver_type,
+                    )
+
+                return f(*args, **kwargs)
+        except Exception:
+            exc_info = sys.exc_info()
+            with capture_internal_exceptions():
+                _capture_exception(task, exc_info)
+            reraise(*exc_info)
+
+    return _inner  # type: ignore
+
+
+def _patch_build_tracer():
+    # type: () -> None
+    import celery.app.trace as trace  # type: ignore
+
+    original_build_tracer = trace.build_tracer
+
+    def sentry_build_tracer(name, task, *args, **kwargs):
+        # type: (Any, Any, *Any, **Any) -> Any
+        if not getattr(task, "_sentry_is_patched", False):
+            # determine whether Celery will use __call__ or run and patch
+            # accordingly
+            if task_has_custom(task, "__call__"):
+                type(task).__call__ = _wrap_task_call(task, type(task).__call__)
+            else:
+                task.run = _wrap_task_call(task, task.run)
+
+            # `build_tracer` is apparently called for every task
+            # invocation. Can't wrap every celery task for every invocation
+            # or we will get infinitely nested wrapper functions.
+            task._sentry_is_patched = True
+
+        return _wrap_tracer(task, original_build_tracer(name, task, *args, **kwargs))
+
+    trace.build_tracer = sentry_build_tracer
+
+
+def _patch_task_apply_async():
+    # type: () -> None
+    Task.apply_async = _wrap_task_run(Task.apply_async)
+
+
+def _patch_celery_send_task():
+    # type: () -> None
+    from celery import Celery
+
+    Celery.send_task = _wrap_task_run(Celery.send_task)
+
+
+def _patch_worker_exit():
+    # type: () -> None
+
+    # Need to flush queue before worker shutdown because a crashing worker will
+    # call os._exit
+    from billiard.pool import Worker  # type: ignore
+
+    original_workloop = Worker.workloop
+
+    def sentry_workloop(*args, **kwargs):
+        # type: (*Any, **Any) -> Any
+        try:
+            return original_workloop(*args, **kwargs)
+        finally:
+            with capture_internal_exceptions():
+                if (
+                    sentry_sdk.get_client().get_integration(CeleryIntegration)
+                    is not None
+                ):
+                    sentry_sdk.flush()
+
+    Worker.workloop = sentry_workloop
+
+
+def _patch_producer_publish():
+    # type: () -> None
+    original_publish = Producer.publish
+
+    @ensure_integration_enabled(CeleryIntegration, original_publish)
+    def sentry_publish(self, *args, **kwargs):
+        # type: (Producer, *Any, **Any) -> Any
+        kwargs_headers = kwargs.get("headers", {})
+        if not isinstance(kwargs_headers, Mapping):
+            # Ensure kwargs_headers is a Mapping, so we can safely call get().
+            # We don't expect this to happen, but it's better to be safe. Even
+            # if it does happen, only our instrumentation breaks. This line
+            # does not overwrite kwargs["headers"], so the original publish
+            # method will still work.
+            kwargs_headers = {}
+
+        task_name = kwargs_headers.get("task")
+        task_id = kwargs_headers.get("id")
+        retries = kwargs_headers.get("retries")
+
+        routing_key = kwargs.get("routing_key")
+        exchange = kwargs.get("exchange")
+
+        with sentry_sdk.start_span(
+            op=OP.QUEUE_PUBLISH,
+            name=task_name,
+            origin=CeleryIntegration.origin,
+        ) as span:
+            if task_id is not None:
+                span.set_data(SPANDATA.MESSAGING_MESSAGE_ID, task_id)
+
+            if exchange == "" and routing_key is not None:
+                # Empty exchange indicates the default exchange, meaning messages are
+                # routed to the queue with the same name as the routing key.
+                span.set_data(SPANDATA.MESSAGING_DESTINATION_NAME, routing_key)
+
+            if retries is not None:
+                span.set_data(SPANDATA.MESSAGING_MESSAGE_RETRY_COUNT, retries)
+
+            with capture_internal_exceptions():
+                span.set_data(
+                    SPANDATA.MESSAGING_SYSTEM, self.connection.transport.driver_type
+                )
+
+            return original_publish(self, *args, **kwargs)
+
+    Producer.publish = sentry_publish
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/celery/beat.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/celery/beat.py
new file mode 100644
index 00000000..ddbc8561
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/celery/beat.py
@@ -0,0 +1,293 @@
+import sentry_sdk
+from sentry_sdk.crons import capture_checkin, MonitorStatus
+from sentry_sdk.integrations import DidNotEnable
+from sentry_sdk.integrations.celery.utils import (
+    _get_humanized_interval,
+    _now_seconds_since_epoch,
+)
+from sentry_sdk.utils import (
+    logger,
+    match_regex_list,
+)
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from collections.abc import Callable
+    from typing import Any, Optional, TypeVar, Union
+    from sentry_sdk._types import (
+        MonitorConfig,
+        MonitorConfigScheduleType,
+        MonitorConfigScheduleUnit,
+    )
+
+    F = TypeVar("F", bound=Callable[..., Any])
+
+
+try:
+    from celery import Task, Celery  # type: ignore
+    from celery.beat import Scheduler  # type: ignore
+    from celery.schedules import crontab, schedule  # type: ignore
+    from celery.signals import (  # type: ignore
+        task_failure,
+        task_success,
+        task_retry,
+    )
+except ImportError:
+    raise DidNotEnable("Celery not installed")
+
+try:
+    from redbeat.schedulers import RedBeatScheduler  # type: ignore
+except ImportError:
+    RedBeatScheduler = None
+
+
+def _get_headers(task):
+    # type: (Task) -> dict[str, Any]
+    headers = task.request.get("headers") or {}
+
+    # flatten nested headers
+    if "headers" in headers:
+        headers.update(headers["headers"])
+        del headers["headers"]
+
+    headers.update(task.request.get("properties") or {})
+
+    return headers
+
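+# Illustrative example (hypothetical values): request headers
+# {"id": "42", "headers": {"sentry-monitor-slug": "my-task"}} flatten to
+# {"id": "42", "sentry-monitor-slug": "my-task"}, with any "properties"
+# entries merged on top.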
+
+def _get_monitor_config(celery_schedule, app, monitor_name):
+    # type: (Any, Celery, str) -> MonitorConfig
+    monitor_config = {}  # type: MonitorConfig
+    schedule_type = None  # type: Optional[MonitorConfigScheduleType]
+    schedule_value = None  # type: Optional[Union[str, int]]
+    schedule_unit = None  # type: Optional[MonitorConfigScheduleUnit]
+
+    if isinstance(celery_schedule, crontab):
+        schedule_type = "crontab"
+        schedule_value = (
+            "{0._orig_minute} "
+            "{0._orig_hour} "
+            "{0._orig_day_of_month} "
+            "{0._orig_month_of_year} "
+            "{0._orig_day_of_week}".format(celery_schedule)
+        )
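+        # e.g. crontab(minute="0", hour="*/4") renders as "0 */4 * * *".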
+    elif isinstance(celery_schedule, schedule):
+        schedule_type = "interval"
+        (schedule_value, schedule_unit) = _get_humanized_interval(
+            celery_schedule.seconds
+        )
+
+        if schedule_unit == "second":
+            logger.warning(
+                "Intervals shorter than one minute are not supported by Sentry Crons. Monitor '%s' has an interval of %s seconds. Use the `exclude_beat_tasks` option in the celery integration to exclude it.",
+                monitor_name,
+                schedule_value,
+            )
+            return {}
+
+    else:
+        logger.warning(
+            "Celery schedule type '%s' not supported by Sentry Crons.",
+            type(celery_schedule),
+        )
+        return {}
+
+    monitor_config["schedule"] = {}
+    monitor_config["schedule"]["type"] = schedule_type
+    monitor_config["schedule"]["value"] = schedule_value
+
+    if schedule_unit is not None:
+        monitor_config["schedule"]["unit"] = schedule_unit
+
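+    # `and`/`or` chaining below: prefer str(celery_schedule.tz) when the
+    # schedule carries a timezone, then the app timezone, then "UTC".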
+    monitor_config["timezone"] = (
+        (
+            hasattr(celery_schedule, "tz")
+            and celery_schedule.tz is not None
+            and str(celery_schedule.tz)
+        )
+        or app.timezone
+        or "UTC"
+    )
+
+    return monitor_config
+
+
+def _apply_crons_data_to_schedule_entry(scheduler, schedule_entry, integration):
+    # type: (Any, Any, sentry_sdk.integrations.celery.CeleryIntegration) -> None
+    """
+    Add Sentry Crons information to the schedule_entry headers.
+    """
+    if not integration.monitor_beat_tasks:
+        return
+
+    monitor_name = schedule_entry.name
+
+    task_should_be_excluded = match_regex_list(
+        monitor_name, integration.exclude_beat_tasks
+    )
+    if task_should_be_excluded:
+        return
+
+    celery_schedule = schedule_entry.schedule
+    app = scheduler.app
+
+    monitor_config = _get_monitor_config(celery_schedule, app, monitor_name)
+
+    is_supported_schedule = bool(monitor_config)
+    if not is_supported_schedule:
+        return
+
+    headers = schedule_entry.options.pop("headers", {})
+    headers.update(
+        {
+            "sentry-monitor-slug": monitor_name,
+            "sentry-monitor-config": monitor_config,
+        }
+    )
+
+    check_in_id = capture_checkin(
+        monitor_slug=monitor_name,
+        monitor_config=monitor_config,
+        status=MonitorStatus.IN_PROGRESS,
+    )
+    headers.update({"sentry-monitor-check-in-id": check_in_id})
+
+    # Set the Sentry configuration in the options of the ScheduleEntry.
+    # Those will be picked up in `apply_async` and added to the headers.
+    schedule_entry.options["headers"] = headers
+
+
+def _wrap_beat_scheduler(original_function):
+    # type: (Callable[..., Any]) -> Callable[..., Any]
+    """
+    Makes sure that:
+    - a new Sentry trace is started for each task started by Celery Beat and
+      it is propagated to the task.
+    - the Sentry Crons information is set in the Celery Beat task's
+      headers so that it is monitored with Sentry Crons.
+
+    After the patched function is called,
+    Celery Beat will call apply_async to put the task in the queue.
+    """
+    # Patch only once
+    # Can't use __name__ here, because some of our tests mock original_function
+    already_patched = "sentry_patched_scheduler" in str(original_function)
+    if already_patched:
+        return original_function
+
+    from sentry_sdk.integrations.celery import CeleryIntegration
+
+    def sentry_patched_scheduler(*args, **kwargs):
+        # type: (*Any, **Any) -> None
+        integration = sentry_sdk.get_client().get_integration(CeleryIntegration)
+        if integration is None:
+            return original_function(*args, **kwargs)
+
+        # Tasks started by Celery Beat start a new Trace
+        scope = sentry_sdk.get_isolation_scope()
+        scope.set_new_propagation_context()
+        scope._name = "celery-beat"
+
+        scheduler, schedule_entry = args
+        _apply_crons_data_to_schedule_entry(scheduler, schedule_entry, integration)
+
+        return original_function(*args, **kwargs)
+
+    return sentry_patched_scheduler
+
+
+def _patch_beat_apply_entry():
+    # type: () -> None
+    Scheduler.apply_entry = _wrap_beat_scheduler(Scheduler.apply_entry)
+
+
+def _patch_redbeat_maybe_due():
+    # type: () -> None
+    if RedBeatScheduler is None:
+        return
+
+    RedBeatScheduler.maybe_due = _wrap_beat_scheduler(RedBeatScheduler.maybe_due)
+
+
+def _setup_celery_beat_signals(monitor_beat_tasks):
+    # type: (bool) -> None
+    if monitor_beat_tasks:
+        task_success.connect(crons_task_success)
+        task_failure.connect(crons_task_failure)
+        task_retry.connect(crons_task_retry)
+
+
+def crons_task_success(sender, **kwargs):
+    # type: (Task, dict[Any, Any]) -> None
+    logger.debug("celery_task_success %s", sender)
+    headers = _get_headers(sender)
+
+    if "sentry-monitor-slug" not in headers:
+        return
+
+    monitor_config = headers.get("sentry-monitor-config", {})
+
+    start_timestamp_s = headers.get("sentry-monitor-start-timestamp-s")
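+    # The start timestamp is transported as a string of epoch seconds, hence
+    # the float() conversion below.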
+
+    capture_checkin(
+        monitor_slug=headers["sentry-monitor-slug"],
+        monitor_config=monitor_config,
+        check_in_id=headers["sentry-monitor-check-in-id"],
+        duration=(
+            _now_seconds_since_epoch() - float(start_timestamp_s)
+            if start_timestamp_s
+            else None
+        ),
+        status=MonitorStatus.OK,
+    )
+
+
+def crons_task_failure(sender, **kwargs):
+    # type: (Task, dict[Any, Any]) -> None
+    logger.debug("celery_task_failure %s", sender)
+    headers = _get_headers(sender)
+
+    if "sentry-monitor-slug" not in headers:
+        return
+
+    monitor_config = headers.get("sentry-monitor-config", {})
+
+    start_timestamp_s = headers.get("sentry-monitor-start-timestamp-s")
+
+    capture_checkin(
+        monitor_slug=headers["sentry-monitor-slug"],
+        monitor_config=monitor_config,
+        check_in_id=headers["sentry-monitor-check-in-id"],
+        duration=(
+            _now_seconds_since_epoch() - float(start_timestamp_s)
+            if start_timestamp_s
+            else None
+        ),
+        status=MonitorStatus.ERROR,
+    )
+
+
+def crons_task_retry(sender, **kwargs):
+    # type: (Task, dict[Any, Any]) -> None
+    logger.debug("celery_task_retry %s", sender)
+    headers = _get_headers(sender)
+
+    if "sentry-monitor-slug" not in headers:
+        return
+
+    monitor_config = headers.get("sentry-monitor-config", {})
+
+    start_timestamp_s = headers.get("sentry-monitor-start-timestamp-s")
+
+    capture_checkin(
+        monitor_slug=headers["sentry-monitor-slug"],
+        monitor_config=monitor_config,
+        check_in_id=headers["sentry-monitor-check-in-id"],
+        duration=(
+            _now_seconds_since_epoch() - float(start_timestamp_s)
+            if start_timestamp_s
+            else None
+        ),
+        status=MonitorStatus.ERROR,
+    )
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/celery/utils.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/celery/utils.py
new file mode 100644
index 00000000..a1961b15
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/celery/utils.py
@@ -0,0 +1,43 @@
+import time
+from typing import TYPE_CHECKING, cast
+
+if TYPE_CHECKING:
+    from typing import Any, Tuple
+    from sentry_sdk._types import MonitorConfigScheduleUnit
+
+
+def _now_seconds_since_epoch():
+    # type: () -> float
+    # We cannot use `time.perf_counter()` when dealing with the duration
+    # of a Celery task, because the start of a Celery task and
+    # the end are recorded in different processes.
+    # Start happens in the Celery Beat process,
+    # the end in a Celery Worker process.
+    return time.time()
+
+
+def _get_humanized_interval(seconds):
+    # type: (float) -> Tuple[int, MonitorConfigScheduleUnit]
+    TIME_UNITS = (  # noqa: N806
+        ("day", 60 * 60 * 24.0),
+        ("hour", 60 * 60.0),
+        ("minute", 60.0),
+    )
+
+    seconds = float(seconds)
+    for unit, divider in TIME_UNITS:
+        if seconds >= divider:
+            interval = int(seconds / divider)
+            return (interval, cast("MonitorConfigScheduleUnit", unit))
+
+    return (int(seconds), "second")
+
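+# Illustrative behavior: _get_humanized_interval(7200) == (2, "hour");
+# _get_humanized_interval(45) falls through to (45, "second").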
+
+class NoOpMgr:
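+    """No-op context manager: a stand-in for `with` blocks where no real
+    work (such as starting a span) should happen."""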
+    def __enter__(self):
+        # type: () -> None
+        return None
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        # type: (Any, Any, Any) -> None
+        return None
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/chalice.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/chalice.py
new file mode 100644
index 00000000..947e41eb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/chalice.py
@@ -0,0 +1,134 @@
+import sys
+from functools import wraps
+
+import sentry_sdk
+from sentry_sdk.integrations import Integration, DidNotEnable
+from sentry_sdk.integrations.aws_lambda import _make_request_event_processor
+from sentry_sdk.tracing import TransactionSource
+from sentry_sdk.utils import (
+    capture_internal_exceptions,
+    event_from_exception,
+    parse_version,
+    reraise,
+)
+
+try:
+    import chalice  # type: ignore
+    from chalice import __version__ as CHALICE_VERSION
+    from chalice import Chalice, ChaliceViewError
+    from chalice.app import EventSourceHandler as ChaliceEventSourceHandler  # type: ignore
+except ImportError:
+    raise DidNotEnable("Chalice is not installed")
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+    from typing import Dict
+    from typing import TypeVar
+    from typing import Callable
+
+    F = TypeVar("F", bound=Callable[..., Any])
+
+
+class EventSourceHandler(ChaliceEventSourceHandler):  # type: ignore
+    def __call__(self, event, context):
+        # type: (Any, Any) -> Any
+        client = sentry_sdk.get_client()
+
+        with sentry_sdk.isolation_scope() as scope:
+            with capture_internal_exceptions():
+                configured_time = context.get_remaining_time_in_millis()
+                scope.add_event_processor(
+                    _make_request_event_processor(event, context, configured_time)
+                )
+            try:
+                return ChaliceEventSourceHandler.__call__(self, event, context)
+            except Exception:
+                exc_info = sys.exc_info()
+                event, hint = event_from_exception(
+                    exc_info,
+                    client_options=client.options,
+                    mechanism={"type": "chalice", "handled": False},
+                )
+                sentry_sdk.capture_event(event, hint=hint)
+                client.flush()
+                reraise(*exc_info)
+
+
+def _get_view_function_response(app, view_function, function_args):
+    # type: (Any, F, Any) -> F
+    @wraps(view_function)
+    def wrapped_view_function(**function_args):
+        # type: (**Any) -> Any
+        client = sentry_sdk.get_client()
+        with sentry_sdk.isolation_scope() as scope:
+            with capture_internal_exceptions():
+                configured_time = app.lambda_context.get_remaining_time_in_millis()
+                scope.set_transaction_name(
+                    app.lambda_context.function_name,
+                    source=TransactionSource.COMPONENT,
+                )
+
+                scope.add_event_processor(
+                    _make_request_event_processor(
+                        app.current_request.to_dict(),
+                        app.lambda_context,
+                        configured_time,
+                    )
+                )
+            try:
+                return view_function(**function_args)
+            except Exception as exc:
+                if isinstance(exc, ChaliceViewError):
+                    raise
+                exc_info = sys.exc_info()
+                event, hint = event_from_exception(
+                    exc_info,
+                    client_options=client.options,
+                    mechanism={"type": "chalice", "handled": False},
+                )
+                sentry_sdk.capture_event(event, hint=hint)
+                client.flush()
+                raise
+
+    return wrapped_view_function  # type: ignore
+
+
+class ChaliceIntegration(Integration):
+    identifier = "chalice"
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+
+        version = parse_version(CHALICE_VERSION)
+
+        if version is None:
+            raise DidNotEnable("Unparsable Chalice version: {}".format(CHALICE_VERSION))
+
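+        # Chalice < 1.20 exposes _get_view_function_response on the Chalice app
+        # itself; newer versions moved it to RestAPIEventHandler.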
+        if version < (1, 20):
+            old_get_view_function_response = Chalice._get_view_function_response
+        else:
+            from chalice.app import RestAPIEventHandler
+
+            old_get_view_function_response = (
+                RestAPIEventHandler._get_view_function_response
+            )
+
+        def sentry_event_response(app, view_function, function_args):
+            # type: (Any, F, Dict[str, Any]) -> Any
+            wrapped_view_function = _get_view_function_response(
+                app, view_function, function_args
+            )
+
+            return old_get_view_function_response(
+                app, wrapped_view_function, function_args
+            )
+
+        if version < (1, 20):
+            Chalice._get_view_function_response = sentry_event_response
+        else:
+            RestAPIEventHandler._get_view_function_response = sentry_event_response
+        # for everything else (like events)
+        chalice.app.EventSourceHandler = EventSourceHandler
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/clickhouse_driver.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/clickhouse_driver.py
new file mode 100644
index 00000000..2561bfad
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/clickhouse_driver.py
@@ -0,0 +1,157 @@
+import sentry_sdk
+from sentry_sdk.consts import OP, SPANDATA
+from sentry_sdk.integrations import _check_minimum_version, Integration, DidNotEnable
+from sentry_sdk.tracing import Span
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.utils import capture_internal_exceptions, ensure_integration_enabled
+
+from typing import TYPE_CHECKING, TypeVar
+
+# Hack to get new Python features working in older versions
+# without introducing a hard dependency on `typing_extensions`
+# from: https://stackoverflow.com/a/71944042/300572
+if TYPE_CHECKING:
+    from typing import ParamSpec, Callable
+else:
+    # Fake ParamSpec
+    class ParamSpec:
+        def __init__(self, _):
+            self.args = None
+            self.kwargs = None
+
+    # Callable[anything] will return None
+    class _Callable:
+        def __getitem__(self, _):
+            return None
+
+    # Make instances
+    Callable = _Callable()
+
+
+try:
+    import clickhouse_driver  # type: ignore[import-not-found]
+
+except ImportError:
+    raise DidNotEnable("clickhouse-driver not installed.")
+
+
+class ClickhouseDriverIntegration(Integration):
+    identifier = "clickhouse_driver"
+    origin = f"auto.db.{identifier}"
+
+    @staticmethod
+    def setup_once() -> None:
+        _check_minimum_version(ClickhouseDriverIntegration, clickhouse_driver.VERSION)
+
+        # Every query is done using the Connection's `send_query` function
+        clickhouse_driver.connection.Connection.send_query = _wrap_start(
+            clickhouse_driver.connection.Connection.send_query
+        )
+
+        # If the query contains parameters, the send_data function is used to send those parameters to ClickHouse
+        clickhouse_driver.client.Client.send_data = _wrap_send_data(
+            clickhouse_driver.client.Client.send_data
+        )
+
+        # Every query ends either with the Client's `receive_end_of_query` (no result expected)
+        # or its `receive_result` (result expected)
+        clickhouse_driver.client.Client.receive_end_of_query = _wrap_end(
+            clickhouse_driver.client.Client.receive_end_of_query
+        )
+        if hasattr(clickhouse_driver.client.Client, "receive_end_of_insert_query"):
+            # In 0.2.7, insert queries are handled separately via `receive_end_of_insert_query`
+            clickhouse_driver.client.Client.receive_end_of_insert_query = _wrap_end(
+                clickhouse_driver.client.Client.receive_end_of_insert_query
+            )
+        clickhouse_driver.client.Client.receive_result = _wrap_end(
+            clickhouse_driver.client.Client.receive_result
+        )
+
+
+P = ParamSpec("P")
+T = TypeVar("T")
+
+
+def _wrap_start(f: Callable[P, T]) -> Callable[P, T]:
+    @ensure_integration_enabled(ClickhouseDriverIntegration, f)
+    def _inner(*args: P.args, **kwargs: P.kwargs) -> T:
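+        # send_query is invoked as a bound method, so args[0] is the Connection
+        # and args[1] the query string; query_id and params may arrive either
+        # positionally or as keywords.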
+        connection = args[0]
+        query = args[1]
+        query_id = args[2] if len(args) > 2 else kwargs.get("query_id")
+        params = args[3] if len(args) > 3 else kwargs.get("params")
+
+        span = sentry_sdk.start_span(
+            op=OP.DB,
+            name=query,
+            origin=ClickhouseDriverIntegration.origin,
+        )
+
+        connection._sentry_span = span  # type: ignore[attr-defined]
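+        # The span is deliberately not used as a context manager here: it is
+        # stashed on the connection and finished later in _wrap_end.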
+
+        _set_db_data(span, connection)
+
+        span.set_data("query", query)
+
+        if query_id:
+            span.set_data("db.query_id", query_id)
+
+        if params and should_send_default_pii():
+            span.set_data("db.params", params)
+
+        # run the original code
+        ret = f(*args, **kwargs)
+
+        return ret
+
+    return _inner
+
+
+def _wrap_end(f: Callable[P, T]) -> Callable[P, T]:
+    def _inner_end(*args: P.args, **kwargs: P.kwargs) -> T:
+        res = f(*args, **kwargs)
+        instance = args[0]
+        span = getattr(instance.connection, "_sentry_span", None)  # type: ignore[attr-defined]
+
+        if span is not None:
+            if res is not None and should_send_default_pii():
+                span.set_data("db.result", res)
+
+            with capture_internal_exceptions():
+                span.scope.add_breadcrumb(
+                    message=span._data.pop("query"), category="query", data=span._data
+                )
+
+            span.finish()
+
+        return res
+
+    return _inner_end
+
+
+def _wrap_send_data(f: Callable[P, T]) -> Callable[P, T]:
+    def _inner_send_data(*args: P.args, **kwargs: P.kwargs) -> T:
+        instance = args[0]  # type: clickhouse_driver.client.Client
+        data = args[2]
+        span = getattr(instance.connection, "_sentry_span", None)
+
+        if span is not None:
+            _set_db_data(span, instance.connection)
+
+            if should_send_default_pii():
+                db_params = span._data.get("db.params", [])
+                db_params.extend(data)
+                span.set_data("db.params", db_params)
+
+        return f(*args, **kwargs)
+
+    return _inner_send_data
+
+
+def _set_db_data(
+    span: Span, connection: clickhouse_driver.connection.Connection
+) -> None:
+    span.set_data(SPANDATA.DB_SYSTEM, "clickhouse")
+    span.set_data(SPANDATA.SERVER_ADDRESS, connection.host)
+    span.set_data(SPANDATA.SERVER_PORT, connection.port)
+    span.set_data(SPANDATA.DB_NAME, connection.database)
+    span.set_data(SPANDATA.DB_USER, connection.user)
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/cloud_resource_context.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/cloud_resource_context.py
new file mode 100644
index 00000000..ca5ae47e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/cloud_resource_context.py
@@ -0,0 +1,280 @@
+import json
+import urllib3
+
+from sentry_sdk.integrations import Integration
+from sentry_sdk.api import set_context
+from sentry_sdk.utils import logger
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Dict
+
+
+CONTEXT_TYPE = "cloud_resource"
+
+HTTP_TIMEOUT = 2.0
+
+AWS_METADATA_HOST = "169.254.169.254"
+AWS_TOKEN_URL = "http://{}/latest/api/token".format(AWS_METADATA_HOST)
+AWS_METADATA_URL = "http://{}/latest/dynamic/instance-identity/document".format(
+    AWS_METADATA_HOST
+)
+
+GCP_METADATA_HOST = "metadata.google.internal"
+GCP_METADATA_URL = "http://{}/computeMetadata/v1/?recursive=true".format(
+    GCP_METADATA_HOST
+)
+
+
+class CLOUD_PROVIDER:  # noqa: N801
+    """
+    Name of the cloud provider.
+    see https://opentelemetry.io/docs/reference/specification/resource/semantic_conventions/cloud/
+    """
+
+    ALIBABA = "alibaba_cloud"
+    AWS = "aws"
+    AZURE = "azure"
+    GCP = "gcp"
+    IBM = "ibm_cloud"
+    TENCENT = "tencent_cloud"
+
+
+class CLOUD_PLATFORM:  # noqa: N801
+    """
+    The cloud platform.
+    see https://opentelemetry.io/docs/reference/specification/resource/semantic_conventions/cloud/
+    """
+
+    AWS_EC2 = "aws_ec2"
+    GCP_COMPUTE_ENGINE = "gcp_compute_engine"
+
+
+class CloudResourceContextIntegration(Integration):
+    """
+    Adds cloud resource context to the Sentry scope.
+    """
+
+    identifier = "cloudresourcecontext"
+
+    cloud_provider = ""
+
+    aws_token = ""
+    http = urllib3.PoolManager(timeout=HTTP_TIMEOUT)
+
+    gcp_metadata = None
+
+    def __init__(self, cloud_provider=""):
+        # type: (str) -> None
+        CloudResourceContextIntegration.cloud_provider = cloud_provider
+
+    @classmethod
+    def _is_aws(cls):
+        # type: () -> bool
+        try:
+            r = cls.http.request(
+                "PUT",
+                AWS_TOKEN_URL,
+                headers={"X-aws-ec2-metadata-token-ttl-seconds": "60"},
+            )
+
+            if r.status != 200:
+                return False
+
+            cls.aws_token = r.data.decode()
+            return True
+
+        except urllib3.exceptions.TimeoutError:
+            logger.debug(
+                "AWS metadata service timed out after %s seconds", HTTP_TIMEOUT
+            )
+            return False
+        except Exception as e:
+            logger.debug("Error checking AWS metadata service: %s", str(e))
+            return False
+
+    @classmethod
+    def _get_aws_context(cls):
+        # type: () -> Dict[str, str]
+        ctx = {
+            "cloud.provider": CLOUD_PROVIDER.AWS,
+            "cloud.platform": CLOUD_PLATFORM.AWS_EC2,
+        }
+
+        try:
+            r = cls.http.request(
+                "GET",
+                AWS_METADATA_URL,
+                headers={"X-aws-ec2-metadata-token": cls.aws_token},
+            )
+
+            if r.status != 200:
+                return ctx
+
+            data = json.loads(r.data.decode("utf-8"))
+
+            try:
+                ctx["cloud.account.id"] = data["accountId"]
+            except Exception:
+                pass
+
+            try:
+                ctx["cloud.availability_zone"] = data["availabilityZone"]
+            except Exception:
+                pass
+
+            try:
+                ctx["cloud.region"] = data["region"]
+            except Exception:
+                pass
+
+            try:
+                ctx["host.id"] = data["instanceId"]
+            except Exception:
+                pass
+
+            try:
+                ctx["host.type"] = data["instanceType"]
+            except Exception:
+                pass
+
+        except urllib3.exceptions.TimeoutError:
+            logger.debug(
+                "AWS metadata service timed out after %s seconds", HTTP_TIMEOUT
+            )
+        except Exception as e:
+            logger.debug("Error fetching AWS metadata: %s", str(e))
+
+        return ctx
+
+    @classmethod
+    def _is_gcp(cls):
+        # type: () -> bool
+        try:
+            r = cls.http.request(
+                "GET",
+                GCP_METADATA_URL,
+                headers={"Metadata-Flavor": "Google"},
+            )
+
+            if r.status != 200:
+                return False
+
+            cls.gcp_metadata = json.loads(r.data.decode("utf-8"))
+            return True
+
+        except urllib3.exceptions.TimeoutError:
+            logger.debug(
+                "GCP metadata service timed out after %s seconds", HTTP_TIMEOUT
+            )
+            return False
+        except Exception as e:
+            logger.debug("Error checking GCP metadata service: %s", str(e))
+            return False
+
+    @classmethod
+    def _get_gcp_context(cls):
+        # type: () -> Dict[str, str]
+        ctx = {
+            "cloud.provider": CLOUD_PROVIDER.GCP,
+            "cloud.platform": CLOUD_PLATFORM.GCP_COMPUTE_ENGINE,
+        }
+
+        try:
+            if cls.gcp_metadata is None:
+                r = cls.http.request(
+                    "GET",
+                    GCP_METADATA_URL,
+                    headers={"Metadata-Flavor": "Google"},
+                )
+
+                if r.status != 200:
+                    return ctx
+
+                cls.gcp_metadata = json.loads(r.data.decode("utf-8"))
+
+            try:
+                ctx["cloud.account.id"] = cls.gcp_metadata["project"]["projectId"]
+            except Exception:
+                pass
+
+            try:
+                ctx["cloud.availability_zone"] = cls.gcp_metadata["instance"][
+                    "zone"
+                ].split("/")[-1]
+            except Exception:
+                pass
+
+            try:
+                # only populated in Google Cloud Run
+                ctx["cloud.region"] = cls.gcp_metadata["instance"]["region"].split("/")[
+                    -1
+                ]
+            except Exception:
+                pass
+
+            try:
+                ctx["host.id"] = cls.gcp_metadata["instance"]["id"]
+            except Exception:
+                pass
+
+        except urllib3.exceptions.TimeoutError:
+            logger.debug(
+                "GCP metadata service timed out after %s seconds", HTTP_TIMEOUT
+            )
+        except Exception as e:
+            logger.debug("Error fetching GCP metadata: %s", str(e))
+
+        return ctx
+
+    @classmethod
+    def _get_cloud_provider(cls):
+        # type: () -> str
+        if cls._is_aws():
+            return CLOUD_PROVIDER.AWS
+
+        if cls._is_gcp():
+            return CLOUD_PROVIDER.GCP
+
+        return ""
+
+    @classmethod
+    def _get_cloud_resource_context(cls):
+        # type: () -> Dict[str, str]
+        cloud_provider = (
+            cls.cloud_provider
+            if cls.cloud_provider != ""
+            else CloudResourceContextIntegration._get_cloud_provider()
+        )
+        if cloud_provider in context_getters.keys():
+            return context_getters[cloud_provider]()
+
+        return {}
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        cloud_provider = CloudResourceContextIntegration.cloud_provider
+        unsupported_cloud_provider = (
+            cloud_provider != "" and cloud_provider not in context_getters.keys()
+        )
+
+        if unsupported_cloud_provider:
+            logger.warning(
+                "Invalid value for cloud_provider: %s (must be in %s). Falling back to autodetection...",
+                CloudResourceContextIntegration.cloud_provider,
+                list(context_getters.keys()),
+            )
+
+        context = CloudResourceContextIntegration._get_cloud_resource_context()
+        if context != {}:
+            set_context(CONTEXT_TYPE, context)
+
+
+# Map with the currently supported cloud providers
+# mapping to functions extracting the context
+context_getters = {
+    CLOUD_PROVIDER.AWS: CloudResourceContextIntegration._get_aws_context,
+    CLOUD_PROVIDER.GCP: CloudResourceContextIntegration._get_gcp_context,
+}
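+
+# Illustrative setup, skipping autodetection by naming the provider explicitly:
+# sentry_sdk.init(integrations=[CloudResourceContextIntegration(cloud_provider="aws")])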
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/cohere.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/cohere.py
new file mode 100644
index 00000000..b4c2af91
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/cohere.py
@@ -0,0 +1,270 @@
+from functools import wraps
+
+from sentry_sdk import consts
+from sentry_sdk.ai.monitoring import record_token_usage
+from sentry_sdk.consts import SPANDATA
+from sentry_sdk.ai.utils import set_data_normalized
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any, Callable, Iterator
+    from sentry_sdk.tracing import Span
+
+import sentry_sdk
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.integrations import DidNotEnable, Integration
+from sentry_sdk.utils import capture_internal_exceptions, event_from_exception
+
+try:
+    from cohere.client import Client
+    from cohere.base_client import BaseCohere
+    from cohere import (
+        ChatStreamEndEvent,
+        NonStreamedChatResponse,
+    )
+
+    if TYPE_CHECKING:
+        from cohere import StreamedChatResponse
+except ImportError:
+    raise DidNotEnable("Cohere not installed")
+
+try:
+    # cohere 5.9.3+
+    from cohere import StreamEndStreamedChatResponse
+except ImportError:
+    from cohere import StreamedChatResponse_StreamEnd as StreamEndStreamedChatResponse
+
+
+COLLECTED_CHAT_PARAMS = {
+    "model": SPANDATA.AI_MODEL_ID,
+    "k": SPANDATA.AI_TOP_K,
+    "p": SPANDATA.AI_TOP_P,
+    "seed": SPANDATA.AI_SEED,
+    "frequency_penalty": SPANDATA.AI_FREQUENCY_PENALTY,
+    "presence_penalty": SPANDATA.AI_PRESENCE_PENALTY,
+    "raw_prompting": SPANDATA.AI_RAW_PROMPTING,
+}
+
+COLLECTED_PII_CHAT_PARAMS = {
+    "tools": SPANDATA.AI_TOOLS,
+    "preamble": SPANDATA.AI_PREAMBLE,
+}
+
+COLLECTED_CHAT_RESP_ATTRS = {
+    "generation_id": "ai.generation_id",
+    "is_search_required": "ai.is_search_required",
+    "finish_reason": "ai.finish_reason",
+}
+
+COLLECTED_PII_CHAT_RESP_ATTRS = {
+    "citations": "ai.citations",
+    "documents": "ai.documents",
+    "search_queries": "ai.search_queries",
+    "search_results": "ai.search_results",
+    "tool_calls": "ai.tool_calls",
+}
+
+
+class CohereIntegration(Integration):
+    identifier = "cohere"
+    origin = f"auto.ai.{identifier}"
+
+    def __init__(self, include_prompts=True):
+        # type: (CohereIntegration, bool) -> None
+        self.include_prompts = include_prompts
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        BaseCohere.chat = _wrap_chat(BaseCohere.chat, streaming=False)
+        Client.embed = _wrap_embed(Client.embed)
+        BaseCohere.chat_stream = _wrap_chat(BaseCohere.chat_stream, streaming=True)
+
+
+def _capture_exception(exc):
+    # type: (Any) -> None
+    event, hint = event_from_exception(
+        exc,
+        client_options=sentry_sdk.get_client().options,
+        mechanism={"type": "cohere", "handled": False},
+    )
+    sentry_sdk.capture_event(event, hint=hint)
+
+
+def _wrap_chat(f, streaming):
+    # type: (Callable[..., Any], bool) -> Callable[..., Any]
+
+    def collect_chat_response_fields(span, res, include_pii):
+        # type: (Span, NonStreamedChatResponse, bool) -> None
+        if include_pii:
+            if hasattr(res, "text"):
+                set_data_normalized(
+                    span,
+                    SPANDATA.AI_RESPONSES,
+                    [res.text],
+                )
+            for pii_attr in COLLECTED_PII_CHAT_RESP_ATTRS:
+                if hasattr(res, pii_attr):
+                    set_data_normalized(span, "ai." + pii_attr, getattr(res, pii_attr))
+
+        for attr in COLLECTED_CHAT_RESP_ATTRS:
+            if hasattr(res, attr):
+                set_data_normalized(span, "ai." + attr, getattr(res, attr))
+
+        if hasattr(res, "meta"):
+            if hasattr(res.meta, "billed_units"):
+                record_token_usage(
+                    span,
+                    prompt_tokens=res.meta.billed_units.input_tokens,
+                    completion_tokens=res.meta.billed_units.output_tokens,
+                )
+            elif hasattr(res.meta, "tokens"):
+                record_token_usage(
+                    span,
+                    prompt_tokens=res.meta.tokens.input_tokens,
+                    completion_tokens=res.meta.tokens.output_tokens,
+                )
+
+            if hasattr(res.meta, "warnings"):
+                set_data_normalized(span, "ai.warnings", res.meta.warnings)
+
+    @wraps(f)
+    def new_chat(*args, **kwargs):
+        # type: (*Any, **Any) -> Any
+        integration = sentry_sdk.get_client().get_integration(CohereIntegration)
+
+        if (
+            integration is None
+            or "message" not in kwargs
+            or not isinstance(kwargs.get("message"), str)
+        ):
+            return f(*args, **kwargs)
+
+        message = kwargs.get("message")
+
+        span = sentry_sdk.start_span(
+            op=consts.OP.COHERE_CHAT_COMPLETIONS_CREATE,
+            name="cohere.client.Chat",
+            origin=CohereIntegration.origin,
+        )
+        span.__enter__()
+        try:
+            res = f(*args, **kwargs)
+        except Exception as e:
+            _capture_exception(e)
+            span.__exit__(None, None, None)
+            raise e from None
+
+        with capture_internal_exceptions():
+            if should_send_default_pii() and integration.include_prompts:
+                set_data_normalized(
+                    span,
+                    SPANDATA.AI_INPUT_MESSAGES,
+                    list(
+                        map(
+                            lambda x: {
+                                "role": getattr(x, "role", "").lower(),
+                                "content": getattr(x, "message", ""),
+                            },
+                            kwargs.get("chat_history", []),
+                        )
+                    )
+                    + [{"role": "user", "content": message}],
+                )
+                for k, v in COLLECTED_PII_CHAT_PARAMS.items():
+                    if k in kwargs:
+                        set_data_normalized(span, v, kwargs[k])
+
+            for k, v in COLLECTED_CHAT_PARAMS.items():
+                if k in kwargs:
+                    set_data_normalized(span, v, kwargs[k])
+            set_data_normalized(span, SPANDATA.AI_STREAMING, streaming)
+
+            if streaming:
+                old_iterator = res
+
+                def new_iterator():
+                    # type: () -> Iterator[StreamedChatResponse]
+
+                    with capture_internal_exceptions():
+                        for x in old_iterator:
+                            if isinstance(x, ChatStreamEndEvent) or isinstance(
+                                x, StreamEndStreamedChatResponse
+                            ):
+                                collect_chat_response_fields(
+                                    span,
+                                    x.response,
+                                    include_pii=should_send_default_pii()
+                                    and integration.include_prompts,
+                                )
+                            yield x
+
+                    span.__exit__(None, None, None)
+
+                return new_iterator()
+            elif isinstance(res, NonStreamedChatResponse):
+                collect_chat_response_fields(
+                    span,
+                    res,
+                    include_pii=should_send_default_pii()
+                    and integration.include_prompts,
+                )
+                span.__exit__(None, None, None)
+            else:
+                set_data_normalized(span, "unknown_response", True)
+                span.__exit__(None, None, None)
+            return res
+
+    return new_chat
+
+
+def _wrap_embed(f):
+    # type: (Callable[..., Any]) -> Callable[..., Any]
+
+    @wraps(f)
+    def new_embed(*args, **kwargs):
+        # type: (*Any, **Any) -> Any
+        integration = sentry_sdk.get_client().get_integration(CohereIntegration)
+        if integration is None:
+            return f(*args, **kwargs)
+
+        with sentry_sdk.start_span(
+            op=consts.OP.COHERE_EMBEDDINGS_CREATE,
+            name="Cohere Embedding Creation",
+            origin=CohereIntegration.origin,
+        ) as span:
+            if "texts" in kwargs and (
+                should_send_default_pii() and integration.include_prompts
+            ):
+                if isinstance(kwargs["texts"], str):
+                    set_data_normalized(span, "ai.texts", [kwargs["texts"]])
+                elif (
+                    isinstance(kwargs["texts"], list)
+                    and len(kwargs["texts"]) > 0
+                    and isinstance(kwargs["texts"][0], str)
+                ):
+                    set_data_normalized(
+                        span, SPANDATA.AI_INPUT_MESSAGES, kwargs["texts"]
+                    )
+
+            if "model" in kwargs:
+                set_data_normalized(span, SPANDATA.AI_MODEL_ID, kwargs["model"])
+            try:
+                res = f(*args, **kwargs)
+            except Exception as e:
+                _capture_exception(e)
+                raise e from None
+            if (
+                hasattr(res, "meta")
+                and hasattr(res.meta, "billed_units")
+                and hasattr(res.meta.billed_units, "input_tokens")
+            ):
+                record_token_usage(
+                    span,
+                    prompt_tokens=res.meta.billed_units.input_tokens,
+                    total_tokens=res.meta.billed_units.input_tokens,
+                )
+            return res
+
+    return new_embed
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/dedupe.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/dedupe.py
new file mode 100644
index 00000000..a115e352
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/dedupe.py
@@ -0,0 +1,51 @@
+import sentry_sdk
+from sentry_sdk.utils import ContextVar
+from sentry_sdk.integrations import Integration
+from sentry_sdk.scope import add_global_event_processor
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Optional
+
+    from sentry_sdk._types import Event, Hint
+
+
+class DedupeIntegration(Integration):
+    identifier = "dedupe"
+
+    def __init__(self):
+        # type: () -> None
+        self._last_seen = ContextVar("last-seen")
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        @add_global_event_processor
+        def processor(event, hint):
+            # type: (Event, Optional[Hint]) -> Optional[Event]
+            if hint is None:
+                return event
+
+            integration = sentry_sdk.get_client().get_integration(DedupeIntegration)
+            if integration is None:
+                return event
+
+            exc_info = hint.get("exc_info", None)
+            if exc_info is None:
+                return event
+
+            exc = exc_info[1]
+            if integration._last_seen.get(None) is exc:
+                return None
+            integration._last_seen.set(exc)
+            return event
+
+    @staticmethod
+    def reset_last_seen():
+        # type: () -> None
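+        """Clear the last-seen exception so that a repeated occurrence of the
+        same exception object is reported again (useful e.g. in tests)."""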
+        integration = sentry_sdk.get_client().get_integration(DedupeIntegration)
+        if integration is None:
+            return
+
+        integration._last_seen.set(None)
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/django/__init__.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/django/__init__.py
new file mode 100644
index 00000000..ff67b3e3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/django/__init__.py
@@ -0,0 +1,747 @@
+import inspect
+import sys
+import threading
+import weakref
+from importlib import import_module
+
+import sentry_sdk
+from sentry_sdk.consts import OP, SPANDATA
+from sentry_sdk.scope import add_global_event_processor, should_send_default_pii
+from sentry_sdk.serializer import add_global_repr_processor
+from sentry_sdk.tracing import SOURCE_FOR_STYLE, TransactionSource
+from sentry_sdk.tracing_utils import add_query_source, record_sql_queries
+from sentry_sdk.utils import (
+    AnnotatedValue,
+    HAS_REAL_CONTEXTVARS,
+    CONTEXTVARS_ERROR_MESSAGE,
+    SENSITIVE_DATA_SUBSTITUTE,
+    logger,
+    capture_internal_exceptions,
+    ensure_integration_enabled,
+    event_from_exception,
+    transaction_from_function,
+    walk_exception_chain,
+)
+from sentry_sdk.integrations import _check_minimum_version, Integration, DidNotEnable
+from sentry_sdk.integrations.logging import ignore_logger
+from sentry_sdk.integrations.wsgi import SentryWsgiMiddleware
+from sentry_sdk.integrations._wsgi_common import (
+    DEFAULT_HTTP_METHODS_TO_CAPTURE,
+    RequestExtractor,
+)
+
+try:
+    from django import VERSION as DJANGO_VERSION
+    from django.conf import settings as django_settings
+    from django.core import signals
+    from django.conf import settings
+
+    try:
+        from django.urls import resolve
+    except ImportError:
+        from django.core.urlresolvers import resolve
+
+    try:
+        from django.urls import Resolver404
+    except ImportError:
+        from django.core.urlresolvers import Resolver404
+
+    # Only available in Django 3.0+
+    try:
+        from django.core.handlers.asgi import ASGIRequest
+    except Exception:
+        ASGIRequest = None
+
+except ImportError:
+    raise DidNotEnable("Django not installed")
+
+from sentry_sdk.integrations.django.transactions import LEGACY_RESOLVER
+from sentry_sdk.integrations.django.templates import (
+    get_template_frame_from_exception,
+    patch_templates,
+)
+from sentry_sdk.integrations.django.middleware import patch_django_middlewares
+from sentry_sdk.integrations.django.signals_handlers import patch_signals
+from sentry_sdk.integrations.django.views import patch_views
+
+if DJANGO_VERSION[:2] > (1, 8):
+    from sentry_sdk.integrations.django.caching import patch_caching
+else:
+    patch_caching = None  # type: ignore
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+    from typing import Callable
+    from typing import Dict
+    from typing import Optional
+    from typing import Union
+    from typing import List
+
+    from django.core.handlers.wsgi import WSGIRequest
+    from django.http.response import HttpResponse
+    from django.http.request import QueryDict
+    from django.utils.datastructures import MultiValueDict
+
+    from sentry_sdk.tracing import Span
+    from sentry_sdk.integrations.wsgi import _ScopedResponse
+    from sentry_sdk._types import Event, Hint, EventProcessor, NotImplementedType
+
+
+if DJANGO_VERSION < (1, 10):
+
+    def is_authenticated(request_user):
+        # type: (Any) -> bool
+        return request_user.is_authenticated()
+
+else:
+
+    def is_authenticated(request_user):
+        # type: (Any) -> bool
+        return request_user.is_authenticated
+
+
+TRANSACTION_STYLE_VALUES = ("function_name", "url")
+
+
+class DjangoIntegration(Integration):
+    """
+    Auto instrument a Django application.
+
+    :param transaction_style: How to derive transaction names. Either `"function_name"` or `"url"`. Defaults to `"url"`.
+    :param middleware_spans: Whether to create spans for middleware. Defaults to `True`.
+    :param signals_spans: Whether to create spans for signals. Defaults to `True`.
+    :param signals_denylist: A list of signals to ignore when creating spans.
+    :param cache_spans: Whether to create spans for cache operations. Defaults to `False`.
+    """
+
+    identifier = "django"
+    origin = f"auto.http.{identifier}"
+    origin_db = f"auto.db.{identifier}"
+
+    transaction_style = ""
+    middleware_spans = None
+    signals_spans = None
+    cache_spans = None
+    signals_denylist = []  # type: list[signals.Signal]
+
+    def __init__(
+        self,
+        transaction_style="url",  # type: str
+        middleware_spans=True,  # type: bool
+        signals_spans=True,  # type: bool
+        cache_spans=False,  # type: bool
+        signals_denylist=None,  # type: Optional[list[signals.Signal]]
+        http_methods_to_capture=DEFAULT_HTTP_METHODS_TO_CAPTURE,  # type: tuple[str, ...]
+    ):
+        # type: (...) -> None
+        if transaction_style not in TRANSACTION_STYLE_VALUES:
+            raise ValueError(
+                "Invalid value for transaction_style: %s (must be in %s)"
+                % (transaction_style, TRANSACTION_STYLE_VALUES)
+            )
+        self.transaction_style = transaction_style
+        self.middleware_spans = middleware_spans
+
+        self.signals_spans = signals_spans
+        self.signals_denylist = signals_denylist or []
+
+        self.cache_spans = cache_spans
+
+        self.http_methods_to_capture = tuple(map(str.upper, http_methods_to_capture))
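+        # Normalize casing, e.g. ("get", "post") -> ("GET", "POST").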
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        _check_minimum_version(DjangoIntegration, DJANGO_VERSION)
+
+        install_sql_hook()
+        # Patch in our custom middleware.
+
+        # logs an error for every 500
+        ignore_logger("django.server")
+        ignore_logger("django.request")
+
+        from django.core.handlers.wsgi import WSGIHandler
+
+        old_app = WSGIHandler.__call__
+
+        @ensure_integration_enabled(DjangoIntegration, old_app)
+        def sentry_patched_wsgi_handler(self, environ, start_response):
+            # type: (Any, Dict[str, str], Callable[..., Any]) -> _ScopedResponse
+            bound_old_app = old_app.__get__(self, WSGIHandler)
+
+            from django.conf import settings
+
+            use_x_forwarded_for = settings.USE_X_FORWARDED_HOST
+
+            integration = sentry_sdk.get_client().get_integration(DjangoIntegration)
+
+            middleware = SentryWsgiMiddleware(
+                bound_old_app,
+                use_x_forwarded_for,
+                span_origin=DjangoIntegration.origin,
+                http_methods_to_capture=(
+                    integration.http_methods_to_capture
+                    if integration
+                    else DEFAULT_HTTP_METHODS_TO_CAPTURE
+                ),
+            )
+            return middleware(environ, start_response)
+
+        WSGIHandler.__call__ = sentry_patched_wsgi_handler
+
+        _patch_get_response()
+
+        _patch_django_asgi_handler()
+
+        signals.got_request_exception.connect(_got_request_exception)
+
+        @add_global_event_processor
+        def process_django_templates(event, hint):
+            # type: (Event, Optional[Hint]) -> Optional[Event]
+            if hint is None:
+                return event
+
+            exc_info = hint.get("exc_info", None)
+
+            if exc_info is None:
+                return event
+
+            exception = event.get("exception", None)
+
+            if exception is None:
+                return event
+
+            values = exception.get("values", None)
+
+            if values is None:
+                return event
+
+            for exception, (_, exc_value, _) in zip(
+                reversed(values), walk_exception_chain(exc_info)
+            ):
+                frame = get_template_frame_from_exception(exc_value)
+                if frame is not None:
+                    frames = exception.get("stacktrace", {}).get("frames", [])
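+                    # Walk the frames from the end to find the innermost
+                    # django.template.base parse/render frame and insert the
+                    # synthesized template frame right after it; the for/else
+                    # appends at the end when no such frame exists.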
+
+                    for i in reversed(range(len(frames))):
+                        f = frames[i]
+                        if (
+                            f.get("function") in ("Parser.parse", "parse", "render")
+                            and f.get("module") == "django.template.base"
+                        ):
+                            i += 1
+                            break
+                    else:
+                        i = len(frames)
+
+                    frames.insert(i, frame)
+
+            return event
+
+        @add_global_repr_processor
+        def _django_queryset_repr(value, hint):
+            # type: (Any, Dict[str, Any]) -> Union[NotImplementedType, str]
+            try:
+                # Django 1.6 can fail to import `QuerySet` when Django settings
+                # have not yet been initialized.
+                #
+                # If we fail to import, return `NotImplemented`. It's at least
+                # unlikely that we have a query set in `value` when importing
+                # `QuerySet` fails.
+                from django.db.models.query import QuerySet
+            except Exception:
+                return NotImplemented
+
+            if not isinstance(value, QuerySet) or value._result_cache:
+                return NotImplemented
+
+            return "<%s from %s at 0x%x>" % (
+                value.__class__.__name__,
+                value.__module__,
+                id(value),
+            )
+
+        _patch_channels()
+        patch_django_middlewares()
+        patch_views()
+        patch_templates()
+        patch_signals()
+
+        if patch_caching is not None:
+            patch_caching()
+
+
+_DRF_PATCHED = False
+_DRF_PATCH_LOCK = threading.Lock()
+
+
+def _patch_drf():
+    # type: () -> None
+    """
+    Patch Django Rest Framework for more/better request data. DRF's request
+    type is a wrapper around Django's request type. The attribute we're
+    interested in is `request.data`, which is a cached property containing a
+    parsed request body. Reading a request body from that property is more
+    reliable than reading from any of Django's own properties, as those don't
+    hold payloads in memory and therefore can only be accessed once.
+
+    We patch the Django request object to include a weak backreference to the
+    DRF request object, such that we can later use either in
+    `DjangoRequestExtractor`.
+
+    This function is not called directly on SDK setup, because importing almost
+    any part of Django Rest Framework will try to access Django settings (where
+    `sentry_sdk.init()` might be called from in the first place). Instead we
+    run this function on every request and do the patching on the first
+    request.
+    """
+
+    global _DRF_PATCHED
+
+    if _DRF_PATCHED:
+        # Double-checked locking
+        return
+
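+    # Re-check under the lock so only one thread ever performs the patch;
+    # the unlocked check above is just a fast path.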
+    with _DRF_PATCH_LOCK:
+        if _DRF_PATCHED:
+            return
+
+        # We set this regardless of whether the code below succeeds or fails.
+        # There is no point in trying to patch again on the next request.
+        _DRF_PATCHED = True
+
+        with capture_internal_exceptions():
+            try:
+                from rest_framework.views import APIView  # type: ignore
+            except ImportError:
+                pass
+            else:
+                old_drf_initial = APIView.initial
+
+                def sentry_patched_drf_initial(self, request, *args, **kwargs):
+                    # type: (APIView, Any, *Any, **Any) -> Any
+                    with capture_internal_exceptions():
+                        request._request._sentry_drf_request_backref = weakref.ref(
+                            request
+                        )
+                    return old_drf_initial(self, request, *args, **kwargs)
+
+                APIView.initial = sentry_patched_drf_initial
+
+
+def _patch_channels():
+    # type: () -> None
+    try:
+        from channels.http import AsgiHandler  # type: ignore
+    except ImportError:
+        return
+
+    if not HAS_REAL_CONTEXTVARS:
+        # We better have contextvars or we're going to leak state between
+        # requests.
+        #
+        # We cannot hard-raise here because channels may not be used at all in
+        # the current process. That is the case when running traditional WSGI
+        # workers in gunicorn+gevent and the websocket stuff in a separate
+        # process.
+        logger.warning(
+            "We detected that you are using Django channels 2.0."
+            + CONTEXTVARS_ERROR_MESSAGE
+        )
+
+    from sentry_sdk.integrations.django.asgi import patch_channels_asgi_handler_impl
+
+    patch_channels_asgi_handler_impl(AsgiHandler)
+
+
+def _patch_django_asgi_handler():
+    # type: () -> None
+    try:
+        from django.core.handlers.asgi import ASGIHandler
+    except ImportError:
+        return
+
+    if not HAS_REAL_CONTEXTVARS:
+        # We better have contextvars or we're going to leak state between
+        # requests.
+        #
+        # We cannot hard-raise here because Django's ASGI stuff may not be used
+        # at all.
+        logger.warning(
+            "We detected that you are using Django 3." + CONTEXTVARS_ERROR_MESSAGE
+        )
+
+    from sentry_sdk.integrations.django.asgi import patch_django_asgi_handler_impl
+
+    patch_django_asgi_handler_impl(ASGIHandler)
+
+
+def _set_transaction_name_and_source(scope, transaction_style, request):
+    # type: (sentry_sdk.Scope, str, WSGIRequest) -> None
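+    # transaction_style comes from the integration config, e.g. (sketch):
+    #   DjangoIntegration(transaction_style="url")            # the default
+    #   DjangoIntegration(transaction_style="function_name")  # use the view's function name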
+    try:
+        transaction_name = None
+        if transaction_style == "function_name":
+            fn = resolve(request.path).func
+            transaction_name = transaction_from_function(getattr(fn, "view_class", fn))
+
+        elif transaction_style == "url":
+            if hasattr(request, "urlconf"):
+                transaction_name = LEGACY_RESOLVER.resolve(
+                    request.path_info, urlconf=request.urlconf
+                )
+            else:
+                transaction_name = LEGACY_RESOLVER.resolve(request.path_info)
+
+        if transaction_name is None:
+            transaction_name = request.path_info
+            source = TransactionSource.URL
+        else:
+            source = SOURCE_FOR_STYLE[transaction_style]
+
+        scope.set_transaction_name(
+            transaction_name,
+            source=source,
+        )
+    except Resolver404:
+        urlconf = import_module(settings.ROOT_URLCONF)
+        # This exception only gets thrown when transaction_style is `function_name`
+        # So we don't check here what style is configured
+        if hasattr(urlconf, "handler404"):
+            handler = urlconf.handler404
+            if isinstance(handler, str):
+                scope.transaction = handler
+            else:
+                scope.transaction = transaction_from_function(
+                    getattr(handler, "view_class", handler)
+                )
+    except Exception:
+        pass
+
+
+def _before_get_response(request):
+    # type: (WSGIRequest) -> None
+    integration = sentry_sdk.get_client().get_integration(DjangoIntegration)
+    if integration is None:
+        return
+
+    _patch_drf()
+
+    scope = sentry_sdk.get_current_scope()
+    # Rely on WSGI middleware to start a trace
+    _set_transaction_name_and_source(scope, integration.transaction_style, request)
+
+    scope.add_event_processor(
+        _make_wsgi_request_event_processor(weakref.ref(request), integration)
+    )
+
+
+def _attempt_resolve_again(request, scope, transaction_style):
+    # type: (WSGIRequest, sentry_sdk.Scope, str) -> None
+    """
+    Some Django middlewares overwrite request.urlconf, so we need to respect
+    that contract and try to resolve the URL again.
+    """
+    if not hasattr(request, "urlconf"):
+        return
+
+    _set_transaction_name_and_source(scope, transaction_style, request)
+
+
+def _after_get_response(request):
+    # type: (WSGIRequest) -> None
+    integration = sentry_sdk.get_client().get_integration(DjangoIntegration)
+    if integration is None or integration.transaction_style != "url":
+        return
+
+    scope = sentry_sdk.get_current_scope()
+    _attempt_resolve_again(request, scope, integration.transaction_style)
+
+
+def _patch_get_response():
+    # type: () -> None
+    """
+    Patch get_response, because at that point we have the Django request object.
+    """
+    from django.core.handlers.base import BaseHandler
+
+    old_get_response = BaseHandler.get_response
+
+    def sentry_patched_get_response(self, request):
+        # type: (Any, WSGIRequest) -> Union[HttpResponse, BaseException]
+        _before_get_response(request)
+        rv = old_get_response(self, request)
+        _after_get_response(request)
+        return rv
+
+    BaseHandler.get_response = sentry_patched_get_response
+
+    if hasattr(BaseHandler, "get_response_async"):
+        from sentry_sdk.integrations.django.asgi import patch_get_response_async
+
+        patch_get_response_async(BaseHandler, _before_get_response)
+
+
+def _make_wsgi_request_event_processor(weak_request, integration):
+    # type: (Callable[[], WSGIRequest], DjangoIntegration) -> EventProcessor
+    def wsgi_request_event_processor(event, hint):
+        # type: (Event, dict[str, Any]) -> Event
+        # If the request is gone we are fine with not logging its data.
+        # This might happen if the processor runs later, e.g. on another
+        # thread.
+        request = weak_request()
+        if request is None:
+            return event
+
+        django_3 = ASGIRequest is not None
+        if django_3 and type(request) == ASGIRequest:
+            # We have a `asgi_request_event_processor` for this.
+            return event
+
+        with capture_internal_exceptions():
+            DjangoRequestExtractor(request).extract_into_event(event)
+
+        if should_send_default_pii():
+            with capture_internal_exceptions():
+                _set_user_info(request, event)
+
+        return event
+
+    return wsgi_request_event_processor
+
+
+def _got_request_exception(request=None, **kwargs):
+    # type: (WSGIRequest, **Any) -> None
+    client = sentry_sdk.get_client()
+    integration = client.get_integration(DjangoIntegration)
+    if integration is None:
+        return
+
+    if request is not None and integration.transaction_style == "url":
+        scope = sentry_sdk.get_current_scope()
+        _attempt_resolve_again(request, scope, integration.transaction_style)
+
+    event, hint = event_from_exception(
+        sys.exc_info(),
+        client_options=client.options,
+        mechanism={"type": "django", "handled": False},
+    )
+    sentry_sdk.capture_event(event, hint=hint)
+
+
+class DjangoRequestExtractor(RequestExtractor):
+    def __init__(self, request):
+        # type: (Union[WSGIRequest, ASGIRequest]) -> None
+        try:
+            drf_request = request._sentry_drf_request_backref()
+            if drf_request is not None:
+                request = drf_request
+        except AttributeError:
+            pass
+        self.request = request
+
+    def env(self):
+        # type: () -> Dict[str, str]
+        return self.request.META
+
+    def cookies(self):
+        # type: () -> Dict[str, Union[str, AnnotatedValue]]
+        privacy_cookies = [
+            django_settings.CSRF_COOKIE_NAME,
+            django_settings.SESSION_COOKIE_NAME,
+        ]
+
+        clean_cookies = {}  # type: Dict[str, Union[str, AnnotatedValue]]
+        for key, val in self.request.COOKIES.items():
+            if key in privacy_cookies:
+                clean_cookies[key] = SENSITIVE_DATA_SUBSTITUTE
+            else:
+                clean_cookies[key] = val
+
+        return clean_cookies
+
+    def raw_data(self):
+        # type: () -> bytes
+        return self.request.body
+
+    def form(self):
+        # type: () -> QueryDict
+        return self.request.POST
+
+    def files(self):
+        # type: () -> MultiValueDict
+        return self.request.FILES
+
+    def size_of_file(self, file):
+        # type: (Any) -> int
+        return file.size
+
+    def parsed_body(self):
+        # type: () -> Optional[Dict[str, Any]]
+        try:
+            return self.request.data
+        except Exception:
+            return RequestExtractor.parsed_body(self)
+
+
+def _set_user_info(request, event):
+    # type: (WSGIRequest, Event) -> None
+    user_info = event.setdefault("user", {})
+
+    user = getattr(request, "user", None)
+
+    if user is None or not is_authenticated(user):
+        return
+
+    try:
+        user_info.setdefault("id", str(user.pk))
+    except Exception:
+        pass
+
+    try:
+        user_info.setdefault("email", user.email)
+    except Exception:
+        pass
+
+    try:
+        user_info.setdefault("username", user.get_username())
+    except Exception:
+        pass
+
+
+def install_sql_hook():
+    # type: () -> None
+    """If installed this causes Django's queries to be captured."""
+    try:
+        from django.db.backends.utils import CursorWrapper
+    except ImportError:
+        from django.db.backends.util import CursorWrapper
+
+    try:
+        # django 1.6 and 1.7 compatibility
+        from django.db.backends import BaseDatabaseWrapper
+    except ImportError:
+        # django 1.8 or later
+        from django.db.backends.base.base import BaseDatabaseWrapper
+
+    try:
+        real_execute = CursorWrapper.execute
+        real_executemany = CursorWrapper.executemany
+        real_connect = BaseDatabaseWrapper.connect
+    except AttributeError:
+        # This won't work on Django versions < 1.6
+        return
+
+    @ensure_integration_enabled(DjangoIntegration, real_execute)
+    def execute(self, sql, params=None):
+        # type: (CursorWrapper, Any, Optional[Any]) -> Any
+        with record_sql_queries(
+            cursor=self.cursor,
+            query=sql,
+            params_list=params,
+            paramstyle="format",
+            executemany=False,
+            span_origin=DjangoIntegration.origin_db,
+        ) as span:
+            _set_db_data(span, self)
+            result = real_execute(self, sql, params)
+
+        with capture_internal_exceptions():
+            add_query_source(span)
+
+        return result
+
+    @ensure_integration_enabled(DjangoIntegration, real_executemany)
+    def executemany(self, sql, param_list):
+        # type: (CursorWrapper, Any, List[Any]) -> Any
+        with record_sql_queries(
+            cursor=self.cursor,
+            query=sql,
+            params_list=param_list,
+            paramstyle="format",
+            executemany=True,
+            span_origin=DjangoIntegration.origin_db,
+        ) as span:
+            _set_db_data(span, self)
+
+            result = real_executemany(self, sql, param_list)
+
+        with capture_internal_exceptions():
+            add_query_source(span)
+
+        return result
+
+    @ensure_integration_enabled(DjangoIntegration, real_connect)
+    def connect(self):
+        # type: (BaseDatabaseWrapper) -> None
+        with capture_internal_exceptions():
+            sentry_sdk.add_breadcrumb(message="connect", category="query")
+
+        with sentry_sdk.start_span(
+            op=OP.DB,
+            name="connect",
+            origin=DjangoIntegration.origin_db,
+        ) as span:
+            _set_db_data(span, self)
+            return real_connect(self)
+
+    CursorWrapper.execute = execute
+    CursorWrapper.executemany = executemany
+    BaseDatabaseWrapper.connect = connect
+    ignore_logger("django.db.backends")
+
+
+def _set_db_data(span, cursor_or_db):
+    # type: (Span, Any) -> None
+    db = cursor_or_db.db if hasattr(cursor_or_db, "db") else cursor_or_db
+    vendor = db.vendor
+    span.set_data(SPANDATA.DB_SYSTEM, vendor)
+
+    # Some custom backends override `__getattr__`, making it look like `cursor_or_db`
+    # actually has a `connection` and the `connection` has a `get_dsn_parameters`
+    # attribute, only to throw an error once you actually want to call it.
+    # Hence the `inspect` check whether `get_dsn_parameters` is an actual callable
+    # function.
+    is_psycopg2 = (
+        hasattr(cursor_or_db, "connection")
+        and hasattr(cursor_or_db.connection, "get_dsn_parameters")
+        and inspect.isroutine(cursor_or_db.connection.get_dsn_parameters)
+    )
+    if is_psycopg2:
+        connection_params = cursor_or_db.connection.get_dsn_parameters()
+    else:
+        try:
+            # psycopg3: only extract the needed params, as get_parameters()
+            # can be slow because of the additional logic to filter out
+            # default values.
+            connection_params = {
+                "dbname": cursor_or_db.connection.info.dbname,
+                "port": cursor_or_db.connection.info.port,
+            }
+            # PGhost returns the host, or the base directory of a UNIX socket,
+            # as an absolute path starting with "/"; only use it when it
+            # actually contains a host.
+            pg_host = cursor_or_db.connection.info.host
+            if pg_host and not pg_host.startswith("/"):
+                connection_params["host"] = pg_host
+        except Exception:
+            connection_params = db.get_connection_params()
+
+    db_name = connection_params.get("dbname") or connection_params.get("database")
+    if db_name is not None:
+        span.set_data(SPANDATA.DB_NAME, db_name)
+
+    server_address = connection_params.get("host")
+    if server_address is not None:
+        span.set_data(SPANDATA.SERVER_ADDRESS, server_address)
+
+    server_port = connection_params.get("port")
+    if server_port is not None:
+        span.set_data(SPANDATA.SERVER_PORT, str(server_port))
+
+    server_socket_address = connection_params.get("unix_socket")
+    if server_socket_address is not None:
+        span.set_data(SPANDATA.SERVER_SOCKET_ADDRESS, server_socket_address)
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/django/asgi.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/django/asgi.py
new file mode 100644
index 00000000..73a25acc
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/django/asgi.py
@@ -0,0 +1,245 @@
+"""
+Instrumentation for Django 3.0
+
+Since this file contains `async def` it is conditionally imported in
+`sentry_sdk.integrations.django` (depending on the existence of
+`django.core.handlers.asgi`).
+"""
+
+import asyncio
+import functools
+import inspect
+
+from django.core.handlers.wsgi import WSGIRequest
+
+import sentry_sdk
+from sentry_sdk.consts import OP
+
+from sentry_sdk.integrations.asgi import SentryAsgiMiddleware
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.utils import (
+    capture_internal_exceptions,
+    ensure_integration_enabled,
+)
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any, Callable, Union, TypeVar
+
+    from django.core.handlers.asgi import ASGIRequest
+    from django.http.response import HttpResponse
+
+    from sentry_sdk._types import Event, EventProcessor
+
+    _F = TypeVar("_F", bound=Callable[..., Any])
+
+
+# Python 3.12 deprecates asyncio.iscoroutinefunction() as an alias for
+# inspect.iscoroutinefunction(), whilst also removing the _is_coroutine marker.
+# The latter is replaced with the inspect.markcoroutinefunction decorator.
+# Until 3.12 is the minimum supported Python version, provide a shim.
+# This was copied from https://github.com/django/asgiref/blob/main/asgiref/sync.py
+if hasattr(inspect, "markcoroutinefunction"):
+    iscoroutinefunction = inspect.iscoroutinefunction
+    markcoroutinefunction = inspect.markcoroutinefunction
+else:
+    iscoroutinefunction = asyncio.iscoroutinefunction  # type: ignore[assignment]
+
+    def markcoroutinefunction(func: "_F") -> "_F":
+        func._is_coroutine = asyncio.coroutines._is_coroutine  # type: ignore
+        return func
+
+
+def _make_asgi_request_event_processor(request):
+    # type: (ASGIRequest) -> EventProcessor
+    def asgi_request_event_processor(event, hint):
+        # type: (Event, dict[str, Any]) -> Event
+        # If the request is gone we are fine with not logging its data.
+        # This might happen if the processor runs later, e.g. on another
+        # thread.
+        from sentry_sdk.integrations.django import (
+            DjangoRequestExtractor,
+            _set_user_info,
+        )
+
+        if request is None:
+            return event
+
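+        # WSGI requests are covered by the WSGI request event processor
+        # installed in _before_get_response instead.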
+        if type(request) == WSGIRequest:
+            return event
+
+        with capture_internal_exceptions():
+            DjangoRequestExtractor(request).extract_into_event(event)
+
+        if should_send_default_pii():
+            with capture_internal_exceptions():
+                _set_user_info(request, event)
+
+        return event
+
+    return asgi_request_event_processor
+
+
+def patch_django_asgi_handler_impl(cls):
+    # type: (Any) -> None
+
+    from sentry_sdk.integrations.django import DjangoIntegration
+
+    old_app = cls.__call__
+
+    async def sentry_patched_asgi_handler(self, scope, receive, send):
+        # type: (Any, Any, Any, Any) -> Any
+        integration = sentry_sdk.get_client().get_integration(DjangoIntegration)
+        if integration is None:
+            return await old_app(self, scope, receive, send)
+
+        middleware = SentryAsgiMiddleware(
+            old_app.__get__(self, cls),
+            unsafe_context_data=True,
+            span_origin=DjangoIntegration.origin,
+            http_methods_to_capture=integration.http_methods_to_capture,
+        )._run_asgi3
+
+        return await middleware(scope, receive, send)
+
+    cls.__call__ = sentry_patched_asgi_handler
+
+    modern_django_asgi_support = hasattr(cls, "create_request")
+    if modern_django_asgi_support:
+        old_create_request = cls.create_request
+
+        @ensure_integration_enabled(DjangoIntegration, old_create_request)
+        def sentry_patched_create_request(self, *args, **kwargs):
+            # type: (Any, *Any, **Any) -> Any
+            request, error_response = old_create_request(self, *args, **kwargs)
+            scope = sentry_sdk.get_isolation_scope()
+            scope.add_event_processor(_make_asgi_request_event_processor(request))
+
+            return request, error_response
+
+        cls.create_request = sentry_patched_create_request
+
+
+def patch_get_response_async(cls, _before_get_response):
+    # type: (Any, Any) -> None
+    old_get_response_async = cls.get_response_async
+
+    async def sentry_patched_get_response_async(self, request):
+        # type: (Any, Any) -> Union[HttpResponse, BaseException]
+        _before_get_response(request)
+        return await old_get_response_async(self, request)
+
+    cls.get_response_async = sentry_patched_get_response_async
+
+
+def patch_channels_asgi_handler_impl(cls):
+    # type: (Any) -> None
+    import channels  # type: ignore
+
+    from sentry_sdk.integrations.django import DjangoIntegration
+
+    if channels.__version__ < "3.0.0":
+        old_app = cls.__call__
+
+        async def sentry_patched_asgi_handler(self, receive, send):
+            # type: (Any, Any, Any) -> Any
+            integration = sentry_sdk.get_client().get_integration(DjangoIntegration)
+            if integration is None:
+                return await old_app(self, receive, send)
+
+            middleware = SentryAsgiMiddleware(
+                lambda _scope: old_app.__get__(self, cls),
+                unsafe_context_data=True,
+                span_origin=DjangoIntegration.origin,
+                http_methods_to_capture=integration.http_methods_to_capture,
+            )
+
+            return await middleware(self.scope)(receive, send)
+
+        cls.__call__ = sentry_patched_asgi_handler
+
+    else:
+        # The ASGI handler in Channels >= 3 has the same signature as
+        # the Django handler.
+        patch_django_asgi_handler_impl(cls)
+
+
+def wrap_async_view(callback):
+    # type: (Any) -> Any
+    from sentry_sdk.integrations.django import DjangoIntegration
+
+    @functools.wraps(callback)
+    async def sentry_wrapped_callback(request, *args, **kwargs):
+        # type: (Any, *Any, **Any) -> Any
+        current_scope = sentry_sdk.get_current_scope()
+        if current_scope.transaction is not None:
+            current_scope.transaction.update_active_thread()
+
+        sentry_scope = sentry_sdk.get_isolation_scope()
+        if sentry_scope.profile is not None:
+            sentry_scope.profile.update_active_thread_id()
+
+        with sentry_sdk.start_span(
+            op=OP.VIEW_RENDER,
+            name=request.resolver_match.view_name,
+            origin=DjangoIntegration.origin,
+        ):
+            return await callback(request, *args, **kwargs)
+
+    return sentry_wrapped_callback
+
+
+def _asgi_middleware_mixin_factory(_check_middleware_span):
+    # type: (Callable[..., Any]) -> Any
+    """
+    Mixin class factory that generates a middleware mixin for handling requests
+    in async mode.
+    """
+
+    class SentryASGIMixin:
+        if TYPE_CHECKING:
+            _inner = None
+
+        def __init__(self, get_response):
+            # type: (Callable[..., Any]) -> None
+            self.get_response = get_response
+            self._acall_method = None
+            self._async_check()
+
+        def _async_check(self):
+            # type: () -> None
+            """
+            If get_response is a coroutine function, switch into async mode so
+            a thread is not consumed for the duration of a request.
+            Taken from django.utils.deprecation::MiddlewareMixin._async_check
+            """
+            if iscoroutinefunction(self.get_response):
+                markcoroutinefunction(self)
+
+        def async_route_check(self):
+            # type: () -> bool
+            """
+            Check whether we are in async mode; if we are, the handling of
+            requests is forwarded to __acall__.
+            """
+            return iscoroutinefunction(self.get_response)
+
+        async def __acall__(self, *args, **kwargs):
+            # type: (*Any, **Any) -> Any
+            f = self._acall_method
+            if f is None:
+                if hasattr(self._inner, "__acall__"):
+                    self._acall_method = f = self._inner.__acall__  # type: ignore
+                else:
+                    self._acall_method = f = self._inner
+
+            middleware_span = _check_middleware_span(old_method=f)
+
+            if middleware_span is None:
+                return await f(*args, **kwargs)
+
+            with middleware_span:
+                return await f(*args, **kwargs)
+
+    return SentryASGIMixin
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/django/caching.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/django/caching.py
new file mode 100644
index 00000000..79856117
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/django/caching.py
@@ -0,0 +1,191 @@
+import functools
+from typing import TYPE_CHECKING
+from sentry_sdk.integrations.redis.utils import _get_safe_key, _key_as_string
+from urllib3.util import parse_url as urlparse
+
+from django import VERSION as DJANGO_VERSION
+from django.core.cache import CacheHandler
+
+import sentry_sdk
+from sentry_sdk.consts import OP, SPANDATA
+from sentry_sdk.utils import (
+    capture_internal_exceptions,
+    ensure_integration_enabled,
+)
+
+
+if TYPE_CHECKING:
+    from typing import Any
+    from typing import Callable
+    from typing import Optional
+
+
+METHODS_TO_INSTRUMENT = [
+    "set",
+    "set_many",
+    "get",
+    "get_many",
+]
+
+
+def _get_span_description(method_name, args, kwargs):
+    # type: (str, tuple[Any], dict[str, Any]) -> str
+    return _key_as_string(_get_safe_key(method_name, args, kwargs))
+
+
+def _patch_cache_method(cache, method_name, address, port):
+    # type: (CacheHandler, str, Optional[str], Optional[int]) -> None
+    from sentry_sdk.integrations.django import DjangoIntegration
+
+    original_method = getattr(cache, method_name)
+
+    @ensure_integration_enabled(DjangoIntegration, original_method)
+    def _instrument_call(
+        cache, method_name, original_method, args, kwargs, address, port
+    ):
+        # type: (CacheHandler, str, Callable[..., Any], tuple[Any, ...], dict[str, Any], Optional[str], Optional[int]) -> Any
+        is_set_operation = method_name.startswith("set")
+        is_get_operation = not is_set_operation
+
+        op = OP.CACHE_PUT if is_set_operation else OP.CACHE_GET
+        description = _get_span_description(method_name, args, kwargs)
+
+        with sentry_sdk.start_span(
+            op=op,
+            name=description,
+            origin=DjangoIntegration.origin,
+        ) as span:
+            value = original_method(*args, **kwargs)
+
+            with capture_internal_exceptions():
+                if address is not None:
+                    span.set_data(SPANDATA.NETWORK_PEER_ADDRESS, address)
+
+                if port is not None:
+                    span.set_data(SPANDATA.NETWORK_PEER_PORT, port)
+
+                key = _get_safe_key(method_name, args, kwargs)
+                if key is not None:
+                    span.set_data(SPANDATA.CACHE_KEY, key)
+
+                item_size = None
+                if is_get_operation:
+                    if value:
+                        item_size = len(str(value))
+                        span.set_data(SPANDATA.CACHE_HIT, True)
+                    else:
+                        span.set_data(SPANDATA.CACHE_HIT, False)
+                else:  # TODO: We don't handle `get_or_set` which we should
+                    arg_count = len(args)
+                    if arg_count >= 2:
+                        # 'set' command
+                        item_size = len(str(args[1]))
+                    elif arg_count == 1:
+                        # 'set_many' command
+                        item_size = len(str(args[0]))
+
+                if item_size is not None:
+                    span.set_data(SPANDATA.CACHE_ITEM_SIZE, item_size)
+
+            return value
+
+    @functools.wraps(original_method)
+    def sentry_method(*args, **kwargs):
+        # type: (*Any, **Any) -> Any
+        return _instrument_call(
+            cache, method_name, original_method, args, kwargs, address, port
+        )
+
+    setattr(cache, method_name, sentry_method)
+
+
+def _patch_cache(cache, address=None, port=None):
+    # type: (CacheHandler, Optional[str], Optional[int]) -> None
+    if not hasattr(cache, "_sentry_patched"):
+        for method_name in METHODS_TO_INSTRUMENT:
+            _patch_cache_method(cache, method_name, address, port)
+        cache._sentry_patched = True
+
+
+def _get_address_port(settings):
+    # type: (dict[str, Any]) -> tuple[Optional[str], Optional[int]]
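+    # For illustration: a LOCATION of "redis://user:secret@cache.example.com:6379/0"
+    # yields ("redis://cache.example.com/0", 6379), dropping the credentials,
+    # while a plain string such as "unix:/tmp/memcached.sock" is returned
+    # unchanged with port None.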
+    location = settings.get("LOCATION")
+
+    # TODO: location can also be an array of locations
+    #       see: https://docs.djangoproject.com/en/5.0/topics/cache/#redis
+    #       GitHub issue: https://github.com/getsentry/sentry-python/issues/3062
+    if not isinstance(location, str):
+        return None, None
+
+    if "://" in location:
+        parsed_url = urlparse(location)
+        # Remove the username and password from the URL to avoid leaking sensitive data.
+        address = "{}://{}{}".format(
+            parsed_url.scheme or "",
+            parsed_url.hostname or "",
+            parsed_url.path or "",
+        )
+        port = parsed_url.port
+    else:
+        address = location
+        port = None
+
+    return address, int(port) if port is not None else None
+
+
+def should_enable_cache_spans():
+    # type: () -> bool
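+    # Cache spans are opt-in, e.g. (sketch):
+    #   sentry_sdk.init(integrations=[DjangoIntegration(cache_spans=True)])
+    # They are also implied when Spotlight is connected and settings.DEBUG is True.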
+    from sentry_sdk.integrations.django import DjangoIntegration
+
+    client = sentry_sdk.get_client()
+    integration = client.get_integration(DjangoIntegration)
+    from django.conf import settings
+
+    return integration is not None and (
+        (client.spotlight is not None and settings.DEBUG is True)
+        or integration.cache_spans is True
+    )
+
+
+def patch_caching():
+    # type: () -> None
+    if not hasattr(CacheHandler, "_sentry_patched"):
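+        # Django < 3.2 exposes caches only through CacheHandler.__getitem__;
+        # Django 3.2 added CacheHandler.create_connection, hence the two
+        # branches below.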
+        if DJANGO_VERSION < (3, 2):
+            original_get_item = CacheHandler.__getitem__
+
+            @functools.wraps(original_get_item)
+            def sentry_get_item(self, alias):
+                # type: (CacheHandler, str) -> Any
+                cache = original_get_item(self, alias)
+
+                if should_enable_cache_spans():
+                    from django.conf import settings
+
+                    address, port = _get_address_port(
+                        settings.CACHES[alias or "default"]
+                    )
+
+                    _patch_cache(cache, address, port)
+
+                return cache
+
+            CacheHandler.__getitem__ = sentry_get_item
+            CacheHandler._sentry_patched = True
+
+        else:
+            original_create_connection = CacheHandler.create_connection
+
+            @functools.wraps(original_create_connection)
+            def sentry_create_connection(self, alias):
+                # type: (CacheHandler, str) -> Any
+                cache = original_create_connection(self, alias)
+
+                if should_enable_cache_spans():
+                    address, port = _get_address_port(self.settings[alias or "default"])
+
+                    _patch_cache(cache, address, port)
+
+                return cache
+
+            CacheHandler.create_connection = sentry_create_connection
+            CacheHandler._sentry_patched = True
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/django/middleware.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/django/middleware.py
new file mode 100644
index 00000000..24527656
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/django/middleware.py
@@ -0,0 +1,187 @@
+"""
+Create spans from Django middleware invocations
+"""
+
+from functools import wraps
+
+from django import VERSION as DJANGO_VERSION
+
+import sentry_sdk
+from sentry_sdk.consts import OP
+from sentry_sdk.utils import (
+    ContextVar,
+    transaction_from_function,
+    capture_internal_exceptions,
+)
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+    from typing import Callable
+    from typing import Optional
+    from typing import TypeVar
+
+    from sentry_sdk.tracing import Span
+
+    F = TypeVar("F", bound=Callable[..., Any])
+
+_import_string_should_wrap_middleware = ContextVar(
+    "import_string_should_wrap_middleware"
+)
+
+DJANGO_SUPPORTS_ASYNC_MIDDLEWARE = DJANGO_VERSION >= (3, 1)
+
+if not DJANGO_SUPPORTS_ASYNC_MIDDLEWARE:
+    _asgi_middleware_mixin_factory = lambda _: object
+else:
+    from .asgi import _asgi_middleware_mixin_factory
+
+
+def patch_django_middlewares():
+    # type: () -> None
+    from django.core.handlers import base
+
+    old_import_string = base.import_string
+
+    def sentry_patched_import_string(dotted_path):
+        # type: (str) -> Any
+        rv = old_import_string(dotted_path)
+
+        if _import_string_should_wrap_middleware.get(None):
+            rv = _wrap_middleware(rv, dotted_path)
+
+        return rv
+
+    base.import_string = sentry_patched_import_string
+
+    old_load_middleware = base.BaseHandler.load_middleware
+
+    def sentry_patched_load_middleware(*args, **kwargs):
+        # type: (*Any, **Any) -> Any
+        _import_string_should_wrap_middleware.set(True)
+        try:
+            return old_load_middleware(*args, **kwargs)
+        finally:
+            _import_string_should_wrap_middleware.set(False)
+
+    base.BaseHandler.load_middleware = sentry_patched_load_middleware
+
+
+def _wrap_middleware(middleware, middleware_name):
+    # type: (Any, str) -> Any
+    from sentry_sdk.integrations.django import DjangoIntegration
+
+    def _check_middleware_span(old_method):
+        # type: (Callable[..., Any]) -> Optional[Span]
+        integration = sentry_sdk.get_client().get_integration(DjangoIntegration)
+        if integration is None or not integration.middleware_spans:
+            return None
+
+        function_name = transaction_from_function(old_method)
+
+        description = middleware_name
+        function_basename = getattr(old_method, "__name__", None)
+        if function_basename:
+            description = "{}.{}".format(description, function_basename)
+
+        middleware_span = sentry_sdk.start_span(
+            op=OP.MIDDLEWARE_DJANGO,
+            name=description,
+            origin=DjangoIntegration.origin,
+        )
+        middleware_span.set_tag("django.function_name", function_name)
+        middleware_span.set_tag("django.middleware_name", middleware_name)
+
+        return middleware_span
+
+    def _get_wrapped_method(old_method):
+        # type: (F) -> F
+        with capture_internal_exceptions():
+
+            def sentry_wrapped_method(*args, **kwargs):
+                # type: (*Any, **Any) -> Any
+                middleware_span = _check_middleware_span(old_method)
+
+                if middleware_span is None:
+                    return old_method(*args, **kwargs)
+
+                with middleware_span:
+                    return old_method(*args, **kwargs)
+
+            try:
+                # fails for __call__ of function on Python 2 (see py2.7-django-1.11)
+                sentry_wrapped_method = wraps(old_method)(sentry_wrapped_method)
+
+                # Necessary for Django 3.1
+                sentry_wrapped_method.__self__ = old_method.__self__  # type: ignore
+            except Exception:
+                pass
+
+            return sentry_wrapped_method  # type: ignore
+
+        return old_method
+
+    class SentryWrappingMiddleware(
+        _asgi_middleware_mixin_factory(_check_middleware_span)  # type: ignore
+    ):
+        sync_capable = getattr(middleware, "sync_capable", True)
+        async_capable = DJANGO_SUPPORTS_ASYNC_MIDDLEWARE and getattr(
+            middleware, "async_capable", False
+        )
+
+        def __init__(self, get_response=None, *args, **kwargs):
+            # type: (Optional[Callable[..., Any]], *Any, **Any) -> None
+            if get_response:
+                self._inner = middleware(get_response, *args, **kwargs)
+            else:
+                self._inner = middleware(*args, **kwargs)
+            self.get_response = get_response
+            self._call_method = None
+            if self.async_capable:
+                super().__init__(get_response)
+
+        # We need correct behavior for `hasattr()`, which we can only determine
+        # when we have an instance of the middleware we're wrapping.
+        def __getattr__(self, method_name):
+            # type: (str) -> Any
+            if method_name not in (
+                "process_request",
+                "process_view",
+                "process_template_response",
+                "process_response",
+                "process_exception",
+            ):
+                raise AttributeError()
+
+            old_method = getattr(self._inner, method_name)
+            rv = _get_wrapped_method(old_method)
+            self.__dict__[method_name] = rv
+            return rv
+
+        def __call__(self, *args, **kwargs):
+            # type: (*Any, **Any) -> Any
+            if hasattr(self, "async_route_check") and self.async_route_check():
+                return self.__acall__(*args, **kwargs)
+
+            f = self._call_method
+            if f is None:
+                self._call_method = f = self._inner.__call__
+
+            middleware_span = _check_middleware_span(old_method=f)
+
+            if middleware_span is None:
+                return f(*args, **kwargs)
+
+            with middleware_span:
+                return f(*args, **kwargs)
+
+    for attr in (
+        "__name__",
+        "__module__",
+        "__qualname__",
+    ):
+        if hasattr(middleware, attr):
+            setattr(SentryWrappingMiddleware, attr, getattr(middleware, attr))
+
+    return SentryWrappingMiddleware
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/django/signals_handlers.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/django/signals_handlers.py
new file mode 100644
index 00000000..cb0f8b9d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/django/signals_handlers.py
@@ -0,0 +1,91 @@
+from functools import wraps
+
+from django.dispatch import Signal
+
+import sentry_sdk
+from sentry_sdk.consts import OP
+from sentry_sdk.integrations.django import DJANGO_VERSION
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from collections.abc import Callable
+    from typing import Any, Union
+
+
+def _get_receiver_name(receiver):
+    # type: (Callable[..., Any]) -> str
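+    # For a plain module-level function this yields e.g.
+    # "myapp.signals.my_handler"; partials fall back to the
+    # "partial(<function my_handler>)" form built below.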
+    name = ""
+
+    if hasattr(receiver, "__qualname__"):
+        name = receiver.__qualname__
+    elif hasattr(receiver, "__name__"):  # Python 2.7 has no __qualname__
+        name = receiver.__name__
+    elif hasattr(
+        receiver, "func"
+    ):  # certain callables (like partials) don't have a name
+        if hasattr(receiver.func, "__name__"):
+            name = "partial(<function " + receiver.func.__name__ + ">)"
+
+    if name == "":
+        # In case nothing was found, return the string representation
+        # (this is the slowest case).
+        return str(receiver)
+
+    if hasattr(receiver, "__module__"):  # prepend with module, if there is one
+        name = receiver.__module__ + "." + name
+
+    return name
+
+
+def patch_signals():
+    # type: () -> None
+    """
+    Patch django signal receivers to create a span.
+
+    This only wraps sync receivers. Django>=5.0 introduced async receivers, but
+    since we don't create transactions for ASGI Django, we don't wrap them.
+    """
+    from sentry_sdk.integrations.django import DjangoIntegration
+
+    old_live_receivers = Signal._live_receivers
+
+    def _sentry_live_receivers(self, sender):
+        # type: (Signal, Any) -> Union[tuple[list[Callable[..., Any]], list[Callable[..., Any]]], list[Callable[..., Any]]]
+        if DJANGO_VERSION >= (5, 0):
+            sync_receivers, async_receivers = old_live_receivers(self, sender)
+        else:
+            sync_receivers = old_live_receivers(self, sender)
+            async_receivers = []
+
+        def sentry_sync_receiver_wrapper(receiver):
+            # type: (Callable[..., Any]) -> Callable[..., Any]
+            @wraps(receiver)
+            def wrapper(*args, **kwargs):
+                # type: (Any, Any) -> Any
+                signal_name = _get_receiver_name(receiver)
+                with sentry_sdk.start_span(
+                    op=OP.EVENT_DJANGO,
+                    name=signal_name,
+                    origin=DjangoIntegration.origin,
+                ) as span:
+                    span.set_data("signal", signal_name)
+                    return receiver(*args, **kwargs)
+
+            return wrapper
+
+        integration = sentry_sdk.get_client().get_integration(DjangoIntegration)
+        if (
+            integration
+            and integration.signals_spans
+            and self not in integration.signals_denylist
+        ):
+            for idx, receiver in enumerate(sync_receivers):
+                sync_receivers[idx] = sentry_sync_receiver_wrapper(receiver)
+
+        if DJANGO_VERSION >= (5, 0):
+            return sync_receivers, async_receivers
+        else:
+            return sync_receivers
+
+    Signal._live_receivers = _sentry_live_receivers
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/django/templates.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/django/templates.py
new file mode 100644
index 00000000..10e8a924
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/django/templates.py
@@ -0,0 +1,188 @@
+import functools
+
+from django.template import TemplateSyntaxError
+from django.utils.safestring import mark_safe
+from django import VERSION as DJANGO_VERSION
+
+import sentry_sdk
+from sentry_sdk.consts import OP
+from sentry_sdk.utils import ensure_integration_enabled
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+    from typing import Dict
+    from typing import Optional
+    from typing import Iterator
+    from typing import Tuple
+
+try:
+    # support Django 1.9
+    from django.template.base import Origin
+except ImportError:
+    # backward compatibility
+    from django.template.loader import LoaderOrigin as Origin
+
+
+def get_template_frame_from_exception(exc_value):
+    # type: (Optional[BaseException]) -> Optional[Dict[str, Any]]
+
+    # As of Django 1.9 or so, exceptions carry the new template debug information.
+    if hasattr(exc_value, "template_debug"):
+        return _get_template_frame_from_debug(exc_value.template_debug)  # type: ignore
+
+    # As of r16833 (Django) all exceptions may contain a
+    # ``django_template_source`` attribute (rather than the legacy
+    # ``TemplateSyntaxError.source`` check)
+    if hasattr(exc_value, "django_template_source"):
+        return _get_template_frame_from_source(
+            exc_value.django_template_source  # type: ignore
+        )
+
+    if isinstance(exc_value, TemplateSyntaxError) and hasattr(exc_value, "source"):
+        source = exc_value.source
+        if isinstance(source, (tuple, list)) and isinstance(source[0], Origin):
+            return _get_template_frame_from_source(source)  # type: ignore
+
+    return None
+
+
+def _get_template_name_description(template_name):
+    # type: (Any) -> Optional[str]
+    if isinstance(template_name, (list, tuple)):
+        if template_name:
+            return "[{}, ...]".format(template_name[0])
+    else:
+        return template_name
+
+
+def patch_templates():
+    # type: () -> None
+    from django.template.response import SimpleTemplateResponse
+    from sentry_sdk.integrations.django import DjangoIntegration
+
+    real_rendered_content = SimpleTemplateResponse.rendered_content
+
+    @property  # type: ignore
+    @ensure_integration_enabled(DjangoIntegration, real_rendered_content.fget)
+    def rendered_content(self):
+        # type: (SimpleTemplateResponse) -> str
+        with sentry_sdk.start_span(
+            op=OP.TEMPLATE_RENDER,
+            name=_get_template_name_description(self.template_name),
+            origin=DjangoIntegration.origin,
+        ) as span:
+            span.set_data("context", self.context_data)
+            return real_rendered_content.fget(self)
+
+    SimpleTemplateResponse.rendered_content = rendered_content
+
+    if DJANGO_VERSION < (1, 7):
+        return
+    import django.shortcuts
+
+    real_render = django.shortcuts.render
+
+    @functools.wraps(real_render)
+    @ensure_integration_enabled(DjangoIntegration, real_render)
+    def render(request, template_name, context=None, *args, **kwargs):
+        # type: (django.http.HttpRequest, str, Optional[Dict[str, Any]], *Any, **Any) -> django.http.HttpResponse
+
+        # Inject trace meta tags into template context
+        context = context or {}
+        if "sentry_trace_meta" not in context:
+            context["sentry_trace_meta"] = mark_safe(
+                sentry_sdk.get_current_scope().trace_propagation_meta()
+            )
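+        # Templates can then render the propagation <meta> tags with
+        # {{ sentry_trace_meta }} to continue the trace in the browser.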
+
+        with sentry_sdk.start_span(
+            op=OP.TEMPLATE_RENDER,
+            name=_get_template_name_description(template_name),
+            origin=DjangoIntegration.origin,
+        ) as span:
+            span.set_data("context", context)
+            return real_render(request, template_name, context, *args, **kwargs)
+
+    django.shortcuts.render = render
+
+
+def _get_template_frame_from_debug(debug):
+    # type: (Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]
+    if debug is None:
+        return None
+
+    lineno = debug["line"]
+    filename = debug["name"]
+    if filename is None:
+        filename = "<django template>"
+
+    pre_context = []
+    post_context = []
+    context_line = None
+
+    for i, line in debug["source_lines"]:
+        if i < lineno:
+            pre_context.append(line)
+        elif i > lineno:
+            post_context.append(line)
+        else:
+            context_line = line
+
+    return {
+        "filename": filename,
+        "lineno": lineno,
+        "pre_context": pre_context[-5:],
+        "post_context": post_context[:5],
+        "context_line": context_line,
+        "in_app": True,
+    }
+
+
+def _linebreak_iter(template_source):
+    # type: (str) -> Iterator[int]
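+    # Yields the offset at which each line starts: e.g. for "a\nbc\n" this
+    # yields 0, 2, 5. Used below to map a (start, end) span to a line number.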
+    yield 0
+    p = template_source.find("\n")
+    while p >= 0:
+        yield p + 1
+        p = template_source.find("\n", p + 1)
+
+
+def _get_template_frame_from_source(source):
+    # type: (Tuple[Origin, Tuple[int, int]]) -> Optional[Dict[str, Any]]
+    if not source:
+        return None
+
+    origin, (start, end) = source
+    filename = getattr(origin, "loadname", None)
+    if filename is None:
+        filename = "<django template>"
+    template_source = origin.reload()
+    lineno = None
+    upto = 0
+    pre_context = []
+    post_context = []
+    context_line = None
+
+    for num, next_offset in enumerate(_linebreak_iter(template_source)):
+        line = template_source[upto:next_offset]
+        if start >= upto and end <= next_offset:
+            lineno = num
+            context_line = line
+        elif lineno is None:
+            pre_context.append(line)
+        else:
+            post_context.append(line)
+
+        upto = next_offset
+
+    if context_line is None or lineno is None:
+        return None
+
+    return {
+        "filename": filename,
+        "lineno": lineno,
+        "pre_context": pre_context[-5:],
+        "post_context": post_context[:5],
+        "context_line": context_line,
+    }
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/django/transactions.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/django/transactions.py
new file mode 100644
index 00000000..5a7d69f3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/django/transactions.py
@@ -0,0 +1,159 @@
+"""
+Copied from raven-python.
+
+Despite being called "legacy" in some places this resolver is very much still
+in use.
+"""
+
+import re
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from django.urls.resolvers import URLResolver
+    from typing import Dict
+    from typing import List
+    from typing import Optional
+    from django.urls.resolvers import URLPattern
+    from typing import Tuple
+    from typing import Union
+    from re import Pattern
+
+from django import VERSION as DJANGO_VERSION
+
+if DJANGO_VERSION >= (2, 0):
+    from django.urls.resolvers import RoutePattern
+else:
+    RoutePattern = None
+
+try:
+    from django.urls import get_resolver
+except ImportError:
+    from django.core.urlresolvers import get_resolver
+
+
+def get_regex(resolver_or_pattern):
+    # type: (Union[URLPattern, URLResolver]) -> Pattern[str]
+    """Utility method for django's deprecated resolver.regex"""
+    try:
+        regex = resolver_or_pattern.regex
+    except AttributeError:
+        regex = resolver_or_pattern.pattern.regex
+    return regex
+
+
+class RavenResolver:
+    _new_style_group_matcher = re.compile(
+        r"<(?:([^>:]+):)?([^>]+)>"
+    )  # https://github.com/django/django/blob/21382e2743d06efbf5623e7c9b6dccf2a325669b/django/urls/resolvers.py#L245-L247
+    _optional_group_matcher = re.compile(r"\(\?\:([^\)]+)\)")
+    _named_group_matcher = re.compile(r"\(\?P<(\w+)>[^\)]+\)+")
+    _non_named_group_matcher = re.compile(r"\([^\)]+\)")
+    # [foo|bar|baz]
+    _either_option_matcher = re.compile(r"\[([^\]]+)\|([^\]]+)\]")
+    _camel_re = re.compile(r"([A-Z]+)([a-z])")
+
+    _cache = {}  # type: Dict[URLPattern, str]
+
+    def _simplify(self, pattern):
+        # type: (Union[URLPattern, URLResolver]) -> str
+        r"""
+        Clean up urlpattern regexes into something readable by humans:
+
+        From:
+        > "^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$"
+
+        To:
+        > "{sport_slug}/athletes/{athlete_slug}/"
+        """
+        # "new-style" path patterns can be parsed directly without turning them
+        # into regexes first
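+        # (e.g. the route "athletes/<slug:athlete_slug>/" becomes
+        # "athletes/{athlete_slug}/")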
+        if (
+            RoutePattern is not None
+            and hasattr(pattern, "pattern")
+            and isinstance(pattern.pattern, RoutePattern)
+        ):
+            return self._new_style_group_matcher.sub(
+                lambda m: "{%s}" % m.group(2), str(pattern.pattern._route)
+            )
+
+        result = get_regex(pattern).pattern
+
+        # remove optional params
+        # TODO(dcramer): it'd be nice to change these into [%s] but it currently
+        # conflicts with the other rules because we're doing regexp matches
+        # rather than parsing tokens
+        result = self._optional_group_matcher.sub(lambda m: "%s" % m.group(1), result)
+
+        # handle named groups first
+        result = self._named_group_matcher.sub(lambda m: "{%s}" % m.group(1), result)
+
+        # handle non-named groups
+        result = self._non_named_group_matcher.sub("{var}", result)
+
+        # handle optional params
+        result = self._either_option_matcher.sub(lambda m: m.group(1), result)
+
+        # clean up any outstanding regex-y characters.
+        result = (
+            result.replace("^", "")
+            .replace("$", "")
+            .replace("?", "")
+            .replace("\\A", "")
+            .replace("\\Z", "")
+            .replace("//", "/")
+            .replace("\\", "")
+        )
+
+        return result
+
+    def _resolve(self, resolver, path, parents=None):
+        # type: (URLResolver, str, Optional[List[URLResolver]]) -> Optional[str]
+
+        match = get_regex(resolver).search(path)  # Django < 2.0
+
+        if not match:
+            return None
+
+        if parents is None:
+            parents = [resolver]
+        elif resolver not in parents:
+            parents = parents + [resolver]
+
+        new_path = path[match.end() :]
+        for pattern in resolver.url_patterns:
+            # this is an include()
+            if not pattern.callback:
+                match_ = self._resolve(pattern, new_path, parents)
+                if match_:
+                    return match_
+                continue
+            elif not get_regex(pattern).search(new_path):
+                continue
+
+            try:
+                return self._cache[pattern]
+            except KeyError:
+                pass
+
+            prefix = "".join(self._simplify(p) for p in parents)
+            result = prefix + self._simplify(pattern)
+            if not result.startswith("/"):
+                result = "/" + result
+            self._cache[pattern] = result
+            return result
+
+        return None
+
+    def resolve(
+        self,
+        path,  # type: str
+        urlconf=None,  # type: Union[None, Tuple[URLPattern, URLPattern, URLResolver], Tuple[URLPattern]]
+    ):
+        # type: (...) -> Optional[str]
+        resolver = get_resolver(urlconf)
+        match = self._resolve(resolver, path)
+        return match
+
+
+LEGACY_RESOLVER = RavenResolver()
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/django/views.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/django/views.py
new file mode 100644
index 00000000..0a9861a6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/django/views.py
@@ -0,0 +1,96 @@
+import functools
+
+import sentry_sdk
+from sentry_sdk.consts import OP
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+
+
+try:
+    from asyncio import iscoroutinefunction
+except ImportError:
+    iscoroutinefunction = None  # type: ignore
+
+
+try:
+    from sentry_sdk.integrations.django.asgi import wrap_async_view
+except (ImportError, SyntaxError):
+    wrap_async_view = None  # type: ignore
+
+
+def patch_views():
+    # type: () -> None
+
+    from django.core.handlers.base import BaseHandler
+    from django.template.response import SimpleTemplateResponse
+    from sentry_sdk.integrations.django import DjangoIntegration
+
+    old_make_view_atomic = BaseHandler.make_view_atomic
+    old_render = SimpleTemplateResponse.render
+
+    def sentry_patched_render(self):
+        # type: (SimpleTemplateResponse) -> Any
+        with sentry_sdk.start_span(
+            op=OP.VIEW_RESPONSE_RENDER,
+            name="serialize response",
+            origin=DjangoIntegration.origin,
+        ):
+            return old_render(self)
+
+    @functools.wraps(old_make_view_atomic)
+    def sentry_patched_make_view_atomic(self, *args, **kwargs):
+        # type: (Any, *Any, **Any) -> Any
+        callback = old_make_view_atomic(self, *args, **kwargs)
+
+        # XXX: The wrapper function is created for every request. Find a more
+        # efficient way to wrap views (or build a cache?)
+
+        integration = sentry_sdk.get_client().get_integration(DjangoIntegration)
+        if integration is not None and integration.middleware_spans:
+            is_async_view = (
+                iscoroutinefunction is not None
+                and wrap_async_view is not None
+                and iscoroutinefunction(callback)
+            )
+            if is_async_view:
+                sentry_wrapped_callback = wrap_async_view(callback)
+            else:
+                sentry_wrapped_callback = _wrap_sync_view(callback)
+
+        else:
+            sentry_wrapped_callback = callback
+
+        return sentry_wrapped_callback
+
+    SimpleTemplateResponse.render = sentry_patched_render
+    BaseHandler.make_view_atomic = sentry_patched_make_view_atomic
+
+
+def _wrap_sync_view(callback):
+    # type: (Any) -> Any
+    from sentry_sdk.integrations.django import DjangoIntegration
+
+    @functools.wraps(callback)
+    def sentry_wrapped_callback(request, *args, **kwargs):
+        # type: (Any, *Any, **Any) -> Any
+        current_scope = sentry_sdk.get_current_scope()
+        if current_scope.transaction is not None:
+            current_scope.transaction.update_active_thread()
+
+        sentry_scope = sentry_sdk.get_isolation_scope()
+        # Set the active thread id to the handler thread for sync views.
+        # This isn't necessary for async views since those run on the main thread.
+        if sentry_scope.profile is not None:
+            sentry_scope.profile.update_active_thread_id()
+
+        with sentry_sdk.start_span(
+            op=OP.VIEW_RENDER,
+            name=request.resolver_match.view_name,
+            origin=DjangoIntegration.origin,
+        ):
+            return callback(request, *args, **kwargs)
+
+    return sentry_wrapped_callback
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/dramatiq.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/dramatiq.py
new file mode 100644
index 00000000..f9ef13e2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/dramatiq.py
@@ -0,0 +1,168 @@
+import json
+
+import sentry_sdk
+from sentry_sdk.integrations import Integration
+from sentry_sdk.integrations._wsgi_common import request_body_within_bounds
+from sentry_sdk.utils import (
+    AnnotatedValue,
+    capture_internal_exceptions,
+    event_from_exception,
+)
+
+from dramatiq.broker import Broker  # type: ignore
+from dramatiq.message import Message  # type: ignore
+from dramatiq.middleware import Middleware, default_middleware  # type: ignore
+from dramatiq.errors import Retry  # type: ignore
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any, Callable, Dict, Optional, Union
+    from sentry_sdk._types import Event, Hint
+
+
+class DramatiqIntegration(Integration):
+    """
+    Dramatiq integration for Sentry
+
+    Please make sure that you call `sentry_sdk.init` *before* initializing
+    your broker, as it monkey patches `Broker.__init__`.
+
+    This integration was originally developed and maintained
+    by https://github.com/jacobsvante and later donated to the Sentry
+    project.
+    """
+
+    identifier = "dramatiq"
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        _patch_dramatiq_broker()
+
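+# A minimal usage sketch (placeholder DSN, hypothetical broker setup): call
+# sentry_sdk.init before creating the broker so Broker.__init__ is already
+# patched when the broker is constructed:
+#
+#     import dramatiq
+#     import sentry_sdk
+#     from sentry_sdk.integrations.dramatiq import DramatiqIntegration
+#
+#     sentry_sdk.init(dsn="https://<key>@<org>.ingest.sentry.io/<project>",
+#                     integrations=[DramatiqIntegration()])
+#     broker = dramatiq.brokers.rabbitmq.RabbitmqBroker()
+#     # SentryMiddleware is now the first middleware on the broker.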
+
+def _patch_dramatiq_broker():
+    # type: () -> None
+    original_broker__init__ = Broker.__init__
+
+    def sentry_patched_broker__init__(self, *args, **kw):
+        # type: (Broker, *Any, **Any) -> None
+        integration = sentry_sdk.get_client().get_integration(DramatiqIntegration)
+
+        try:
+            middleware = kw.pop("middleware")
+        except KeyError:
+            # Unfortunately Broker and StubBroker allow middleware to be
+            # passed in as a positional argument, whilst RabbitmqBroker and
+            # RedisBroker do not.
+            if len(args) == 1:
+                middleware = args[0]
+                args = []  # type: ignore
+            else:
+                middleware = None
+
+        if middleware is None:
+            middleware = list(m() for m in default_middleware)
+        else:
+            middleware = list(middleware)
+
+        if integration is not None:
+            middleware = [m for m in middleware if not isinstance(m, SentryMiddleware)]
+            middleware.insert(0, SentryMiddleware())
+
+        kw["middleware"] = middleware
+        original_broker__init__(self, *args, **kw)
+
+    Broker.__init__ = sentry_patched_broker__init__
+
+
+class SentryMiddleware(Middleware):  # type: ignore[misc]
+    """
+    A Dramatiq middleware that automatically captures and sends
+    exceptions to Sentry.
+
+    This is automatically added to every instantiated broker via the
+    DramatiqIntegration.
+    """
+
+    def before_process_message(self, broker, message):
+        # type: (Broker, Message) -> None
+        integration = sentry_sdk.get_client().get_integration(DramatiqIntegration)
+        if integration is None:
+            return
+
+        message._scope_manager = sentry_sdk.new_scope()
+        message._scope_manager.__enter__()
+
+        scope = sentry_sdk.get_current_scope()
+        scope.transaction = message.actor_name
+        scope.set_extra("dramatiq_message_id", message.message_id)
+        scope.add_event_processor(_make_message_event_processor(message, integration))
+
+    def after_process_message(self, broker, message, *, result=None, exception=None):
+        # type: (Broker, Message, Any, Optional[Any], Optional[Exception]) -> None
+        integration = sentry_sdk.get_client().get_integration(DramatiqIntegration)
+        if integration is None:
+            return
+
+        actor = broker.get_actor(message.actor_name)
+        throws = message.options.get("throws") or actor.options.get("throws")
+
+        try:
+            if (
+                exception is not None
+                and not (throws and isinstance(exception, throws))
+                and not isinstance(exception, Retry)
+            ):
+                event, hint = event_from_exception(
+                    exception,
+                    client_options=sentry_sdk.get_client().options,
+                    mechanism={
+                        "type": DramatiqIntegration.identifier,
+                        "handled": False,
+                    },
+                )
+                sentry_sdk.capture_event(event, hint=hint)
+        finally:
+            message._scope_manager.__exit__(None, None, None)
+
+
+def _make_message_event_processor(message, integration):
+    # type: (Message, DramatiqIntegration) -> Callable[[Event, Hint], Optional[Event]]
+
+    def inner(event, hint):
+        # type: (Event, Hint) -> Optional[Event]
+        with capture_internal_exceptions():
+            DramatiqMessageExtractor(message).extract_into_event(event)
+
+        return event
+
+    return inner
+
+
+class DramatiqMessageExtractor:
+    def __init__(self, message):
+        # type: (Message) -> None
+        self.message_data = dict(message.asdict())
+
+    def content_length(self):
+        # type: () -> int
+        return len(json.dumps(self.message_data))
+
+    def extract_into_event(self, event):
+        # type: (Event) -> None
+        client = sentry_sdk.get_client()
+        if not client.is_active():
+            return
+
+        contexts = event.setdefault("contexts", {})
+        request_info = contexts.setdefault("dramatiq", {})
+        request_info["type"] = "dramatiq"
+
+        data = None  # type: Optional[Union[AnnotatedValue, Dict[str, Any]]]
+        if not request_body_within_bounds(client, self.content_length()):
+            data = AnnotatedValue.removed_because_over_size_limit()
+        else:
+            data = self.message_data
+
+        request_info["data"] = data
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/excepthook.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/excepthook.py
new file mode 100644
index 00000000..61c7e460
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/excepthook.py
@@ -0,0 +1,83 @@
+import sys
+
+import sentry_sdk
+from sentry_sdk.utils import (
+    capture_internal_exceptions,
+    event_from_exception,
+)
+from sentry_sdk.integrations import Integration
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Callable
+    from typing import Any
+    from typing import Type
+    from typing import Optional
+
+    from types import TracebackType
+
+    Excepthook = Callable[
+        [Type[BaseException], BaseException, Optional[TracebackType]],
+        Any,
+    ]
+
+
+class ExcepthookIntegration(Integration):
+    identifier = "excepthook"
+
+    always_run = False
+
+    def __init__(self, always_run=False):
+        # type: (bool) -> None
+
+        if not isinstance(always_run, bool):
+            raise ValueError(
+                "Invalid value for always_run: %s (must be type boolean)"
+                % (always_run,)
+            )
+        self.always_run = always_run
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        sys.excepthook = _make_excepthook(sys.excepthook)
+
+
+def _make_excepthook(old_excepthook):
+    # type: (Excepthook) -> Excepthook
+    def sentry_sdk_excepthook(type_, value, traceback):
+        # type: (Type[BaseException], BaseException, Optional[TracebackType]) -> None
+        integration = sentry_sdk.get_client().get_integration(ExcepthookIntegration)
+
+        # Note: If we replace this with ensure_integration_enabled, then
+        # we break the exceptiongroup backport;
+        # see https://github.com/getsentry/sentry-python/issues/3097
+        if integration is None:
+            return old_excepthook(type_, value, traceback)
+
+        if _should_send(integration.always_run):
+            with capture_internal_exceptions():
+                event, hint = event_from_exception(
+                    (type_, value, traceback),
+                    client_options=sentry_sdk.get_client().options,
+                    mechanism={"type": "excepthook", "handled": False},
+                )
+                sentry_sdk.capture_event(event, hint=hint)
+
+        return old_excepthook(type_, value, traceback)
+
+    return sentry_sdk_excepthook
+
+
+def _should_send(always_run=False):
+    # type: (bool) -> bool
+    if always_run:
+        return True
+
+    if hasattr(sys, "ps1"):
+        # Disable the excepthook for interactive Python shells, otherwise
+        # every typo gets sent to Sentry.
+        return False
+
+    return True
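+
+
+# A minimal usage sketch (assumed, placeholder DSN): always_run=True forces
+# capture even in interactive shells, where hasattr(sys, "ps1") would
+# otherwise make _should_send return False:
+#
+#     import sentry_sdk
+#     from sentry_sdk.integrations.excepthook import ExcepthookIntegration
+#
+#     sentry_sdk.init(dsn="https://<key>@<org>.ingest.sentry.io/<project>",
+#                     integrations=[ExcepthookIntegration(always_run=True)])
+#     raise RuntimeError("boom")  # reported via sentry_sdk_excepthook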
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/executing.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/executing.py
new file mode 100644
index 00000000..6e68b8c0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/executing.py
@@ -0,0 +1,67 @@
+import sentry_sdk
+from sentry_sdk.integrations import Integration, DidNotEnable
+from sentry_sdk.scope import add_global_event_processor
+from sentry_sdk.utils import walk_exception_chain, iter_stacks
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Optional
+
+    from sentry_sdk._types import Event, Hint
+
+try:
+    import executing
+except ImportError:
+    raise DidNotEnable("executing is not installed")
+
+
+class ExecutingIntegration(Integration):
+    identifier = "executing"
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+
+        @add_global_event_processor
+        def add_executing_info(event, hint):
+            # type: (Event, Optional[Hint]) -> Optional[Event]
+            if sentry_sdk.get_client().get_integration(ExecutingIntegration) is None:
+                return event
+
+            if hint is None:
+                return event
+
+            exc_info = hint.get("exc_info", None)
+
+            if exc_info is None:
+                return event
+
+            exception = event.get("exception", None)
+
+            if exception is None:
+                return event
+
+            values = exception.get("values", None)
+
+            if values is None:
+                return event
+
+            for exception, (_exc_type, _exc_value, exc_tb) in zip(
+                reversed(values), walk_exception_chain(exc_info)
+            ):
+                sentry_frames = [
+                    frame
+                    for frame in exception.get("stacktrace", {}).get("frames", [])
+                    if frame.get("function")
+                ]
+                tbs = list(iter_stacks(exc_tb))
+                if len(sentry_frames) != len(tbs):
+                    continue
+
+                for sentry_frame, tb in zip(sentry_frames, tbs):
+                    frame = tb.tb_frame
+                    source = executing.Source.for_frame(frame)
+                    sentry_frame["function"] = source.code_qualname(frame.f_code)
+
+            return event
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/falcon.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/falcon.py
new file mode 100644
index 00000000..ddedcb10
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/falcon.py
@@ -0,0 +1,272 @@
+import sentry_sdk
+from sentry_sdk.integrations import _check_minimum_version, Integration, DidNotEnable
+from sentry_sdk.integrations._wsgi_common import RequestExtractor
+from sentry_sdk.integrations.wsgi import SentryWsgiMiddleware
+from sentry_sdk.tracing import SOURCE_FOR_STYLE
+from sentry_sdk.utils import (
+    capture_internal_exceptions,
+    ensure_integration_enabled,
+    event_from_exception,
+    parse_version,
+)
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+    from typing import Dict
+    from typing import Optional
+
+    from sentry_sdk._types import Event, EventProcessor
+
+# In Falcon 3.0 `falcon.api_helpers` is renamed to `falcon.app_helpers`
+# and `falcon.API` to `falcon.App`
+
+try:
+    import falcon  # type: ignore
+
+    from falcon import __version__ as FALCON_VERSION
+except ImportError:
+    raise DidNotEnable("Falcon not installed")
+
+try:
+    import falcon.app_helpers  # type: ignore
+
+    falcon_helpers = falcon.app_helpers
+    falcon_app_class = falcon.App
+    FALCON3 = True
+except ImportError:
+    import falcon.api_helpers  # type: ignore
+
+    falcon_helpers = falcon.api_helpers
+    falcon_app_class = falcon.API
+    FALCON3 = False
+
+
+_FALCON_UNSET = None  # type: Optional[object]
+if FALCON3:  # falcon.request._UNSET is only available in Falcon 3.0+
+    with capture_internal_exceptions():
+        from falcon.request import _UNSET as _FALCON_UNSET  # type: ignore[import-not-found, no-redef]
+
+
+class FalconRequestExtractor(RequestExtractor):
+    def env(self):
+        # type: () -> Dict[str, Any]
+        return self.request.env
+
+    def cookies(self):
+        # type: () -> Dict[str, Any]
+        return self.request.cookies
+
+    def form(self):
+        # type: () -> None
+        return None  # No such concept in Falcon
+
+    def files(self):
+        # type: () -> None
+        return None  # No such concept in Falcon
+
+    def raw_data(self):
+        # type: () -> Optional[str]
+
+        # As request data can only be read once, we won't make this available
+        # to Sentry. Just send back a dummy string in case there was a
+        # content length.
+        # TODO(jmagnusson): Figure out if there's a way to support this
+        content_length = self.content_length()
+        if content_length > 0:
+            return "[REQUEST_CONTAINING_RAW_DATA]"
+        else:
+            return None
+
+    def json(self):
+        # type: () -> Optional[Dict[str, Any]]
+        # Fall back to cached_media = None if self.request._media is not available.
+        cached_media = None
+        with capture_internal_exceptions():
+            # self.request._media is the cached self.request.media
+            # value. It is only available if self.request.media
+            # has already been accessed. Therefore, reading
+            # self.request._media will not exhaust the raw request
+            # stream (self.request.bounded_stream) because it has
+            # already been read if self.request._media is set.
+            cached_media = self.request._media
+
+        if cached_media is not _FALCON_UNSET:
+            return cached_media
+
+        return None
+
+
+class SentryFalconMiddleware:
+    """Captures exceptions in Falcon requests and send to Sentry"""
+
+    def process_request(self, req, resp, *args, **kwargs):
+        # type: (Any, Any, *Any, **Any) -> None
+        integration = sentry_sdk.get_client().get_integration(FalconIntegration)
+        if integration is None:
+            return
+
+        scope = sentry_sdk.get_isolation_scope()
+        scope._name = "falcon"
+        scope.add_event_processor(_make_request_event_processor(req, integration))
+
+
+TRANSACTION_STYLE_VALUES = ("uri_template", "path")
+
+
+class FalconIntegration(Integration):
+    identifier = "falcon"
+    origin = f"auto.http.{identifier}"
+
+    transaction_style = ""
+
+    def __init__(self, transaction_style="uri_template"):
+        # type: (str) -> None
+        if transaction_style not in TRANSACTION_STYLE_VALUES:
+            raise ValueError(
+                "Invalid value for transaction_style: %s (must be in %s)"
+                % (transaction_style, TRANSACTION_STYLE_VALUES)
+            )
+        self.transaction_style = transaction_style
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+
+        version = parse_version(FALCON_VERSION)
+        _check_minimum_version(FalconIntegration, version)
+
+        _patch_wsgi_app()
+        _patch_handle_exception()
+        _patch_prepare_middleware()
+
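+# A minimal usage sketch (assumed app, placeholder DSN): transaction_style
+# controls how transactions are named, "uri_template" (e.g. "/things/{id}")
+# or "path" (e.g. "/things/42"):
+#
+#     import falcon
+#     import sentry_sdk
+#     from sentry_sdk.integrations.falcon import FalconIntegration
+#
+#     sentry_sdk.init(dsn="https://<key>@<org>.ingest.sentry.io/<project>",
+#                     integrations=[FalconIntegration(transaction_style="path")])
+#     app = falcon.App()  # __call__ is now wrapped in SentryWsgiMiddleware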
+
+def _patch_wsgi_app():
+    # type: () -> None
+    original_wsgi_app = falcon_app_class.__call__
+
+    def sentry_patched_wsgi_app(self, env, start_response):
+        # type: (falcon.API, Any, Any) -> Any
+        integration = sentry_sdk.get_client().get_integration(FalconIntegration)
+        if integration is None:
+            return original_wsgi_app(self, env, start_response)
+
+        sentry_wrapped = SentryWsgiMiddleware(
+            lambda envi, start_resp: original_wsgi_app(self, envi, start_resp),
+            span_origin=FalconIntegration.origin,
+        )
+
+        return sentry_wrapped(env, start_response)
+
+    falcon_app_class.__call__ = sentry_patched_wsgi_app
+
+
+def _patch_handle_exception():
+    # type: () -> None
+    original_handle_exception = falcon_app_class._handle_exception
+
+    @ensure_integration_enabled(FalconIntegration, original_handle_exception)
+    def sentry_patched_handle_exception(self, *args):
+        # type: (falcon.API, *Any) -> Any
+        # NOTE(jmagnusson): falcon 2.0 changed falcon.API._handle_exception
+        # method signature from `(ex, req, resp, params)` to
+        # `(req, resp, ex, params)`
+        ex = response = None
+        with capture_internal_exceptions():
+            ex = next(argument for argument in args if isinstance(argument, Exception))
+            response = next(
+                argument for argument in args if isinstance(argument, falcon.Response)
+            )
+
+        was_handled = original_handle_exception(self, *args)
+
+        if ex is None or response is None:
+            # Both ex and response should have a non-None value at this point; otherwise,
+            # there is an error with the SDK that will have been captured in the
+            # capture_internal_exceptions block above.
+            return was_handled
+
+        if _exception_leads_to_http_5xx(ex, response):
+            event, hint = event_from_exception(
+                ex,
+                client_options=sentry_sdk.get_client().options,
+                mechanism={"type": "falcon", "handled": False},
+            )
+            sentry_sdk.capture_event(event, hint=hint)
+
+        return was_handled
+
+    falcon_app_class._handle_exception = sentry_patched_handle_exception
+
+
+def _patch_prepare_middleware():
+    # type: () -> None
+    original_prepare_middleware = falcon_helpers.prepare_middleware
+
+    def sentry_patched_prepare_middleware(
+        middleware=None, independent_middleware=False, asgi=False
+    ):
+        # type: (Any, Any, bool) -> Any
+        if asgi:
+            # We don't support ASGI Falcon apps, so we don't patch anything here
+            return original_prepare_middleware(middleware, independent_middleware, asgi)
+
+        integration = sentry_sdk.get_client().get_integration(FalconIntegration)
+        if integration is not None:
+            middleware = [SentryFalconMiddleware()] + (middleware or [])
+
+        # We intentionally omit the asgi argument here, since the default is False anyway,
+        # and this way, we remain backwards-compatible with pre-3.0.0 Falcon versions.
+        return original_prepare_middleware(middleware, independent_middleware)
+
+    falcon_helpers.prepare_middleware = sentry_patched_prepare_middleware
+
+
+def _exception_leads_to_http_5xx(ex, response):
+    # type: (Exception, falcon.Response) -> bool
+    is_server_error = isinstance(ex, falcon.HTTPError) and (ex.status or "").startswith(
+        "5"
+    )
+    is_unhandled_error = not isinstance(
+        ex, (falcon.HTTPError, falcon.http_status.HTTPStatus)
+    )
+
+    # We only check the HTTP status on Falcon 3 because in Falcon 2, the status on the response
+    # at the stage where we capture it is listed as 200, even though we would expect to see a 500
+    # status. Since at the time of this change, Falcon 2 is ca. 4 years old, we have decided to
+    # only perform this check on Falcon 3+, despite the risk that some handled errors might be
+    # reported to Sentry as unhandled on Falcon 2.
+    return (is_server_error or is_unhandled_error) and (
+        not FALCON3 or _has_http_5xx_status(response)
+    )
+
+
+def _has_http_5xx_status(response):
+    # type: (falcon.Response) -> bool
+    return response.status.startswith("5")
+
+
+def _set_transaction_name_and_source(event, transaction_style, request):
+    # type: (Event, str, falcon.Request) -> None
+    name_for_style = {
+        "uri_template": request.uri_template,
+        "path": request.path,
+    }
+    event["transaction"] = name_for_style[transaction_style]
+    event["transaction_info"] = {"source": SOURCE_FOR_STYLE[transaction_style]}
+
+
+def _make_request_event_processor(req, integration):
+    # type: (falcon.Request, FalconIntegration) -> EventProcessor
+
+    def event_processor(event, hint):
+        # type: (Event, dict[str, Any]) -> Event
+        _set_transaction_name_and_source(event, integration.transaction_style, req)
+
+        with capture_internal_exceptions():
+            FalconRequestExtractor(req).extract_into_event(event)
+
+        return event
+
+    return event_processor
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/fastapi.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/fastapi.py
new file mode 100644
index 00000000..76c6adee
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/fastapi.py
@@ -0,0 +1,147 @@
+import asyncio
+from copy import deepcopy
+from functools import wraps
+
+import sentry_sdk
+from sentry_sdk.integrations import DidNotEnable
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.tracing import SOURCE_FOR_STYLE, TransactionSource
+from sentry_sdk.utils import (
+    transaction_from_function,
+    logger,
+)
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any, Callable, Dict
+    from sentry_sdk._types import Event
+
+try:
+    from sentry_sdk.integrations.starlette import (
+        StarletteIntegration,
+        StarletteRequestExtractor,
+    )
+except DidNotEnable:
+    raise DidNotEnable("Starlette is not installed")
+
+try:
+    import fastapi  # type: ignore
+except ImportError:
+    raise DidNotEnable("FastAPI is not installed")
+
+
+_DEFAULT_TRANSACTION_NAME = "generic FastAPI request"
+
+
+class FastApiIntegration(StarletteIntegration):
+    identifier = "fastapi"
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        patch_get_request_handler()
+
+
+def _set_transaction_name_and_source(scope, transaction_style, request):
+    # type: (sentry_sdk.Scope, str, Any) -> None
+    name = ""
+
+    if transaction_style == "endpoint":
+        endpoint = request.scope.get("endpoint")
+        if endpoint:
+            name = transaction_from_function(endpoint) or ""
+
+    elif transaction_style == "url":
+        route = request.scope.get("route")
+        if route:
+            path = getattr(route, "path", None)
+            if path is not None:
+                name = path
+
+    if not name:
+        name = _DEFAULT_TRANSACTION_NAME
+        source = TransactionSource.ROUTE
+    else:
+        source = SOURCE_FOR_STYLE[transaction_style]
+
+    scope.set_transaction_name(name, source=source)
+    logger.debug(
+        "[FastAPI] Set transaction name and source on scope: %s / %s", name, source
+    )
+
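+# For example (hypothetical route): a handler registered as
+# @app.get("/users/{user_id}") yields, under the default "url" style, the
+# transaction name "/users/{user_id}", while the "endpoint" style would use
+# the qualified function name returned by transaction_from_function.
+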
+
+def patch_get_request_handler():
+    # type: () -> None
+    old_get_request_handler = fastapi.routing.get_request_handler
+
+    def _sentry_get_request_handler(*args, **kwargs):
+        # type: (*Any, **Any) -> Any
+        dependant = kwargs.get("dependant")
+        if (
+            dependant
+            and dependant.call is not None
+            and not asyncio.iscoroutinefunction(dependant.call)
+        ):
+            old_call = dependant.call
+
+            @wraps(old_call)
+            def _sentry_call(*args, **kwargs):
+                # type: (*Any, **Any) -> Any
+                current_scope = sentry_sdk.get_current_scope()
+                if current_scope.transaction is not None:
+                    current_scope.transaction.update_active_thread()
+
+                sentry_scope = sentry_sdk.get_isolation_scope()
+                if sentry_scope.profile is not None:
+                    sentry_scope.profile.update_active_thread_id()
+
+                return old_call(*args, **kwargs)
+
+            dependant.call = _sentry_call
+
+        old_app = old_get_request_handler(*args, **kwargs)
+
+        async def _sentry_app(*args, **kwargs):
+            # type: (*Any, **Any) -> Any
+            integration = sentry_sdk.get_client().get_integration(FastApiIntegration)
+            if integration is None:
+                return await old_app(*args, **kwargs)
+
+            request = args[0]
+
+            _set_transaction_name_and_source(
+                sentry_sdk.get_current_scope(), integration.transaction_style, request
+            )
+            sentry_scope = sentry_sdk.get_isolation_scope()
+            extractor = StarletteRequestExtractor(request)
+            info = await extractor.extract_request_info()
+
+            def _make_request_event_processor(req, integration):
+                # type: (Any, Any) -> Callable[[Event, Dict[str, Any]], Event]
+                def event_processor(event, hint):
+                    # type: (Event, Dict[str, Any]) -> Event
+
+                    # Extract information from request
+                    request_info = event.get("request", {})
+                    if info:
+                        if "cookies" in info and should_send_default_pii():
+                            request_info["cookies"] = info["cookies"]
+                        if "data" in info:
+                            request_info["data"] = info["data"]
+                    event["request"] = deepcopy(request_info)
+
+                    return event
+
+                return event_processor
+
+            sentry_scope._name = FastApiIntegration.identifier
+            sentry_scope.add_event_processor(
+                _make_request_event_processor(request, integration)
+            )
+
+            return await old_app(*args, **kwargs)
+
+        return _sentry_app
+
+    fastapi.routing.get_request_handler = _sentry_get_request_handler
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/flask.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/flask.py
new file mode 100644
index 00000000..f45ec6db
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/flask.py
@@ -0,0 +1,275 @@
+import sentry_sdk
+from sentry_sdk.integrations import _check_minimum_version, DidNotEnable, Integration
+from sentry_sdk.integrations._wsgi_common import (
+    DEFAULT_HTTP_METHODS_TO_CAPTURE,
+    RequestExtractor,
+)
+from sentry_sdk.integrations.wsgi import SentryWsgiMiddleware
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.tracing import SOURCE_FOR_STYLE
+from sentry_sdk.utils import (
+    capture_internal_exceptions,
+    ensure_integration_enabled,
+    event_from_exception,
+    package_version,
+)
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any, Callable, Dict, Union
+
+    from sentry_sdk._types import Event, EventProcessor
+    from sentry_sdk.integrations.wsgi import _ScopedResponse
+    from werkzeug.datastructures import FileStorage, ImmutableMultiDict
+
+
+try:
+    import flask_login  # type: ignore
+except ImportError:
+    flask_login = None
+
+try:
+    from flask import Flask, Request  # type: ignore
+    from flask import request as flask_request
+    from flask.signals import (
+        before_render_template,
+        got_request_exception,
+        request_started,
+    )
+    from markupsafe import Markup
+except ImportError:
+    raise DidNotEnable("Flask is not installed")
+
+try:
+    import blinker  # noqa
+except ImportError:
+    raise DidNotEnable("blinker is not installed")
+
+TRANSACTION_STYLE_VALUES = ("endpoint", "url")
+
+
+class FlaskIntegration(Integration):
+    identifier = "flask"
+    origin = f"auto.http.{identifier}"
+
+    transaction_style = ""
+
+    def __init__(
+        self,
+        transaction_style="endpoint",  # type: str
+        http_methods_to_capture=DEFAULT_HTTP_METHODS_TO_CAPTURE,  # type: tuple[str, ...]
+    ):
+        # type: (...) -> None
+        if transaction_style not in TRANSACTION_STYLE_VALUES:
+            raise ValueError(
+                "Invalid value for transaction_style: %s (must be in %s)"
+                % (transaction_style, TRANSACTION_STYLE_VALUES)
+            )
+        self.transaction_style = transaction_style
+        self.http_methods_to_capture = tuple(map(str.upper, http_methods_to_capture))
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        try:
+            from quart import Quart  # type: ignore
+
+            if Flask == Quart:
+                # This is Quart masquerading as Flask; don't enable the Flask
+                # integration. See https://github.com/getsentry/sentry-python/issues/2709
+                raise DidNotEnable(
+                    "This is not a Flask app but rather Quart pretending to be Flask"
+                )
+        except ImportError:
+            pass
+
+        version = package_version("flask")
+        _check_minimum_version(FlaskIntegration, version)
+
+        before_render_template.connect(_add_sentry_trace)
+        request_started.connect(_request_started)
+        got_request_exception.connect(_capture_exception)
+
+        old_app = Flask.__call__
+
+        def sentry_patched_wsgi_app(self, environ, start_response):
+            # type: (Any, Dict[str, str], Callable[..., Any]) -> _ScopedResponse
+            # Look up the integration once; if it is not installed, fall
+            # through to the unpatched app.
+            integration = sentry_sdk.get_client().get_integration(FlaskIntegration)
+            if integration is None:
+                return old_app(self, environ, start_response)
+
+            middleware = SentryWsgiMiddleware(
+                lambda *a, **kw: old_app(self, *a, **kw),
+                span_origin=FlaskIntegration.origin,
+                http_methods_to_capture=integration.http_methods_to_capture,
+            )
+            return middleware(environ, start_response)
+
+        Flask.__call__ = sentry_patched_wsgi_app
+
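+# A minimal usage sketch (assumed, placeholder DSN): restrict which HTTP
+# methods become transactions via http_methods_to_capture:
+#
+#     import sentry_sdk
+#     from sentry_sdk.integrations.flask import FlaskIntegration
+#
+#     sentry_sdk.init(
+#         dsn="https://<key>@<org>.ingest.sentry.io/<project>",
+#         integrations=[FlaskIntegration(transaction_style="url",
+#                                        http_methods_to_capture=("GET", "POST"))],
+#     )
+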
+
+def _add_sentry_trace(sender, template, context, **extra):
+    # type: (Flask, Any, Dict[str, Any], **Any) -> None
+    if "sentry_trace" in context:
+        return
+
+    scope = sentry_sdk.get_current_scope()
+    trace_meta = Markup(scope.trace_propagation_meta())
+    context["sentry_trace"] = trace_meta  # for backwards compatibility
+    context["sentry_trace_meta"] = trace_meta
+
+
+def _set_transaction_name_and_source(scope, transaction_style, request):
+    # type: (sentry_sdk.Scope, str, Request) -> None
+    try:
+        name_for_style = {
+            "url": request.url_rule.rule,
+            "endpoint": request.url_rule.endpoint,
+        }
+        scope.set_transaction_name(
+            name_for_style[transaction_style],
+            source=SOURCE_FOR_STYLE[transaction_style],
+        )
+    except Exception:
+        pass
+
+
+def _request_started(app, **kwargs):
+    # type: (Flask, **Any) -> None
+    integration = sentry_sdk.get_client().get_integration(FlaskIntegration)
+    if integration is None:
+        return
+
+    request = flask_request._get_current_object()
+
+    # Set the transaction name and source here,
+    # but rely on WSGI middleware to actually start the transaction
+    _set_transaction_name_and_source(
+        sentry_sdk.get_current_scope(), integration.transaction_style, request
+    )
+
+    scope = sentry_sdk.get_isolation_scope()
+    evt_processor = _make_request_event_processor(app, request, integration)
+    scope.add_event_processor(evt_processor)
+
+
+class FlaskRequestExtractor(RequestExtractor):
+    def env(self):
+        # type: () -> Dict[str, str]
+        return self.request.environ
+
+    def cookies(self):
+        # type: () -> Dict[Any, Any]
+        return {
+            k: v[0] if isinstance(v, list) and len(v) == 1 else v
+            for k, v in self.request.cookies.items()
+        }
+
+    def raw_data(self):
+        # type: () -> bytes
+        return self.request.get_data()
+
+    def form(self):
+        # type: () -> ImmutableMultiDict[str, Any]
+        return self.request.form
+
+    def files(self):
+        # type: () -> ImmutableMultiDict[str, Any]
+        return self.request.files
+
+    def is_json(self):
+        # type: () -> bool
+        return self.request.is_json
+
+    def json(self):
+        # type: () -> Any
+        return self.request.get_json(silent=True)
+
+    def size_of_file(self, file):
+        # type: (FileStorage) -> int
+        return file.content_length
+
+
+def _make_request_event_processor(app, request, integration):
+    # type: (Flask, Callable[[], Request], FlaskIntegration) -> EventProcessor
+
+    def inner(event, hint):
+        # type: (Event, dict[str, Any]) -> Event
+
+        # If the request is gone, we are fine not logging the data from
+        # it. This might happen if the processor runs on
+        # another thread.
+        if request is None:
+            return event
+
+        with capture_internal_exceptions():
+            FlaskRequestExtractor(request).extract_into_event(event)
+
+        if should_send_default_pii():
+            with capture_internal_exceptions():
+                _add_user_to_event(event)
+
+        return event
+
+    return inner
+
+
+@ensure_integration_enabled(FlaskIntegration)
+def _capture_exception(sender, exception, **kwargs):
+    # type: (Flask, Union[ValueError, BaseException], **Any) -> None
+    event, hint = event_from_exception(
+        exception,
+        client_options=sentry_sdk.get_client().options,
+        mechanism={"type": "flask", "handled": False},
+    )
+
+    sentry_sdk.capture_event(event, hint=hint)
+
+
+def _add_user_to_event(event):
+    # type: (Event) -> None
+    if flask_login is None:
+        return
+
+    user = flask_login.current_user
+    if user is None:
+        return
+
+    with capture_internal_exceptions():
+        # Access this object as late as possible as accessing the user
+        # is relatively costly
+
+        user_info = event.setdefault("user", {})
+
+        try:
+            user_info.setdefault("id", user.get_id())
+            # TODO: more configurable user attrs here
+        except AttributeError:
+            # might happen if:
+            # - flask_login could not be imported
+            # - flask_login is not configured
+            # - no user is logged in
+            pass
+
+        # The following attribute accesses are ineffective for the general
+        # Flask-Login case, because the User interface of Flask-Login does not
+        # care about anything but the ID. However, Flask-User (based on
+        # Flask-Login) documents a few optional extra attributes.
+        #
+        # https://github.com/lingthio/Flask-User/blob/a379fa0a281789618c484b459cb41236779b95b1/docs/source/data_models.rst#fixed-data-model-property-names
+
+        try:
+            user_info.setdefault("email", user.email)
+        except Exception:
+            pass
+
+        try:
+            user_info.setdefault("username", user.username)
+        except Exception:
+            pass
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/gcp.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/gcp.py
new file mode 100644
index 00000000..c637b741
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/gcp.py
@@ -0,0 +1,234 @@
+import functools
+import sys
+from copy import deepcopy
+from datetime import datetime, timedelta, timezone
+from os import environ
+
+import sentry_sdk
+from sentry_sdk.api import continue_trace
+from sentry_sdk.consts import OP
+from sentry_sdk.integrations import Integration
+from sentry_sdk.integrations._wsgi_common import _filter_headers
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.tracing import TransactionSource
+from sentry_sdk.utils import (
+    AnnotatedValue,
+    capture_internal_exceptions,
+    event_from_exception,
+    logger,
+    TimeoutThread,
+    reraise,
+)
+
+from typing import TYPE_CHECKING
+
+# Constants
+TIMEOUT_WARNING_BUFFER = 1.5  # Buffer time required to send timeout warning to Sentry
+MILLIS_TO_SECONDS = 1000.0
+
+if TYPE_CHECKING:
+    from typing import Any
+    from typing import TypeVar
+    from typing import Callable
+    from typing import Optional
+
+    from sentry_sdk._types import EventProcessor, Event, Hint
+
+    F = TypeVar("F", bound=Callable[..., Any])
+
+
+def _wrap_func(func):
+    # type: (F) -> F
+    @functools.wraps(func)
+    def sentry_func(functionhandler, gcp_event, *args, **kwargs):
+        # type: (Any, Any, *Any, **Any) -> Any
+        client = sentry_sdk.get_client()
+
+        integration = client.get_integration(GcpIntegration)
+        if integration is None:
+            return func(functionhandler, gcp_event, *args, **kwargs)
+
+        configured_time = environ.get("FUNCTION_TIMEOUT_SEC")
+        if not configured_time:
+            logger.debug(
+                "The configured timeout could not be fetched from Cloud Functions configuration."
+            )
+            return func(functionhandler, gcp_event, *args, **kwargs)
+
+        configured_time = int(configured_time)
+
+        initial_time = datetime.now(timezone.utc)
+
+        with sentry_sdk.isolation_scope() as scope:
+            with capture_internal_exceptions():
+                scope.clear_breadcrumbs()
+                scope.add_event_processor(
+                    _make_request_event_processor(
+                        gcp_event, configured_time, initial_time
+                    )
+                )
+                scope.set_tag("gcp_region", environ.get("FUNCTION_REGION"))
+                timeout_thread = None
+                if (
+                    integration.timeout_warning
+                    and configured_time > TIMEOUT_WARNING_BUFFER
+                ):
+                    waiting_time = configured_time - TIMEOUT_WARNING_BUFFER
+
+                    timeout_thread = TimeoutThread(waiting_time, configured_time)
+
+                    # Starting the thread to raise timeout warning exception
+                    timeout_thread.start()
+
+            headers = {}
+            if hasattr(gcp_event, "headers"):
+                headers = gcp_event.headers
+
+            transaction = continue_trace(
+                headers,
+                op=OP.FUNCTION_GCP,
+                name=environ.get("FUNCTION_NAME", ""),
+                source=TransactionSource.COMPONENT,
+                origin=GcpIntegration.origin,
+            )
+            sampling_context = {
+                "gcp_env": {
+                    "function_name": environ.get("FUNCTION_NAME"),
+                    "function_entry_point": environ.get("ENTRY_POINT"),
+                    "function_identity": environ.get("FUNCTION_IDENTITY"),
+                    "function_region": environ.get("FUNCTION_REGION"),
+                    "function_project": environ.get("GCP_PROJECT"),
+                },
+                "gcp_event": gcp_event,
+            }
+            with sentry_sdk.start_transaction(
+                transaction, custom_sampling_context=sampling_context
+            ):
+                try:
+                    return func(functionhandler, gcp_event, *args, **kwargs)
+                except Exception:
+                    exc_info = sys.exc_info()
+                    sentry_event, hint = event_from_exception(
+                        exc_info,
+                        client_options=client.options,
+                        mechanism={"type": "gcp", "handled": False},
+                    )
+                    sentry_sdk.capture_event(sentry_event, hint=hint)
+                    reraise(*exc_info)
+                finally:
+                    if timeout_thread:
+                        timeout_thread.stop()
+                    # Flush out the event queue
+                    client.flush()
+
+    return sentry_func  # type: ignore
+
+
+class GcpIntegration(Integration):
+    identifier = "gcp"
+    origin = f"auto.function.{identifier}"
+
+    def __init__(self, timeout_warning=False):
+        # type: (bool) -> None
+        self.timeout_warning = timeout_warning
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        import __main__ as gcp_functions
+
+        if not hasattr(gcp_functions, "worker_v1"):
+            logger.warning(
+                "GcpIntegration currently supports only Python 3.7 runtime environment."
+            )
+            return
+
+        worker1 = gcp_functions.worker_v1
+
+        worker1.FunctionHandler.invoke_user_function = _wrap_func(
+            worker1.FunctionHandler.invoke_user_function
+        )
+
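+# A minimal usage sketch (assumed, placeholder DSN): with timeout_warning
+# enabled, a warning is raised TIMEOUT_WARNING_BUFFER seconds before the
+# FUNCTION_TIMEOUT_SEC limit configured for the Cloud Function:
+#
+#     import sentry_sdk
+#     from sentry_sdk.integrations.gcp import GcpIntegration
+#
+#     sentry_sdk.init(dsn="https://<key>@<org>.ingest.sentry.io/<project>",
+#                     integrations=[GcpIntegration(timeout_warning=True)])
+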
+
+def _make_request_event_processor(gcp_event, configured_timeout, initial_time):
+    # type: (Any, Any, Any) -> EventProcessor
+
+    def event_processor(event, hint):
+        # type: (Event, Hint) -> Optional[Event]
+
+        final_time = datetime.now(timezone.utc)
+        time_diff = final_time - initial_time
+
+        execution_duration_in_millis = time_diff / timedelta(milliseconds=1)
+
+        extra = event.setdefault("extra", {})
+        extra["google cloud functions"] = {
+            "function_name": environ.get("FUNCTION_NAME"),
+            "function_entry_point": environ.get("ENTRY_POINT"),
+            "function_identity": environ.get("FUNCTION_IDENTITY"),
+            "function_region": environ.get("FUNCTION_REGION"),
+            "function_project": environ.get("GCP_PROJECT"),
+            "execution_duration_in_millis": execution_duration_in_millis,
+            "configured_timeout_in_seconds": configured_timeout,
+        }
+
+        extra["google cloud logs"] = {
+            "url": _get_google_cloud_logs_url(final_time),
+        }
+
+        request = event.get("request", {})
+
+        request["url"] = "gcp:///{}".format(environ.get("FUNCTION_NAME"))
+
+        if hasattr(gcp_event, "method"):
+            request["method"] = gcp_event.method
+
+        if hasattr(gcp_event, "query_string"):
+            request["query_string"] = gcp_event.query_string.decode("utf-8")
+
+        if hasattr(gcp_event, "headers"):
+            request["headers"] = _filter_headers(gcp_event.headers)
+
+        if should_send_default_pii():
+            if hasattr(gcp_event, "data"):
+                request["data"] = gcp_event.data
+        else:
+            if hasattr(gcp_event, "data"):
+                # Unfortunately couldn't find a way to get structured body from GCP
+                # event. Meaning every body is unstructured to us.
+                request["data"] = AnnotatedValue.removed_because_raw_data()
+
+        event["request"] = deepcopy(request)
+
+        return event
+
+    return event_processor
+
+
+def _get_google_cloud_logs_url(final_time):
+    # type: (datetime) -> str
+    """
+    Generates a Google Cloud Logs console URL based on the environment variables
+    Arguments:
+        final_time {datetime} -- Final time
+    Returns:
+        str -- Google Cloud Logs Console URL to logs.
+    """
+    hour_ago = final_time - timedelta(hours=1)
+    formatstring = "%Y-%m-%dT%H:%M:%SZ"
+
+    url = (
+        "https://console.cloud.google.com/logs/viewer?project={project}&resource=cloud_function"
+        "%2Ffunction_name%2F{function_name}%2Fregion%2F{region}&minLogLevel=0&expandAll=false"
+        "&timestamp={timestamp_end}&customFacets=&limitCustomFacetWidth=true"
+        "&dateRangeStart={timestamp_start}&dateRangeEnd={timestamp_end}"
+        "&interval=PT1H&scrollTimestamp={timestamp_end}"
+    ).format(
+        project=environ.get("GCP_PROJECT"),
+        function_name=environ.get("FUNCTION_NAME"),
+        region=environ.get("FUNCTION_REGION"),
+        timestamp_end=final_time.strftime(formatstring),
+        timestamp_start=hour_ago.strftime(formatstring),
+    )
+
+    return url
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/gnu_backtrace.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/gnu_backtrace.py
new file mode 100644
index 00000000..dc3dc80f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/gnu_backtrace.py
@@ -0,0 +1,107 @@
+import re
+
+import sentry_sdk
+from sentry_sdk.integrations import Integration
+from sentry_sdk.scope import add_global_event_processor
+from sentry_sdk.utils import capture_internal_exceptions
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+    from sentry_sdk._types import Event
+
+
+MODULE_RE = r"[a-zA-Z0-9/._:\\-]+"
+TYPE_RE = r"[a-zA-Z0-9._:<>,-]+"
+HEXVAL_RE = r"[A-Fa-f0-9]+"
+
+
+FRAME_RE = r"""
+^(?P<index>\d+)\.\s
+(?P<package>{MODULE_RE})\(
+  (?P<retval>{TYPE_RE}\ )?
+  ((?P<function>{TYPE_RE})
+    (?P<args>\(.*\))?
+  )?
+  ((?P<constoffset>\ const)?\+0x(?P<offset>{HEXVAL_RE}))?
+\)\s
+\[0x(?P<retaddr>{HEXVAL_RE})\]$
+""".format(
+    MODULE_RE=MODULE_RE, HEXVAL_RE=HEXVAL_RE, TYPE_RE=TYPE_RE
+)
+
+FRAME_RE = re.compile(FRAME_RE, re.MULTILINE | re.VERBOSE)
+
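+# An illustrative (hypothetical) frame this pattern is meant to match, in the
+# GNU backtrace_symbols() format:
+#
+#     7. /usr/bin/server(ns::Handler::run(int) const+0x16) [0x8b3fe21]
+#
+# which captures index=7, package=/usr/bin/server, function=ns::Handler::run,
+# args=(int), offset=16 and retaddr=8b3fe21.
+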
+
+class GnuBacktraceIntegration(Integration):
+    identifier = "gnu_backtrace"
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        @add_global_event_processor
+        def process_gnu_backtrace(event, hint):
+            # type: (Event, dict[str, Any]) -> Event
+            with capture_internal_exceptions():
+                return _process_gnu_backtrace(event, hint)
+            # If _process_gnu_backtrace raised and the exception was swallowed
+            # by capture_internal_exceptions, keep the event instead of
+            # implicitly returning None and dropping it.
+            return event
+
+
+def _process_gnu_backtrace(event, hint):
+    # type: (Event, dict[str, Any]) -> Event
+    if sentry_sdk.get_client().get_integration(GnuBacktraceIntegration) is None:
+        return event
+
+    exc_info = hint.get("exc_info", None)
+
+    if exc_info is None:
+        return event
+
+    exception = event.get("exception", None)
+
+    if exception is None:
+        return event
+
+    values = exception.get("values", None)
+
+    if values is None:
+        return event
+
+    for exception in values:
+        frames = exception.get("stacktrace", {}).get("frames", [])
+        if not frames:
+            continue
+
+        msg = exception.get("value", None)
+        if not msg:
+            continue
+
+        additional_frames = []
+        new_msg = []
+
+        for line in msg.splitlines():
+            match = FRAME_RE.match(line)
+            if match:
+                additional_frames.append(
+                    (
+                        int(match.group("index")),
+                        {
+                            "package": match.group("package") or None,
+                            "function": match.group("function") or None,
+                            "platform": "native",
+                        },
+                    )
+                )
+            else:
+                # Put unmatched lines back into the message; not sure what else to do with them.
+                new_msg.append(line)
+
+        if additional_frames:
+            additional_frames.sort(key=lambda x: -x[0])
+            for _, frame in additional_frames:
+                frames.append(frame)
+
+            new_msg.append("<stacktrace parsed and removed by GnuBacktraceIntegration>")
+            exception["value"] = "\n".join(new_msg)
+
+    return event
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/gql.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/gql.py
new file mode 100644
index 00000000..5f4436f5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/gql.py
@@ -0,0 +1,145 @@
+import sentry_sdk
+from sentry_sdk.utils import (
+    event_from_exception,
+    ensure_integration_enabled,
+    parse_version,
+)
+
+from sentry_sdk.integrations import _check_minimum_version, DidNotEnable, Integration
+from sentry_sdk.scope import should_send_default_pii
+
+try:
+    import gql  # type: ignore[import-not-found]
+    from graphql import (
+        print_ast,
+        get_operation_ast,
+        DocumentNode,
+        VariableDefinitionNode,
+    )
+    from gql.transport import Transport, AsyncTransport  # type: ignore[import-not-found]
+    from gql.transport.exceptions import TransportQueryError  # type: ignore[import-not-found]
+except ImportError:
+    raise DidNotEnable("gql is not installed")
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any, Dict, Tuple, Union
+    from sentry_sdk._types import Event, EventProcessor
+
+    EventDataType = Dict[str, Union[str, Tuple[VariableDefinitionNode, ...]]]
+
+
+class GQLIntegration(Integration):
+    identifier = "gql"
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        gql_version = parse_version(gql.__version__)
+        _check_minimum_version(GQLIntegration, gql_version)
+
+        _patch_execute()
+
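+# A minimal usage sketch (assumed, placeholder DSN): failed executions raise
+# TransportQueryError and are captured with request metadata attached:
+#
+#     import sentry_sdk
+#     from sentry_sdk.integrations.gql import GQLIntegration
+#
+#     sentry_sdk.init(dsn="https://<key>@<org>.ingest.sentry.io/<project>",
+#                     integrations=[GQLIntegration()])
+#     # client.execute(gql.gql("query { viewer { login } }")) would now
+#     # report transport errors to Sentry before re-raising them.
+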
+
+def _data_from_document(document):
+    # type: (DocumentNode) -> EventDataType
+    try:
+        operation_ast = get_operation_ast(document)
+        data = {"query": print_ast(document)}  # type: EventDataType
+
+        if operation_ast is not None:
+            data["variables"] = operation_ast.variable_definitions
+            if operation_ast.name is not None:
+                data["operationName"] = operation_ast.name.value
+
+        return data
+    except (AttributeError, TypeError):
+        return dict()
+
+
+def _transport_method(transport):
+    # type: (Union[Transport, AsyncTransport]) -> str
+    """
+    The RequestsHTTPTransport allows defining the HTTP method; all
+    other transports use POST.
+    """
+    try:
+        return transport.method
+    except AttributeError:
+        return "POST"
+
+
+def _request_info_from_transport(transport):
+    # type: (Union[Transport, AsyncTransport, None]) -> Dict[str, str]
+    if transport is None:
+        return {}
+
+    request_info = {
+        "method": _transport_method(transport),
+    }
+
+    try:
+        request_info["url"] = transport.url
+    except AttributeError:
+        pass
+
+    return request_info
+
+
+def _patch_execute():
+    # type: () -> None
+    real_execute = gql.Client.execute
+
+    @ensure_integration_enabled(GQLIntegration, real_execute)
+    def sentry_patched_execute(self, document, *args, **kwargs):
+        # type: (gql.Client, DocumentNode, Any, Any) -> Any
+        scope = sentry_sdk.get_isolation_scope()
+        scope.add_event_processor(_make_gql_event_processor(self, document))
+
+        try:
+            return real_execute(self, document, *args, **kwargs)
+        except TransportQueryError as e:
+            event, hint = event_from_exception(
+                e,
+                client_options=sentry_sdk.get_client().options,
+                mechanism={"type": "gql", "handled": False},
+            )
+
+            sentry_sdk.capture_event(event, hint)
+            raise e
+
+    gql.Client.execute = sentry_patched_execute
+
+
+def _make_gql_event_processor(client, document):
+    # type: (gql.Client, DocumentNode) -> EventProcessor
+    def processor(event, hint):
+        # type: (Event, dict[str, Any]) -> Event
+        try:
+            errors = hint["exc_info"][1].errors
+        except (AttributeError, KeyError):
+            errors = None
+
+        request = event.setdefault("request", {})
+        request.update(
+            {
+                "api_target": "graphql",
+                **_request_info_from_transport(client.transport),
+            }
+        )
+
+        if should_send_default_pii():
+            request["data"] = _data_from_document(document)
+            contexts = event.setdefault("contexts", {})
+            response = contexts.setdefault("response", {})
+            response.update(
+                {
+                    "data": {"errors": errors},
+                    "type": response,
+                }
+            )
+
+        return event
+
+    return processor
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/graphene.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/graphene.py
new file mode 100644
index 00000000..00a8d155
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/graphene.py
@@ -0,0 +1,151 @@
+from contextlib import contextmanager
+
+import sentry_sdk
+from sentry_sdk.consts import OP
+from sentry_sdk.integrations import _check_minimum_version, DidNotEnable, Integration
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.utils import (
+    capture_internal_exceptions,
+    ensure_integration_enabled,
+    event_from_exception,
+    package_version,
+)
+
+try:
+    from graphene.types import schema as graphene_schema  # type: ignore
+except ImportError:
+    raise DidNotEnable("graphene is not installed")
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from collections.abc import Generator
+    from typing import Any, Dict, Union
+    from graphene.language.source import Source  # type: ignore
+    from graphql.execution import ExecutionResult
+    from graphql.type import GraphQLSchema
+    from sentry_sdk._types import Event
+
+
+class GrapheneIntegration(Integration):
+    identifier = "graphene"
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        version = package_version("graphene")
+        _check_minimum_version(GrapheneIntegration, version)
+
+        _patch_graphql()
+
+
+def _patch_graphql():
+    # type: () -> None
+    old_graphql_sync = graphene_schema.graphql_sync
+    old_graphql_async = graphene_schema.graphql
+
+    @ensure_integration_enabled(GrapheneIntegration, old_graphql_sync)
+    def _sentry_patched_graphql_sync(schema, source, *args, **kwargs):
+        # type: (GraphQLSchema, Union[str, Source], Any, Any) -> ExecutionResult
+        scope = sentry_sdk.get_isolation_scope()
+        scope.add_event_processor(_event_processor)
+
+        with graphql_span(schema, source, kwargs):
+            result = old_graphql_sync(schema, source, *args, **kwargs)
+
+        with capture_internal_exceptions():
+            client = sentry_sdk.get_client()
+            for error in result.errors or []:
+                event, hint = event_from_exception(
+                    error,
+                    client_options=client.options,
+                    mechanism={
+                        "type": GrapheneIntegration.identifier,
+                        "handled": False,
+                    },
+                )
+                sentry_sdk.capture_event(event, hint=hint)
+
+        return result
+
+    async def _sentry_patched_graphql_async(schema, source, *args, **kwargs):
+        # type: (GraphQLSchema, Union[str, Source], Any, Any) -> ExecutionResult
+        integration = sentry_sdk.get_client().get_integration(GrapheneIntegration)
+        if integration is None:
+            return await old_graphql_async(schema, source, *args, **kwargs)
+
+        scope = sentry_sdk.get_isolation_scope()
+        scope.add_event_processor(_event_processor)
+
+        with graphql_span(schema, source, kwargs):
+            result = await old_graphql_async(schema, source, *args, **kwargs)
+
+        with capture_internal_exceptions():
+            client = sentry_sdk.get_client()
+            for error in result.errors or []:
+                event, hint = event_from_exception(
+                    error,
+                    client_options=client.options,
+                    mechanism={
+                        "type": GrapheneIntegration.identifier,
+                        "handled": False,
+                    },
+                )
+                sentry_sdk.capture_event(event, hint=hint)
+
+        return result
+
+    graphene_schema.graphql_sync = _sentry_patched_graphql_sync
+    graphene_schema.graphql = _sentry_patched_graphql_async
+
+
+def _event_processor(event, hint):
+    # type: (Event, Dict[str, Any]) -> Event
+    if should_send_default_pii():
+        request_info = event.setdefault("request", {})
+        request_info["api_target"] = "graphql"
+
+    elif event.get("request", {}).get("data"):
+        del event["request"]["data"]
+
+    return event
+
+
+@contextmanager
+def graphql_span(schema, source, kwargs):
+    # type: (GraphQLSchema, Union[str, Source], Dict[str, Any]) -> Generator[None, None, None]
+    operation_name = kwargs.get("operation_name")
+
+    operation_type = "query"
+    op = OP.GRAPHQL_QUERY
+    if source.strip().startswith("mutation"):
+        operation_type = "mutation"
+        op = OP.GRAPHQL_MUTATION
+    elif source.strip().startswith("subscription"):
+        operation_type = "subscription"
+        op = OP.GRAPHQL_SUBSCRIPTION
+
+    sentry_sdk.add_breadcrumb(
+        crumb={
+            "data": {
+                "operation_name": operation_name,
+                "operation_type": operation_type,
+            },
+            "category": "graphql.operation",
+        },
+    )
+
+    scope = sentry_sdk.get_current_scope()
+    if scope.span:
+        _graphql_span = scope.span.start_child(op=op, name=operation_name)
+    else:
+        _graphql_span = sentry_sdk.start_span(op=op, name=operation_name)
+
+    _graphql_span.set_data("graphql.document", source)
+    _graphql_span.set_data("graphql.operation.name", operation_name)
+    _graphql_span.set_data("graphql.operation.type", operation_type)
+
+    try:
+        yield
+    finally:
+        _graphql_span.finish()
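+
+
+# A minimal usage sketch (hypothetical schema; not part of this module's API):
+# once the integration is enabled via sentry_sdk.init, execution goes through
+# the patched graphql_sync/graphql above, so GraphQL errors are captured and
+# each operation gets a span and breadcrumb.
+#
+#   import sentry_sdk
+#   from sentry_sdk.integrations.graphene import GrapheneIntegration
+#
+#   sentry_sdk.init(dsn="...", integrations=[GrapheneIntegration()])
+#   result = schema.execute("mutation { createUser }")  # GRAPHQL_MUTATION span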
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/grpc/__init__.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/grpc/__init__.py
new file mode 100644
index 00000000..d9dcdddb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/grpc/__init__.py
@@ -0,0 +1,151 @@
+from functools import wraps
+
+import grpc
+from grpc import Channel, Server, intercept_channel
+from grpc.aio import Channel as AsyncChannel
+from grpc.aio import Server as AsyncServer
+
+from sentry_sdk.integrations import Integration
+
+from .client import ClientInterceptor
+from .server import ServerInterceptor
+from .aio.server import ServerInterceptor as AsyncServerInterceptor
+from .aio.client import (
+    SentryUnaryUnaryClientInterceptor as AsyncUnaryUnaryClientInterceptor,
+)
+from .aio.client import (
+    SentryUnaryStreamClientInterceptor as AsyncUnaryStreamClientInterceptor,
+)
+
+from typing import TYPE_CHECKING, Any, Optional, Sequence
+
+# Hack to get new Python features working in older versions
+# without introducing a hard dependency on `typing_extensions`
+# from: https://stackoverflow.com/a/71944042/300572
+if TYPE_CHECKING:
+    from typing import ParamSpec, Callable
+else:
+    # Fake ParamSpec
+    class ParamSpec:
+        def __init__(self, _):
+            self.args = None
+            self.kwargs = None
+
+    # Callable[anything] will return None
+    class _Callable:
+        def __getitem__(self, _):
+            return None
+
+    # Make instances
+    Callable = _Callable()
+
+P = ParamSpec("P")
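+# At runtime the fake Callable[P, Channel] subscript resolves to None via
+# _Callable.__getitem__, so the annotations below are inert outside of type
+# checking.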
+
+
+def _wrap_channel_sync(func: Callable[P, Channel]) -> Callable[P, Channel]:
+    "Wrapper for synchronous secure and insecure channel."
+
+    @wraps(func)
+    def patched_channel(*args: Any, **kwargs: Any) -> Channel:
+        channel = func(*args, **kwargs)
+        if not ClientInterceptor._is_intercepted:
+            ClientInterceptor._is_intercepted = True
+            return intercept_channel(channel, ClientInterceptor())
+        else:
+            return channel
+
+    return patched_channel
+
+
+def _wrap_intercept_channel(func: Callable[P, Channel]) -> Callable[P, Channel]:
+    @wraps(func)
+    def patched_intercept_channel(
+        channel: Channel, *interceptors: grpc.ServerInterceptor
+    ) -> Channel:
+        if ClientInterceptor._is_intercepted:
+            # Drop Sentry client interceptors that are already installed so
+            # the channel is not instrumented twice.
+            interceptors = tuple(
+                interceptor
+                for interceptor in interceptors
+                if not isinstance(interceptor, ClientInterceptor)
+            )
+        return intercept_channel(channel, *interceptors)
+
+    return patched_intercept_channel  # type: ignore
+
+
+def _wrap_channel_async(func: Callable[P, AsyncChannel]) -> Callable[P, AsyncChannel]:
+    "Wrapper for asynchronous secure and insecure channel."
+
+    @wraps(func)
+    def patched_channel(  # type: ignore
+        *args: P.args,
+        interceptors: Optional[Sequence[grpc.aio.ClientInterceptor]] = None,
+        **kwargs: P.kwargs,
+    ) -> Channel:
+        sentry_interceptors = [
+            AsyncUnaryUnaryClientInterceptor(),
+            AsyncUnaryStreamClientInterceptor(),
+        ]
+        interceptors = [*sentry_interceptors, *(interceptors or [])]
+        return func(*args, interceptors=interceptors, **kwargs)  # type: ignore
+
+    return patched_channel  # type: ignore
+
+
+def _wrap_sync_server(func: Callable[P, Server]) -> Callable[P, Server]:
+    """Wrapper for synchronous server."""
+
+    @wraps(func)
+    def patched_server(  # type: ignore
+        *args: P.args,
+        interceptors: Optional[Sequence[grpc.ServerInterceptor]] = None,
+        **kwargs: P.kwargs,
+    ) -> Server:
+        # Filter out any Sentry server interceptors already present, then
+        # prepend a fresh one so it runs exactly once and before user
+        # interceptors.
+        interceptors = [
+            interceptor
+            for interceptor in interceptors or []
+            if not isinstance(interceptor, ServerInterceptor)
+        ]
+        interceptors = [ServerInterceptor(), *interceptors]
+        return func(*args, interceptors=interceptors, **kwargs)  # type: ignore
+
+    return patched_server  # type: ignore
+
+
+def _wrap_async_server(func: Callable[P, AsyncServer]) -> Callable[P, AsyncServer]:
+    """Wrapper for asynchronous server."""
+
+    @wraps(func)
+    def patched_aio_server(  # type: ignore
+        *args: P.args,
+        interceptors: Optional[Sequence[grpc.ServerInterceptor]] = None,
+        **kwargs: P.kwargs,
+    ) -> Server:
+        server_interceptor = AsyncServerInterceptor()
+        interceptors = (server_interceptor, *(interceptors or []))
+        return func(*args, interceptors=interceptors, **kwargs)  # type: ignore
+
+    return patched_aio_server  # type: ignore
+
+
+class GRPCIntegration(Integration):
+    identifier = "grpc"
+
+    @staticmethod
+    def setup_once() -> None:
+        import grpc
+
+        grpc.insecure_channel = _wrap_channel_sync(grpc.insecure_channel)
+        grpc.secure_channel = _wrap_channel_sync(grpc.secure_channel)
+        grpc.intercept_channel = _wrap_intercept_channel(grpc.intercept_channel)
+
+        grpc.aio.insecure_channel = _wrap_channel_async(grpc.aio.insecure_channel)
+        grpc.aio.secure_channel = _wrap_channel_async(grpc.aio.secure_channel)
+
+        grpc.server = _wrap_sync_server(grpc.server)
+        grpc.aio.server = _wrap_async_server(grpc.aio.server)
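+
+
+# Usage sketch (assumes the integration is enabled; thread_pool is a
+# hypothetical executor): after setup_once has run, channels and servers
+# created through the public grpc API are instrumented transparently.
+#
+#   channel = grpc.insecure_channel("localhost:50051")  # client interceptor attached
+#   server = grpc.server(thread_pool)                   # server interceptor prepended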
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/grpc/aio/__init__.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/grpc/aio/__init__.py
new file mode 100644
index 00000000..5b9e3b99
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/grpc/aio/__init__.py
@@ -0,0 +1,7 @@
+from .server import ServerInterceptor
+from .client import ClientInterceptor
+
+__all__ = [
+    "ClientInterceptor",
+    "ServerInterceptor",
+]
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/grpc/aio/client.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/grpc/aio/client.py
new file mode 100644
index 00000000..ff3c2131
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/grpc/aio/client.py
@@ -0,0 +1,94 @@
+from typing import Callable, Union, AsyncIterable, Any
+
+from grpc.aio import (
+    UnaryUnaryClientInterceptor,
+    UnaryStreamClientInterceptor,
+    ClientCallDetails,
+    UnaryUnaryCall,
+    UnaryStreamCall,
+    Metadata,
+)
+from google.protobuf.message import Message
+
+import sentry_sdk
+from sentry_sdk.consts import OP
+from sentry_sdk.integrations.grpc.consts import SPAN_ORIGIN
+
+
+class ClientInterceptor:
+    @staticmethod
+    def _update_client_call_details_metadata_from_scope(
+        client_call_details: ClientCallDetails,
+    ) -> ClientCallDetails:
+        if client_call_details.metadata is None:
+            client_call_details = client_call_details._replace(metadata=Metadata())
+        elif not isinstance(client_call_details.metadata, Metadata):
+            # This is a workaround for a GRPC bug, which was fixed in grpcio v1.60.0
+            # See https://github.com/grpc/grpc/issues/34298.
+            client_call_details = client_call_details._replace(
+                metadata=Metadata.from_tuple(client_call_details.metadata)
+            )
+        for (
+            key,
+            value,
+        ) in sentry_sdk.get_current_scope().iter_trace_propagation_headers():
+            client_call_details.metadata.add(key, value)
+        return client_call_details
+
+
+class SentryUnaryUnaryClientInterceptor(ClientInterceptor, UnaryUnaryClientInterceptor):  # type: ignore
+    async def intercept_unary_unary(
+        self,
+        continuation: Callable[[ClientCallDetails, Message], UnaryUnaryCall],
+        client_call_details: ClientCallDetails,
+        request: Message,
+    ) -> Union[UnaryUnaryCall, Message]:
+        method = client_call_details.method
+
+        with sentry_sdk.start_span(
+            op=OP.GRPC_CLIENT,
+            name="unary unary call to %s" % method.decode(),
+            origin=SPAN_ORIGIN,
+        ) as span:
+            span.set_data("type", "unary unary")
+            span.set_data("method", method)
+
+            client_call_details = self._update_client_call_details_metadata_from_scope(
+                client_call_details
+            )
+
+            response = await continuation(client_call_details, request)
+            status_code = await response.code()
+            span.set_data("code", status_code.name)
+
+            return response
+
+
+class SentryUnaryStreamClientInterceptor(
+    ClientInterceptor, UnaryStreamClientInterceptor  # type: ignore
+):
+    async def intercept_unary_stream(
+        self,
+        continuation: Callable[[ClientCallDetails, Message], UnaryStreamCall],
+        client_call_details: ClientCallDetails,
+        request: Message,
+    ) -> Union[AsyncIterable[Any], UnaryStreamCall]:
+        method = client_call_details.method
+
+        with sentry_sdk.start_span(
+            op=OP.GRPC_CLIENT,
+            name="unary stream call to %s" % method.decode(),
+            origin=SPAN_ORIGIN,
+        ) as span:
+            span.set_data("type", "unary stream")
+            span.set_data("method", method)
+
+            client_call_details = self._update_client_call_details_metadata_from_scope(
+                client_call_details
+            )
+
+            response = await continuation(client_call_details, request)
+            # Setting the code on a unary-stream call leads to execution
+            # getting stuck (see the synchronous interceptor), so it is
+            # intentionally not recorded here:
+            # status_code = await response.code()
+            # span.set_data("code", status_code)
+
+            return response
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/grpc/aio/server.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/grpc/aio/server.py
new file mode 100644
index 00000000..381c6310
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/grpc/aio/server.py
@@ -0,0 +1,100 @@
+import sentry_sdk
+from sentry_sdk.consts import OP
+from sentry_sdk.integrations import DidNotEnable
+from sentry_sdk.integrations.grpc.consts import SPAN_ORIGIN
+from sentry_sdk.tracing import Transaction, TransactionSource
+from sentry_sdk.utils import event_from_exception
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from collections.abc import Awaitable, Callable
+    from typing import Any, Optional
+
+
+try:
+    import grpc
+    from grpc import HandlerCallDetails, RpcMethodHandler
+    from grpc.aio import AbortError, ServicerContext
+except ImportError:
+    raise DidNotEnable("grpcio is not installed")
+
+
+class ServerInterceptor(grpc.aio.ServerInterceptor):  # type: ignore
+    def __init__(self, find_name=None):
+        # type: (ServerInterceptor, Callable[[ServicerContext], str] | None) -> None
+        self._find_method_name = find_name or self._find_name
+
+        super().__init__()
+
+    async def intercept_service(self, continuation, handler_call_details):
+        # type: (ServerInterceptor, Callable[[HandlerCallDetails], Awaitable[RpcMethodHandler]], HandlerCallDetails) -> Optional[Awaitable[RpcMethodHandler]]
+        self._handler_call_details = handler_call_details
+        handler = await continuation(handler_call_details)
+        if handler is None:
+            return None
+
+        if not handler.request_streaming and not handler.response_streaming:
+            handler_factory = grpc.unary_unary_rpc_method_handler
+
+            async def wrapped(request, context):
+                # type: (Any, ServicerContext) -> Any
+                name = self._find_method_name(context)
+                if not name:
+                    return await handler(request, context)
+
+                # continue_from_headers starts a fresh trace when the metadata
+                # carries no trace headers, so empty metadata is safe here.
+                transaction = Transaction.continue_from_headers(
+                    dict(context.invocation_metadata()),
+                    op=OP.GRPC_SERVER,
+                    name=name,
+                    source=TransactionSource.CUSTOM,
+                    origin=SPAN_ORIGIN,
+                )
+
+                with sentry_sdk.start_transaction(transaction=transaction):
+                    try:
+                        return await handler.unary_unary(request, context)
+                    except AbortError:
+                        raise
+                    except Exception as exc:
+                        event, hint = event_from_exception(
+                            exc,
+                            mechanism={"type": "grpc", "handled": False},
+                        )
+                        sentry_sdk.capture_event(event, hint=hint)
+                        raise
+
+        elif not handler.request_streaming and handler.response_streaming:
+            handler_factory = grpc.unary_stream_rpc_method_handler
+
+            async def wrapped(request, context):  # type: ignore
+                # type: (Any, ServicerContext) -> Any
+                async for r in handler.unary_stream(request, context):
+                    yield r
+
+        elif handler.request_streaming and not handler.response_streaming:
+            handler_factory = grpc.stream_unary_rpc_method_handler
+
+            async def wrapped(request, context):
+                # type: (Any, ServicerContext) -> Any
+                response = handler.stream_unary(request, context)
+                return await response
+
+        elif handler.request_streaming and handler.response_streaming:
+            handler_factory = grpc.stream_stream_rpc_method_handler
+
+            async def wrapped(request, context):  # type: ignore
+                # type: (Any, ServicerContext) -> Any
+                async for r in handler.stream_stream(request, context):
+                    yield r
+
+        return handler_factory(
+            wrapped,
+            request_deserializer=handler.request_deserializer,
+            response_serializer=handler.response_serializer,
+        )
+
+    def _find_name(self, context):
+        # type: (ServicerContext) -> str
+        return self._handler_call_details.method
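+
+
+# Note on the branches in intercept_service above: only unary-unary RPCs start
+# a Sentry transaction; the streaming variants are wrapped as pass-throughs so
+# handler signatures stay intact without being traced.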
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/grpc/client.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/grpc/client.py
new file mode 100644
index 00000000..a5b4f9f5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/grpc/client.py
@@ -0,0 +1,92 @@
+import sentry_sdk
+from sentry_sdk.consts import OP
+from sentry_sdk.integrations import DidNotEnable
+from sentry_sdk.integrations.grpc.consts import SPAN_ORIGIN
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any, Callable, Iterator, Iterable, Union
+
+try:
+    import grpc
+    from grpc import ClientCallDetails, Call
+    from grpc._interceptor import _UnaryOutcome
+    from grpc.aio._interceptor import UnaryStreamCall
+    from google.protobuf.message import Message
+except ImportError:
+    raise DidNotEnable("grpcio is not installed")
+
+
+class ClientInterceptor(
+    grpc.UnaryUnaryClientInterceptor, grpc.UnaryStreamClientInterceptor  # type: ignore
+):
+    _is_intercepted = False
+
+    def intercept_unary_unary(self, continuation, client_call_details, request):
+        # type: (ClientInterceptor, Callable[[ClientCallDetails, Message], _UnaryOutcome], ClientCallDetails, Message) -> _UnaryOutcome
+        method = client_call_details.method
+
+        with sentry_sdk.start_span(
+            op=OP.GRPC_CLIENT,
+            name="unary unary call to %s" % method,
+            origin=SPAN_ORIGIN,
+        ) as span:
+            span.set_data("type", "unary unary")
+            span.set_data("method", method)
+
+            client_call_details = self._update_client_call_details_metadata_from_scope(
+                client_call_details
+            )
+
+            response = continuation(client_call_details, request)
+            span.set_data("code", response.code().name)
+
+            return response
+
+    def intercept_unary_stream(self, continuation, client_call_details, request):
+        # type: (ClientInterceptor, Callable[[ClientCallDetails, Message], Union[Iterable[Any], UnaryStreamCall]], ClientCallDetails, Message) -> Union[Iterator[Message], Call]
+        method = client_call_details.method
+
+        with sentry_sdk.start_span(
+            op=OP.GRPC_CLIENT,
+            name="unary stream call to %s" % method,
+            origin=SPAN_ORIGIN,
+        ) as span:
+            span.set_data("type", "unary stream")
+            span.set_data("method", method)
+
+            client_call_details = self._update_client_call_details_metadata_from_scope(
+                client_call_details
+            )
+
+            response = continuation(
+                client_call_details, request
+            )  # type: UnaryStreamCall
+            # Setting code on unary-stream leads to execution getting stuck
+            # span.set_data("code", response.code().name)
+
+            return response
+
+    @staticmethod
+    def _update_client_call_details_metadata_from_scope(client_call_details):
+        # type: (ClientCallDetails) -> ClientCallDetails
+        metadata = (
+            list(client_call_details.metadata) if client_call_details.metadata else []
+        )
+        for (
+            key,
+            value,
+        ) in sentry_sdk.get_current_scope().iter_trace_propagation_headers():
+            metadata.append((key, value))
+
+        client_call_details = grpc._interceptor._ClientCallDetails(
+            method=client_call_details.method,
+            timeout=client_call_details.timeout,
+            metadata=metadata,
+            credentials=client_call_details.credentials,
+            wait_for_ready=client_call_details.wait_for_ready,
+            compression=client_call_details.compression,
+        )
+
+        return client_call_details
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/grpc/consts.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/grpc/consts.py
new file mode 100644
index 00000000..9fdb975c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/grpc/consts.py
@@ -0,0 +1 @@
+SPAN_ORIGIN = "auto.grpc.grpc"
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/grpc/server.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/grpc/server.py
new file mode 100644
index 00000000..0d2792d1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/grpc/server.py
@@ -0,0 +1,66 @@
+import sentry_sdk
+from sentry_sdk.consts import OP
+from sentry_sdk.integrations import DidNotEnable
+from sentry_sdk.integrations.grpc.consts import SPAN_ORIGIN
+from sentry_sdk.tracing import Transaction, TransactionSource
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Callable, Optional
+    from google.protobuf.message import Message
+
+try:
+    import grpc
+    from grpc import ServicerContext, HandlerCallDetails, RpcMethodHandler
+except ImportError:
+    raise DidNotEnable("grpcio is not installed")
+
+
+class ServerInterceptor(grpc.ServerInterceptor):  # type: ignore
+    def __init__(self, find_name=None):
+        # type: (ServerInterceptor, Optional[Callable[[ServicerContext], str]]) -> None
+        self._find_method_name = find_name or ServerInterceptor._find_name
+
+        super().__init__()
+
+    def intercept_service(self, continuation, handler_call_details):
+        # type: (ServerInterceptor, Callable[[HandlerCallDetails], RpcMethodHandler], HandlerCallDetails) -> RpcMethodHandler
+        handler = continuation(handler_call_details)
+        if not handler or not handler.unary_unary:
+            return handler
+
+        def behavior(request, context):
+            # type: (Message, ServicerContext) -> Message
+            with sentry_sdk.isolation_scope():
+                name = self._find_method_name(context)
+
+                if name:
+                    metadata = dict(context.invocation_metadata())
+
+                    transaction = Transaction.continue_from_headers(
+                        metadata,
+                        op=OP.GRPC_SERVER,
+                        name=name,
+                        source=TransactionSource.CUSTOM,
+                        origin=SPAN_ORIGIN,
+                    )
+
+                    with sentry_sdk.start_transaction(transaction=transaction):
+                        return handler.unary_unary(request, context)
+                else:
+                    return handler.unary_unary(request, context)
+
+        return grpc.unary_unary_rpc_method_handler(
+            behavior,
+            request_deserializer=handler.request_deserializer,
+            response_serializer=handler.response_serializer,
+        )
+
+    @staticmethod
+    def _find_name(context):
+        # type: (ServicerContext) -> str
+        return context._rpc_event.call_details.method.decode()
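+
+
+# Sketch: find_name lets callers override how the transaction name is derived
+# when wiring the interceptor manually (the auto-instrumentation in
+# GRPCIntegration always constructs its own instance). Hypothetical example:
+#
+#   def my_find_name(context):
+#       return "custom/" + context._rpc_event.call_details.method.decode()
+#
+#   interceptor = ServerInterceptor(find_name=my_find_name)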
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/httpx.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/httpx.py
new file mode 100644
index 00000000..2ddd4448
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/httpx.py
@@ -0,0 +1,167 @@
+import sentry_sdk
+from sentry_sdk.consts import OP, SPANDATA
+from sentry_sdk.integrations import Integration, DidNotEnable
+from sentry_sdk.tracing import BAGGAGE_HEADER_NAME
+from sentry_sdk.tracing_utils import Baggage, should_propagate_trace
+from sentry_sdk.utils import (
+    SENSITIVE_DATA_SUBSTITUTE,
+    capture_internal_exceptions,
+    ensure_integration_enabled,
+    logger,
+    parse_url,
+)
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from collections.abc import MutableMapping
+    from typing import Any
+
+
+try:
+    from httpx import AsyncClient, Client, Request, Response  # type: ignore
+except ImportError:
+    raise DidNotEnable("httpx is not installed")
+
+__all__ = ["HttpxIntegration"]
+
+
+class HttpxIntegration(Integration):
+    identifier = "httpx"
+    origin = f"auto.http.{identifier}"
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        """
+        httpx has its own transport layer and can be customized when needed,
+        so patch Client.send and AsyncClient.send to support both synchronous and async interfaces.
+        """
+        _install_httpx_client()
+        _install_httpx_async_client()
+
+
+def _install_httpx_client():
+    # type: () -> None
+    real_send = Client.send
+
+    @ensure_integration_enabled(HttpxIntegration, real_send)
+    def send(self, request, **kwargs):
+        # type: (Client, Request, **Any) -> Response
+        parsed_url = None
+        with capture_internal_exceptions():
+            parsed_url = parse_url(str(request.url), sanitize=False)
+
+        with sentry_sdk.start_span(
+            op=OP.HTTP_CLIENT,
+            name="%s %s"
+            % (
+                request.method,
+                parsed_url.url if parsed_url else SENSITIVE_DATA_SUBSTITUTE,
+            ),
+            origin=HttpxIntegration.origin,
+        ) as span:
+            span.set_data(SPANDATA.HTTP_METHOD, request.method)
+            if parsed_url is not None:
+                span.set_data("url", parsed_url.url)
+                span.set_data(SPANDATA.HTTP_QUERY, parsed_url.query)
+                span.set_data(SPANDATA.HTTP_FRAGMENT, parsed_url.fragment)
+
+            if should_propagate_trace(sentry_sdk.get_client(), str(request.url)):
+                for (
+                    key,
+                    value,
+                ) in sentry_sdk.get_current_scope().iter_trace_propagation_headers():
+                    logger.debug(
+                        "[Tracing] Adding `{key}` header {value} to outgoing request to {url}.".format(
+                            key=key, value=value, url=request.url
+                        )
+                    )
+
+                    if key == BAGGAGE_HEADER_NAME:
+                        _add_sentry_baggage_to_headers(request.headers, value)
+                    else:
+                        request.headers[key] = value
+
+            rv = real_send(self, request, **kwargs)
+
+            span.set_http_status(rv.status_code)
+            span.set_data("reason", rv.reason_phrase)
+
+            return rv
+
+    Client.send = send
+
+
+def _install_httpx_async_client():
+    # type: () -> None
+    real_send = AsyncClient.send
+
+    async def send(self, request, **kwargs):
+        # type: (AsyncClient, Request, **Any) -> Response
+        if sentry_sdk.get_client().get_integration(HttpxIntegration) is None:
+            return await real_send(self, request, **kwargs)
+
+        parsed_url = None
+        with capture_internal_exceptions():
+            parsed_url = parse_url(str(request.url), sanitize=False)
+
+        with sentry_sdk.start_span(
+            op=OP.HTTP_CLIENT,
+            name="%s %s"
+            % (
+                request.method,
+                parsed_url.url if parsed_url else SENSITIVE_DATA_SUBSTITUTE,
+            ),
+            origin=HttpxIntegration.origin,
+        ) as span:
+            span.set_data(SPANDATA.HTTP_METHOD, request.method)
+            if parsed_url is not None:
+                span.set_data("url", parsed_url.url)
+                span.set_data(SPANDATA.HTTP_QUERY, parsed_url.query)
+                span.set_data(SPANDATA.HTTP_FRAGMENT, parsed_url.fragment)
+
+            if should_propagate_trace(sentry_sdk.get_client(), str(request.url)):
+                for (
+                    key,
+                    value,
+                ) in sentry_sdk.get_current_scope().iter_trace_propagation_headers():
+                    logger.debug(
+                        "[Tracing] Adding `{key}` header {value} to outgoing request to {url}.".format(
+                            key=key, value=value, url=request.url
+                        )
+                    )
+                    if key == BAGGAGE_HEADER_NAME and request.headers.get(
+                        BAGGAGE_HEADER_NAME
+                    ):
+                        # do not overwrite any existing baggage, just append to it
+                        request.headers[key] += "," + value
+                    else:
+                        request.headers[key] = value
+
+            rv = await real_send(self, request, **kwargs)
+
+            span.set_http_status(rv.status_code)
+            span.set_data("reason", rv.reason_phrase)
+
+            return rv
+
+    AsyncClient.send = send
+
+
+def _add_sentry_baggage_to_headers(headers, sentry_baggage):
+    # type: (MutableMapping[str, str], str) -> None
+    """Add the Sentry baggage to the headers.
+
+    This function directly mutates the provided headers. The provided sentry_baggage
+    is appended to the existing baggage. If the baggage already contains Sentry items,
+    they are stripped out first.
+    """
+    existing_baggage = headers.get(BAGGAGE_HEADER_NAME, "")
+    stripped_existing_baggage = Baggage.strip_sentry_baggage(existing_baggage)
+
+    separator = "," if len(stripped_existing_baggage) > 0 else ""
+
+    headers[BAGGAGE_HEADER_NAME] = (
+        stripped_existing_baggage + separator + sentry_baggage
+    )
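+
+
+# A small illustration of the merge semantics above (header values are made up):
+#
+#   headers = {"baggage": "other-vendor=1,sentry-trace_id=old"}
+#   _add_sentry_baggage_to_headers(headers, "sentry-trace_id=new")
+#   assert headers["baggage"] == "other-vendor=1,sentry-trace_id=new"
+#
+# Third-party baggage entries are preserved; only stale Sentry entries are
+# replaced.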
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/huey.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/huey.py
new file mode 100644
index 00000000..f0aff4c0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/huey.py
@@ -0,0 +1,174 @@
+import sys
+from datetime import datetime
+
+import sentry_sdk
+from sentry_sdk.api import continue_trace, get_baggage, get_traceparent
+from sentry_sdk.consts import OP, SPANSTATUS
+from sentry_sdk.integrations import DidNotEnable, Integration
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.tracing import (
+    BAGGAGE_HEADER_NAME,
+    SENTRY_TRACE_HEADER_NAME,
+    TransactionSource,
+)
+from sentry_sdk.utils import (
+    capture_internal_exceptions,
+    ensure_integration_enabled,
+    event_from_exception,
+    SENSITIVE_DATA_SUBSTITUTE,
+    reraise,
+)
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any, Callable, Optional, Union, TypeVar
+
+    from sentry_sdk._types import EventProcessor, Event, Hint
+    from sentry_sdk.utils import ExcInfo
+
+    F = TypeVar("F", bound=Callable[..., Any])
+
+try:
+    from huey.api import Huey, Result, ResultGroup, Task, PeriodicTask
+    from huey.exceptions import CancelExecution, RetryTask, TaskLockedException
+except ImportError:
+    raise DidNotEnable("Huey is not installed")
+
+
+HUEY_CONTROL_FLOW_EXCEPTIONS = (CancelExecution, RetryTask, TaskLockedException)
+
+
+class HueyIntegration(Integration):
+    identifier = "huey"
+    origin = f"auto.queue.{identifier}"
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        patch_enqueue()
+        patch_execute()
+
+
+def patch_enqueue():
+    # type: () -> None
+    old_enqueue = Huey.enqueue
+
+    @ensure_integration_enabled(HueyIntegration, old_enqueue)
+    def _sentry_enqueue(self, task):
+        # type: (Huey, Task) -> Optional[Union[Result, ResultGroup]]
+        with sentry_sdk.start_span(
+            op=OP.QUEUE_SUBMIT_HUEY,
+            name=task.name,
+            origin=HueyIntegration.origin,
+        ):
+            if not isinstance(task, PeriodicTask):
+                # Attach trace propagation data to task kwargs. We do
+                # not do this for periodic tasks, as these don't
+                # really have an originating transaction.
+                task.kwargs["sentry_headers"] = {
+                    BAGGAGE_HEADER_NAME: get_baggage(),
+                    SENTRY_TRACE_HEADER_NAME: get_traceparent(),
+                }
+            return old_enqueue(self, task)
+
+    Huey.enqueue = _sentry_enqueue
+
+
+def _make_event_processor(task):
+    # type: (Any) -> EventProcessor
+    def event_processor(event, hint):
+        # type: (Event, Hint) -> Optional[Event]
+
+        with capture_internal_exceptions():
+            tags = event.setdefault("tags", {})
+            tags["huey_task_id"] = task.id
+            tags["huey_task_retry"] = task.default_retries > task.retries
+            extra = event.setdefault("extra", {})
+            extra["huey-job"] = {
+                "task": task.name,
+                "args": (
+                    task.args
+                    if should_send_default_pii()
+                    else SENSITIVE_DATA_SUBSTITUTE
+                ),
+                "kwargs": (
+                    task.kwargs
+                    if should_send_default_pii()
+                    else SENSITIVE_DATA_SUBSTITUTE
+                ),
+                "retry": (task.default_retries or 0) - task.retries,
+            }
+
+        return event
+
+    return event_processor
+
+
+def _capture_exception(exc_info):
+    # type: (ExcInfo) -> None
+    scope = sentry_sdk.get_current_scope()
+
+    if exc_info[0] in HUEY_CONTROL_FLOW_EXCEPTIONS:
+        scope.transaction.set_status(SPANSTATUS.ABORTED)
+        return
+
+    scope.transaction.set_status(SPANSTATUS.INTERNAL_ERROR)
+    event, hint = event_from_exception(
+        exc_info,
+        client_options=sentry_sdk.get_client().options,
+        mechanism={"type": HueyIntegration.identifier, "handled": False},
+    )
+    scope.capture_event(event, hint=hint)
+
+
+def _wrap_task_execute(func):
+    # type: (F) -> F
+
+    @ensure_integration_enabled(HueyIntegration, func)
+    def _sentry_execute(*args, **kwargs):
+        # type: (*Any, **Any) -> Any
+        try:
+            result = func(*args, **kwargs)
+        except Exception:
+            exc_info = sys.exc_info()
+            _capture_exception(exc_info)
+            reraise(*exc_info)
+
+        return result
+
+    return _sentry_execute  # type: ignore
+
+
+def patch_execute():
+    # type: () -> None
+    old_execute = Huey._execute
+
+    @ensure_integration_enabled(HueyIntegration, old_execute)
+    def _sentry_execute(self, task, timestamp=None):
+        # type: (Huey, Task, Optional[datetime]) -> Any
+        with sentry_sdk.isolation_scope() as scope:
+            with capture_internal_exceptions():
+                scope._name = "huey"
+                scope.clear_breadcrumbs()
+                scope.add_event_processor(_make_event_processor(task))
+
+            sentry_headers = task.kwargs.pop("sentry_headers", None)
+
+            transaction = continue_trace(
+                sentry_headers or {},
+                name=task.name,
+                op=OP.QUEUE_TASK_HUEY,
+                source=TransactionSource.TASK,
+                origin=HueyIntegration.origin,
+            )
+            transaction.set_status(SPANSTATUS.OK)
+
+            if not getattr(task, "_sentry_is_patched", False):
+                task.execute = _wrap_task_execute(task.execute)
+                task._sentry_is_patched = True
+
+            with sentry_sdk.start_transaction(transaction):
+                return old_execute(self, task, timestamp)
+
+    Huey._execute = _sentry_execute
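+
+
+# End-to-end sketch (hypothetical task; assumes the integration is enabled):
+# patch_enqueue stashes the current trace headers in task.kwargs under
+# "sentry_headers", and patch_execute pops them on the worker side so the
+# consumer transaction continues the producer's trace.
+#
+#   @huey.task()
+#   def add(a, b):
+#       return a + b
+#
+#   add(1, 2)  # producer: QUEUE_SUBMIT_HUEY span + headers attached
+#              # worker:   QUEUE_TASK_HUEY transaction continues the trace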
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/huggingface_hub.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/huggingface_hub.py
new file mode 100644
index 00000000..d09f6e21
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/huggingface_hub.py
@@ -0,0 +1,175 @@
+from functools import wraps
+
+from sentry_sdk import consts
+from sentry_sdk.ai.monitoring import record_token_usage
+from sentry_sdk.ai.utils import set_data_normalized
+from sentry_sdk.consts import SPANDATA
+
+from typing import Any, Iterable, Callable
+
+import sentry_sdk
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.integrations import DidNotEnable, Integration
+from sentry_sdk.utils import (
+    capture_internal_exceptions,
+    event_from_exception,
+)
+
+try:
+    import huggingface_hub.inference._client
+
+    from huggingface_hub import ChatCompletionStreamOutput, TextGenerationOutput
+except ImportError:
+    raise DidNotEnable("Huggingface not installed")
+
+
+class HuggingfaceHubIntegration(Integration):
+    identifier = "huggingface_hub"
+    origin = f"auto.ai.{identifier}"
+
+    def __init__(self, include_prompts=True):
+        # type: (HuggingfaceHubIntegration, bool) -> None
+        self.include_prompts = include_prompts
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        huggingface_hub.inference._client.InferenceClient.text_generation = (
+            _wrap_text_generation(
+                huggingface_hub.inference._client.InferenceClient.text_generation
+            )
+        )
+
+
+def _capture_exception(exc):
+    # type: (Any) -> None
+    event, hint = event_from_exception(
+        exc,
+        client_options=sentry_sdk.get_client().options,
+        mechanism={"type": "huggingface_hub", "handled": False},
+    )
+    sentry_sdk.capture_event(event, hint=hint)
+
+
+def _wrap_text_generation(f):
+    # type: (Callable[..., Any]) -> Callable[..., Any]
+    @wraps(f)
+    def new_text_generation(*args, **kwargs):
+        # type: (*Any, **Any) -> Any
+        integration = sentry_sdk.get_client().get_integration(HuggingfaceHubIntegration)
+        if integration is None:
+            return f(*args, **kwargs)
+
+        if "prompt" in kwargs:
+            prompt = kwargs["prompt"]
+        elif len(args) >= 2:
+            kwargs["prompt"] = args[1]
+            prompt = kwargs["prompt"]
+            args = (args[0],) + args[2:]
+        else:
+            # invalid call, let the wrapped function raise its own error
+            return f(*args, **kwargs)
+
+        model = kwargs.get("model")
+        streaming = kwargs.get("stream")
+
+        span = sentry_sdk.start_span(
+            op=consts.OP.HUGGINGFACE_HUB_CHAT_COMPLETIONS_CREATE,
+            name="Text Generation",
+            origin=HuggingfaceHubIntegration.origin,
+        )
+        span.__enter__()
+        try:
+            res = f(*args, **kwargs)
+        except Exception as e:
+            _capture_exception(e)
+            span.__exit__(None, None, None)
+            raise e from None
+
+        with capture_internal_exceptions():
+            if should_send_default_pii() and integration.include_prompts:
+                set_data_normalized(span, SPANDATA.AI_INPUT_MESSAGES, prompt)
+
+            set_data_normalized(span, SPANDATA.AI_MODEL_ID, model)
+            set_data_normalized(span, SPANDATA.AI_STREAMING, streaming)
+
+            if isinstance(res, str):
+                if should_send_default_pii() and integration.include_prompts:
+                    set_data_normalized(
+                        span,
+                        "ai.responses",
+                        [res],
+                    )
+                span.__exit__(None, None, None)
+                return res
+
+            if isinstance(res, TextGenerationOutput):
+                if should_send_default_pii() and integration.include_prompts:
+                    set_data_normalized(
+                        span,
+                        "ai.responses",
+                        [res.generated_text],
+                    )
+                if res.details is not None and res.details.generated_tokens > 0:
+                    record_token_usage(span, total_tokens=res.details.generated_tokens)
+                span.__exit__(None, None, None)
+                return res
+
+            if not isinstance(res, Iterable):
+                # we only know how to handle strings and iterables; mark
+                # anything else as an unknown response type
+                set_data_normalized(span, "unknown_response", True)
+                span.__exit__(None, None, None)
+                return res
+
+            if kwargs.get("details", False):
+                # res is Iterable[TextGenerationStreamOutput]
+                def new_details_iterator():
+                    # type: () -> Iterable[ChatCompletionStreamOutput]
+                    with capture_internal_exceptions():
+                        tokens_used = 0
+                        data_buf: list[str] = []
+                        for x in res:
+                            if hasattr(x, "token") and hasattr(x.token, "text"):
+                                data_buf.append(x.token.text)
+                            if hasattr(x, "details") and hasattr(
+                                x.details, "generated_tokens"
+                            ):
+                                tokens_used = x.details.generated_tokens
+                            yield x
+                        if (
+                            len(data_buf) > 0
+                            and should_send_default_pii()
+                            and integration.include_prompts
+                        ):
+                            set_data_normalized(
+                                span, SPANDATA.AI_RESPONSES, "".join(data_buf)
+                            )
+                        if tokens_used > 0:
+                            record_token_usage(span, total_tokens=tokens_used)
+                    span.__exit__(None, None, None)
+
+                return new_details_iterator()
+            else:
+                # res is Iterable[str]
+
+                def new_iterator():
+                    # type: () -> Iterable[str]
+                    data_buf: list[str] = []
+                    with capture_internal_exceptions():
+                        for s in res:
+                            if isinstance(s, str):
+                                data_buf.append(s)
+                            yield s
+                        if (
+                            len(data_buf) > 0
+                            and should_send_default_pii()
+                            and integration.include_prompts
+                        ):
+                            set_data_normalized(
+                                span, SPANDATA.AI_RESPONSES, "".join(data_buf)
+                            )
+                        span.__exit__(None, None, None)
+
+                return new_iterator()
+
+    return new_text_generation
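+
+
+# Sketch: prompt/response capture is opt-out via the constructor (assumes a
+# standard SDK setup):
+#
+#   sentry_sdk.init(
+#       dsn="...",
+#       integrations=[HuggingfaceHubIntegration(include_prompts=False)],
+#   )
+#
+# With include_prompts=False, inputs and generated text are never attached to
+# spans, even when send_default_pii is enabled.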
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/langchain.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/langchain.py
new file mode 100644
index 00000000..431fc46b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/langchain.py
@@ -0,0 +1,465 @@
+from collections import OrderedDict
+from functools import wraps
+
+import sentry_sdk
+from sentry_sdk.ai.monitoring import set_ai_pipeline_name, record_token_usage
+from sentry_sdk.consts import OP, SPANDATA
+from sentry_sdk.ai.utils import set_data_normalized
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.tracing import Span
+from sentry_sdk.integrations import DidNotEnable, Integration
+from sentry_sdk.utils import logger, capture_internal_exceptions
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any, List, Callable, Dict, Union, Optional
+    from uuid import UUID
+
+try:
+    from langchain_core.messages import BaseMessage
+    from langchain_core.outputs import LLMResult
+    from langchain_core.callbacks import (
+        manager,
+        BaseCallbackHandler,
+    )
+    from langchain_core.agents import AgentAction, AgentFinish
+except ImportError:
+    raise DidNotEnable("langchain not installed")
+
+
+DATA_FIELDS = {
+    "temperature": SPANDATA.AI_TEMPERATURE,
+    "top_p": SPANDATA.AI_TOP_P,
+    "top_k": SPANDATA.AI_TOP_K,
+    "function_call": SPANDATA.AI_FUNCTION_CALL,
+    "tool_calls": SPANDATA.AI_TOOL_CALLS,
+    "tools": SPANDATA.AI_TOOLS,
+    "response_format": SPANDATA.AI_RESPONSE_FORMAT,
+    "logit_bias": SPANDATA.AI_LOGIT_BIAS,
+    "tags": SPANDATA.AI_TAGS,
+}
+
+# To avoid double collecting tokens, we do *not* measure
+# token counts for models for which we have an explicit integration
+NO_COLLECT_TOKEN_MODELS = [
+    "openai-chat",
+    "anthropic-chat",
+    "cohere-chat",
+    "huggingface_endpoint",
+]
+
+
+class LangchainIntegration(Integration):
+    identifier = "langchain"
+    origin = f"auto.ai.{identifier}"
+
+    # The maximum number of spans (e.g., LLM calls) that can be processed at the same time.
+    max_spans = 1024
+
+    def __init__(
+        self, include_prompts=True, max_spans=1024, tiktoken_encoding_name=None
+    ):
+        # type: (LangchainIntegration, bool, int, Optional[str]) -> None
+        self.include_prompts = include_prompts
+        self.max_spans = max_spans
+        self.tiktoken_encoding_name = tiktoken_encoding_name
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        manager._configure = _wrap_configure(manager._configure)
+
+
+class WatchedSpan:
+    span = None  # type: Span
+    num_completion_tokens = 0  # type: int
+    num_prompt_tokens = 0  # type: int
+    no_collect_tokens = False  # type: bool
+    children = []  # type: List[WatchedSpan]
+    is_pipeline = False  # type: bool
+
+    def __init__(self, span):
+        # type: (Span) -> None
+        self.span = span
+        # A per-instance list is required here; the class-level default above
+        # would be shared (and mutated) across all WatchedSpan instances.
+        self.children = []  # type: List[WatchedSpan]
+
+
+class SentryLangchainCallback(BaseCallbackHandler):  # type: ignore[misc]
+    """Base callback handler that can be used to handle callbacks from langchain."""
+
+    span_map = OrderedDict()  # type: OrderedDict[UUID, WatchedSpan]
+
+    max_span_map_size = 0
+
+    def __init__(self, max_span_map_size, include_prompts, tiktoken_encoding_name=None):
+        # type: (int, bool, Optional[str]) -> None
+        self.max_span_map_size = max_span_map_size
+        self.include_prompts = include_prompts
+
+        self.tiktoken_encoding = None
+        if tiktoken_encoding_name is not None:
+            import tiktoken  # type: ignore
+
+            self.tiktoken_encoding = tiktoken.get_encoding(tiktoken_encoding_name)
+
+    def count_tokens(self, s):
+        # type: (str) -> int
+        if self.tiktoken_encoding is not None:
+            return len(self.tiktoken_encoding.encode_ordinary(s))
+        return 0
+
+    def gc_span_map(self):
+        # type: () -> None
+
+        while len(self.span_map) > self.max_span_map_size:
+            run_id, watched_span = self.span_map.popitem(last=False)
+            self._exit_span(watched_span, run_id)
+
+    def _handle_error(self, run_id, error):
+        # type: (UUID, Any) -> None
+        if not run_id or run_id not in self.span_map:
+            return
+
+        span_data = self.span_map[run_id]
+        if not span_data:
+            return
+        sentry_sdk.capture_exception(error, span_data.span.scope)
+        span_data.span.__exit__(None, None, None)
+        del self.span_map[run_id]
+
+    def _normalize_langchain_message(self, message):
+        # type: (BaseMessage) -> Any
+        parsed = {"content": message.content, "role": message.type}
+        parsed.update(message.additional_kwargs)
+        return parsed
+
+    def _create_span(self, run_id, parent_id, **kwargs):
+        # type: (SentryLangchainCallback, UUID, Optional[Any], Any) -> WatchedSpan
+
+        watched_span = None  # type: Optional[WatchedSpan]
+        if parent_id:
+            parent_span = self.span_map.get(parent_id)  # type: Optional[WatchedSpan]
+            if parent_span:
+                watched_span = WatchedSpan(parent_span.span.start_child(**kwargs))
+                parent_span.children.append(watched_span)
+        if watched_span is None:
+            watched_span = WatchedSpan(sentry_sdk.start_span(**kwargs))
+
+        if kwargs.get("op", "").startswith("ai.pipeline."):
+            if kwargs.get("name"):
+                set_ai_pipeline_name(kwargs.get("name"))
+            watched_span.is_pipeline = True
+
+        watched_span.span.__enter__()
+        self.span_map[run_id] = watched_span
+        self.gc_span_map()
+        return watched_span
+
+    def _exit_span(self, span_data, run_id):
+        # type: (SentryLangchainCallback, WatchedSpan, UUID) -> None
+
+        if span_data.is_pipeline:
+            set_ai_pipeline_name(None)
+
+        span_data.span.__exit__(None, None, None)
+        del self.span_map[run_id]
+
+    def on_llm_start(
+        self,
+        serialized,
+        prompts,
+        *,
+        run_id,
+        tags=None,
+        parent_run_id=None,
+        metadata=None,
+        **kwargs,
+    ):
+        # type: (SentryLangchainCallback, Dict[str, Any], List[str], UUID, Optional[List[str]], Optional[UUID], Optional[Dict[str, Any]], Any) -> Any
+        """Run when LLM starts running."""
+        with capture_internal_exceptions():
+            if not run_id:
+                return
+            all_params = kwargs.get("invocation_params", {})
+            all_params.update(serialized.get("kwargs", {}))
+            watched_span = self._create_span(
+                run_id,
+                kwargs.get("parent_run_id"),
+                op=OP.LANGCHAIN_RUN,
+                name=kwargs.get("name") or "Langchain LLM call",
+                origin=LangchainIntegration.origin,
+            )
+            span = watched_span.span
+            if should_send_default_pii() and self.include_prompts:
+                set_data_normalized(span, SPANDATA.AI_INPUT_MESSAGES, prompts)
+            for k, v in DATA_FIELDS.items():
+                if k in all_params:
+                    set_data_normalized(span, v, all_params[k])
+
+    def on_chat_model_start(self, serialized, messages, *, run_id, **kwargs):
+        # type: (SentryLangchainCallback, Dict[str, Any], List[List[BaseMessage]], UUID, Any) -> Any
+        """Run when Chat Model starts running."""
+        with capture_internal_exceptions():
+            if not run_id:
+                return
+            all_params = kwargs.get("invocation_params", {})
+            all_params.update(serialized.get("kwargs", {}))
+            watched_span = self._create_span(
+                run_id,
+                kwargs.get("parent_run_id"),
+                op=OP.LANGCHAIN_CHAT_COMPLETIONS_CREATE,
+                name=kwargs.get("name") or "Langchain Chat Model",
+                origin=LangchainIntegration.origin,
+            )
+            span = watched_span.span
+            model = all_params.get(
+                "model", all_params.get("model_name", all_params.get("model_id"))
+            )
+            watched_span.no_collect_tokens = any(
+                x in all_params.get("_type", "") for x in NO_COLLECT_TOKEN_MODELS
+            )
+
+            if not model and "anthropic" in all_params.get("_type", ""):
+                model = "claude-2"
+            if model:
+                span.set_data(SPANDATA.AI_MODEL_ID, model)
+            if should_send_default_pii() and self.include_prompts:
+                set_data_normalized(
+                    span,
+                    SPANDATA.AI_INPUT_MESSAGES,
+                    [
+                        [self._normalize_langchain_message(x) for x in list_]
+                        for list_ in messages
+                    ],
+                )
+            for k, v in DATA_FIELDS.items():
+                if k in all_params:
+                    set_data_normalized(span, v, all_params[k])
+            if not watched_span.no_collect_tokens:
+                for list_ in messages:
+                    for message in list_:
+                        self.span_map[run_id].num_prompt_tokens += self.count_tokens(
+                            message.content
+                        ) + self.count_tokens(message.type)
+
+    def on_llm_new_token(self, token, *, run_id, **kwargs):
+        # type: (SentryLangchainCallback, str, UUID, Any) -> Any
+        """Run on new LLM token. Only available when streaming is enabled."""
+        with capture_internal_exceptions():
+            if not run_id or run_id not in self.span_map:
+                return
+            span_data = self.span_map[run_id]
+            if not span_data or span_data.no_collect_tokens:
+                return
+            span_data.num_completion_tokens += self.count_tokens(token)
+
+    def on_llm_end(self, response, *, run_id, **kwargs):
+        # type: (SentryLangchainCallback, LLMResult, UUID, Any) -> Any
+        """Run when LLM ends running."""
+        with capture_internal_exceptions():
+            if not run_id:
+                return
+
+            token_usage = (
+                response.llm_output.get("token_usage") if response.llm_output else None
+            )
+
+            span_data = self.span_map.get(run_id)
+            if not span_data:
+                return
+
+            if should_send_default_pii() and self.include_prompts:
+                set_data_normalized(
+                    span_data.span,
+                    SPANDATA.AI_RESPONSES,
+                    [[x.text for x in list_] for list_ in response.generations],
+                )
+
+            if not span_data.no_collect_tokens:
+                if token_usage:
+                    record_token_usage(
+                        span_data.span,
+                        token_usage.get("prompt_tokens"),
+                        token_usage.get("completion_tokens"),
+                        token_usage.get("total_tokens"),
+                    )
+                else:
+                    record_token_usage(
+                        span_data.span,
+                        span_data.num_prompt_tokens,
+                        span_data.num_completion_tokens,
+                    )
+
+            self._exit_span(span_data, run_id)
+
+    def on_llm_error(self, error, *, run_id, **kwargs):
+        # type: (SentryLangchainCallback, Union[Exception, KeyboardInterrupt], UUID, Any) -> Any
+        """Run when LLM errors."""
+        with capture_internal_exceptions():
+            self._handle_error(run_id, error)
+
+    def on_chain_start(self, serialized, inputs, *, run_id, **kwargs):
+        # type: (SentryLangchainCallback, Dict[str, Any], Dict[str, Any], UUID, Any) -> Any
+        """Run when chain starts running."""
+        with capture_internal_exceptions():
+            if not run_id:
+                return
+            watched_span = self._create_span(
+                run_id,
+                kwargs.get("parent_run_id"),
+                op=(
+                    OP.LANGCHAIN_RUN
+                    if kwargs.get("parent_run_id") is not None
+                    else OP.LANGCHAIN_PIPELINE
+                ),
+                name=kwargs.get("name") or "Chain execution",
+                origin=LangchainIntegration.origin,
+            )
+            metadata = kwargs.get("metadata")
+            if metadata:
+                set_data_normalized(watched_span.span, SPANDATA.AI_METADATA, metadata)
+
+    def on_chain_end(self, outputs, *, run_id, **kwargs):
+        # type: (SentryLangchainCallback, Dict[str, Any], UUID, Any) -> Any
+        """Run when chain ends running."""
+        with capture_internal_exceptions():
+            if not run_id or run_id not in self.span_map:
+                return
+
+            span_data = self.span_map[run_id]
+            if not span_data:
+                return
+            self._exit_span(span_data, run_id)
+
+    def on_chain_error(self, error, *, run_id, **kwargs):
+        # type: (SentryLangchainCallback, Union[Exception, KeyboardInterrupt], UUID, Any) -> Any
+        """Run when chain errors."""
+        self._handle_error(run_id, error)
+
+    def on_agent_action(self, action, *, run_id, **kwargs):
+        # type: (SentryLangchainCallback, AgentAction, UUID, Any) -> Any
+        with capture_internal_exceptions():
+            if not run_id:
+                return
+            watched_span = self._create_span(
+                run_id,
+                kwargs.get("parent_run_id"),
+                op=OP.LANGCHAIN_AGENT,
+                name=action.tool or "AI tool usage",
+                origin=LangchainIntegration.origin,
+            )
+            if action.tool_input and should_send_default_pii() and self.include_prompts:
+                set_data_normalized(
+                    watched_span.span, SPANDATA.AI_INPUT_MESSAGES, action.tool_input
+                )
+
+    def on_agent_finish(self, finish, *, run_id, **kwargs):
+        # type: (SentryLangchainCallback, AgentFinish, UUID, Any) -> Any
+        with capture_internal_exceptions():
+            if not run_id:
+                return
+
+            span_data = self.span_map.get(run_id)
+            if not span_data:
+                return
+            if should_send_default_pii() and self.include_prompts:
+                set_data_normalized(
+                    span_data.span, SPANDATA.AI_RESPONSES, finish.return_values.items()
+                )
+            self._exit_span(span_data, run_id)
+
+    def on_tool_start(self, serialized, input_str, *, run_id, **kwargs):
+        # type: (SentryLangchainCallback, Dict[str, Any], str, UUID, Any) -> Any
+        """Run when tool starts running."""
+        with capture_internal_exceptions():
+            if not run_id:
+                return
+            watched_span = self._create_span(
+                run_id,
+                kwargs.get("parent_run_id"),
+                op=OP.LANGCHAIN_TOOL,
+                name=serialized.get("name") or kwargs.get("name") or "AI tool usage",
+                origin=LangchainIntegration.origin,
+            )
+            if should_send_default_pii() and self.include_prompts:
+                set_data_normalized(
+                    watched_span.span,
+                    SPANDATA.AI_INPUT_MESSAGES,
+                    kwargs.get("inputs", [input_str]),
+                )
+                if kwargs.get("metadata"):
+                    set_data_normalized(
+                        watched_span.span, SPANDATA.AI_METADATA, kwargs.get("metadata")
+                    )
+
+    def on_tool_end(self, output, *, run_id, **kwargs):
+        # type: (SentryLangchainCallback, str, UUID, Any) -> Any
+        """Run when tool ends running."""
+        with capture_internal_exceptions():
+            if not run_id or run_id not in self.span_map:
+                return
+
+            span_data = self.span_map[run_id]
+            if not span_data:
+                return
+            if should_send_default_pii() and self.include_prompts:
+                set_data_normalized(span_data.span, SPANDATA.AI_RESPONSES, output)
+            self._exit_span(span_data, run_id)
+
+    def on_tool_error(self, error, *args, run_id, **kwargs):
+        # type: (SentryLangchainCallback, Union[Exception, KeyboardInterrupt], Any, UUID, Any) -> Any
+        """Run when tool errors."""
+        self._handle_error(run_id, error)
+
+
+def _wrap_configure(f):
+    # type: (Callable[..., Any]) -> Callable[..., Any]
+
+    @wraps(f)
+    def new_configure(*args, **kwargs):
+        # type: (Any, Any) -> Any
+
+        integration = sentry_sdk.get_client().get_integration(LangchainIntegration)
+        if integration is None:
+            return f(*args, **kwargs)
+
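+        # `configure` may receive the caller's existing callbacks either as
+        # the third positional argument or via the `local_callbacks` keyword.
+        # Both cases are normalized below into a fresh list that the Sentry
+        # callback can be appended to without mutating the caller's list.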
+        with capture_internal_exceptions():
+            new_callbacks = []  # type: List[BaseCallbackHandler]
+            if "local_callbacks" in kwargs:
+                existing_callbacks = kwargs["local_callbacks"]
+                kwargs["local_callbacks"] = new_callbacks
+            elif len(args) > 2:
+                existing_callbacks = args[2]
+                args = (
+                    args[0],
+                    args[1],
+                    new_callbacks,
+                ) + args[3:]
+            else:
+                existing_callbacks = []
+
+            if existing_callbacks:
+                if isinstance(existing_callbacks, list):
+                    for cb in existing_callbacks:
+                        new_callbacks.append(cb)
+                elif isinstance(existing_callbacks, BaseCallbackHandler):
+                    new_callbacks.append(existing_callbacks)
+                else:
+                    logger.debug("Unknown callback type: %s", existing_callbacks)
+
+            already_added = any(
+                isinstance(callback, SentryLangchainCallback)
+                for callback in new_callbacks
+            )
+
+            if not already_added:
+                new_callbacks.append(
+                    SentryLangchainCallback(
+                        integration.max_spans,
+                        integration.include_prompts,
+                        integration.tiktoken_encoding_name,
+                    )
+                )
+        return f(*args, **kwargs)
+
+    return new_configure
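+
+
+# A minimal usage sketch (hedged: assumes the standard `sentry_sdk.init` entry
+# point; `LangchainIntegration` is defined earlier in this module and, judging
+# by the attributes used above, accepts `include_prompts` and
+# `tiktoken_encoding_name` options):
+#
+#     import sentry_sdk
+#     from sentry_sdk.integrations.langchain import LangchainIntegration
+#
+#     sentry_sdk.init(
+#         dsn="...",
+#         send_default_pii=True,  # required for prompts/responses to be recorded
+#         integrations=[LangchainIntegration(include_prompts=True)],
+#     )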
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/launchdarkly.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/launchdarkly.py
new file mode 100644
index 00000000..cb9e9114
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/launchdarkly.py
@@ -0,0 +1,62 @@
+from typing import TYPE_CHECKING
+import sentry_sdk
+
+from sentry_sdk.integrations import DidNotEnable, Integration
+
+try:
+    import ldclient
+    from ldclient.hook import Hook, Metadata
+
+    if TYPE_CHECKING:
+        from ldclient import LDClient
+        from ldclient.hook import EvaluationSeriesContext
+        from ldclient.evaluation import EvaluationDetail
+
+        from typing import Any
+except ImportError:
+    raise DidNotEnable("LaunchDarkly is not installed")
+
+
+class LaunchDarklyIntegration(Integration):
+    identifier = "launchdarkly"
+
+    def __init__(self, ld_client=None):
+        # type: (LDClient | None) -> None
+        """
+        :param ld_client: An initialized LDClient instance. If a client is not provided, this
+            integration will attempt to use the shared global instance.
+        """
+        try:
+            client = ld_client or ldclient.get()
+        except Exception as exc:
+            raise DidNotEnable("Error getting LaunchDarkly client. " + repr(exc))
+
+        if not client.is_initialized():
+            raise DidNotEnable("LaunchDarkly client is not initialized.")
+
+        # Register the flag collection hook with the LD client.
+        client.add_hook(LaunchDarklyHook())
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        pass
+
+
+class LaunchDarklyHook(Hook):
+
+    @property
+    def metadata(self):
+        # type: () -> Metadata
+        return Metadata(name="sentry-flag-auditor")
+
+    def after_evaluation(self, series_context, data, detail):
+        # type: (EvaluationSeriesContext, dict[Any, Any], EvaluationDetail) -> dict[Any, Any]
+        if isinstance(detail.value, bool):
+            flags = sentry_sdk.get_current_scope().flags
+            flags.set(series_context.key, detail.value)
+        return data
+
+    def before_evaluation(self, series_context, data):
+        # type: (EvaluationSeriesContext, dict[Any, Any]) -> dict[Any, Any]
+        return data  # No-op.
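+
+
+# A minimal usage sketch (hedged: assumes `ldclient` has been configured and
+# the standard `sentry_sdk.init` entry point):
+#
+#     import ldclient
+#     import sentry_sdk
+#     from sentry_sdk.integrations.launchdarkly import LaunchDarklyIntegration
+#
+#     ldclient.set_config(ldclient.Config("sdk-key"))
+#     sentry_sdk.init(
+#         dsn="...",
+#         integrations=[LaunchDarklyIntegration()],  # uses the shared global client
+#     )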
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/litestar.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/litestar.py
new file mode 100644
index 00000000..5f0b32b0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/litestar.py
@@ -0,0 +1,306 @@
+from collections.abc import Set
+import sentry_sdk
+from sentry_sdk.consts import OP
+from sentry_sdk.integrations import (
+    _DEFAULT_FAILED_REQUEST_STATUS_CODES,
+    DidNotEnable,
+    Integration,
+)
+from sentry_sdk.integrations.asgi import SentryAsgiMiddleware
+from sentry_sdk.integrations.logging import ignore_logger
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.tracing import TransactionSource, SOURCE_FOR_STYLE
+from sentry_sdk.utils import (
+    ensure_integration_enabled,
+    event_from_exception,
+    transaction_from_function,
+)
+
+try:
+    from litestar import Request, Litestar  # type: ignore
+    from litestar.handlers.base import BaseRouteHandler  # type: ignore
+    from litestar.middleware import DefineMiddleware  # type: ignore
+    from litestar.routes.http import HTTPRoute  # type: ignore
+    from litestar.data_extractors import ConnectionDataExtractor  # type: ignore
+    from litestar.exceptions import HTTPException  # type: ignore
+except ImportError:
+    raise DidNotEnable("Litestar is not installed")
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any, Optional, Union
+    from litestar.types.asgi_types import ASGIApp  # type: ignore
+    from litestar.types import (  # type: ignore
+        HTTPReceiveMessage,
+        HTTPScope,
+        Message,
+        Middleware,
+        Receive,
+        Scope as LitestarScope,
+        Send,
+        WebSocketReceiveMessage,
+    )
+    from litestar.middleware import MiddlewareProtocol
+    from sentry_sdk._types import Event, Hint
+
+_DEFAULT_TRANSACTION_NAME = "generic Litestar request"
+
+
+class LitestarIntegration(Integration):
+    identifier = "litestar"
+    origin = f"auto.http.{identifier}"
+
+    def __init__(
+        self,
+        failed_request_status_codes=_DEFAULT_FAILED_REQUEST_STATUS_CODES,  # type: Set[int]
+    ) -> None:
+        self.failed_request_status_codes = failed_request_status_codes
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        patch_app_init()
+        patch_middlewares()
+        patch_http_route_handle()
+
+        # The following line follows the pattern found in other integrations such as `DjangoIntegration.setup_once`.
+        # Litestar's `ExceptionHandlerMiddleware.__call__` catches exceptions and does the following
+        # (among other things):
+        #   1. Logs at least some of them (such as 500s) as errors
+        #   2. Calls after_exception hooks
+        # The `LitestarIntegration` provides an after_exception hook (see `patch_app_init` below) that creates a Sentry
+        # event from an exception, which ends up being called during step 2 above. However, the Sentry
+        # `LoggingIntegration` would by default also create a Sentry event from the error logs made in step 1,
+        # so we prevent it from doing so here.
+        ignore_logger("litestar")
+
+
+class SentryLitestarASGIMiddleware(SentryAsgiMiddleware):
+    def __init__(self, app, span_origin=LitestarIntegration.origin):
+        # type: (ASGIApp, str) -> None
+
+        super().__init__(
+            app=app,
+            unsafe_context_data=False,
+            transaction_style="endpoint",
+            mechanism_type="asgi",
+            span_origin=span_origin,
+        )
+
+
+def patch_app_init():
+    # type: () -> None
+    """
+    Replaces the Litestar class's `__init__` function in order to inject `after_exception` handlers and set the
+    `SentryLitestarASGIMiddleware` as the outermost middleware in the stack.
+    See:
+    - https://docs.litestar.dev/2/usage/applications.html#after-exception
+    - https://docs.litestar.dev/2/usage/middleware/using-middleware.html
+    """
+    old__init__ = Litestar.__init__
+
+    @ensure_integration_enabled(LitestarIntegration, old__init__)
+    def injection_wrapper(self, *args, **kwargs):
+        # type: (Litestar, *Any, **Any) -> None
+        kwargs["after_exception"] = [
+            exception_handler,
+            *(kwargs.get("after_exception") or []),
+        ]
+
+        SentryLitestarASGIMiddleware.__call__ = SentryLitestarASGIMiddleware._run_asgi3  # type: ignore
+        middleware = kwargs.get("middleware") or []
+        kwargs["middleware"] = [SentryLitestarASGIMiddleware, *middleware]
+        old__init__(self, *args, **kwargs)
+
+    Litestar.__init__ = injection_wrapper
+
+
+def patch_middlewares():
+    # type: () -> None
+    old_resolve_middleware_stack = BaseRouteHandler.resolve_middleware
+
+    @ensure_integration_enabled(LitestarIntegration, old_resolve_middleware_stack)
+    def resolve_middleware_wrapper(self):
+        # type: (BaseRouteHandler) -> list[Middleware]
+        return [
+            enable_span_for_middleware(middleware)
+            for middleware in old_resolve_middleware_stack(self)
+        ]
+
+    BaseRouteHandler.resolve_middleware = resolve_middleware_wrapper
+
+
+def enable_span_for_middleware(middleware):
+    # type: (Middleware) -> Middleware
+    if (
+        not hasattr(middleware, "__call__")  # noqa: B004
+        or middleware is SentryLitestarASGIMiddleware
+    ):
+        return middleware
+
+    if isinstance(middleware, DefineMiddleware):
+        old_call = middleware.middleware.__call__  # type: ASGIApp
+    else:
+        old_call = middleware.__call__
+
+    async def _create_span_call(self, scope, receive, send):
+        # type: (MiddlewareProtocol, LitestarScope, Receive, Send) -> None
+        if sentry_sdk.get_client().get_integration(LitestarIntegration) is None:
+            return await old_call(self, scope, receive, send)
+
+        middleware_name = self.__class__.__name__
+        with sentry_sdk.start_span(
+            op=OP.MIDDLEWARE_LITESTAR,
+            name=middleware_name,
+            origin=LitestarIntegration.origin,
+        ) as middleware_span:
+            middleware_span.set_tag("litestar.middleware_name", middleware_name)
+
+            # Creating spans for the "receive" callback
+            async def _sentry_receive(*args, **kwargs):
+                # type: (*Any, **Any) -> Union[HTTPReceiveMessage, WebSocketReceiveMessage]
+                if sentry_sdk.get_client().get_integration(LitestarIntegration) is None:
+                    return await receive(*args, **kwargs)
+                with sentry_sdk.start_span(
+                    op=OP.MIDDLEWARE_LITESTAR_RECEIVE,
+                    name=getattr(receive, "__qualname__", str(receive)),
+                    origin=LitestarIntegration.origin,
+                ) as span:
+                    span.set_tag("litestar.middleware_name", middleware_name)
+                    return await receive(*args, **kwargs)
+
+            receive_name = getattr(receive, "__name__", str(receive))
+            receive_patched = receive_name == "_sentry_receive"
+            new_receive = _sentry_receive if not receive_patched else receive
+
+            # Creating spans for the "send" callback
+            async def _sentry_send(message):
+                # type: (Message) -> None
+                if sentry_sdk.get_client().get_integration(LitestarIntegration) is None:
+                    return await send(message)
+                with sentry_sdk.start_span(
+                    op=OP.MIDDLEWARE_LITESTAR_SEND,
+                    name=getattr(send, "__qualname__", str(send)),
+                    origin=LitestarIntegration.origin,
+                ) as span:
+                    span.set_tag("litestar.middleware_name", middleware_name)
+                    return await send(message)
+
+            send_name = getattr(send, "__name__", str(send))
+            send_patched = send_name == "_sentry_send"
+            new_send = _sentry_send if not send_patched else send
+
+            return await old_call(self, scope, new_receive, new_send)
+
+    not_yet_patched = old_call.__name__ not in ["_create_span_call"]
+
+    if not_yet_patched:
+        if isinstance(middleware, DefineMiddleware):
+            middleware.middleware.__call__ = _create_span_call
+        else:
+            middleware.__call__ = _create_span_call
+
+    return middleware
+
+
+def patch_http_route_handle():
+    # type: () -> None
+    old_handle = HTTPRoute.handle
+
+    async def handle_wrapper(self, scope, receive, send):
+        # type: (HTTPRoute, HTTPScope, Receive, Send) -> None
+        if sentry_sdk.get_client().get_integration(LitestarIntegration) is None:
+            return await old_handle(self, scope, receive, send)
+
+        sentry_scope = sentry_sdk.get_isolation_scope()
+        request = scope["app"].request_class(
+            scope=scope, receive=receive, send=send
+        )  # type: Request[Any, Any]
+        extracted_request_data = ConnectionDataExtractor(
+            parse_body=True, parse_query=True
+        )(request)
+        body = extracted_request_data.pop("body")
+
+        request_data = await body
+
+        def event_processor(event, _):
+            # type: (Event, Hint) -> Event
+            route_handler = scope.get("route_handler")
+
+            request_info = event.get("request", {})
+            request_info["content_length"] = len(scope.get("_body", b""))
+            if should_send_default_pii():
+                request_info["cookies"] = extracted_request_data["cookies"]
+            if request_data is not None:
+                request_info["data"] = request_data
+
+            func = None
+            if route_handler.name is not None:
+                tx_name = route_handler.name
+            # Accounts for the use of the `Ref` type in earlier versions of Litestar without needing to reference it as a type
+            elif hasattr(route_handler.fn, "value"):
+                func = route_handler.fn.value
+            else:
+                func = route_handler.fn
+            if func is not None:
+                tx_name = transaction_from_function(func)
+
+            tx_info = {"source": SOURCE_FOR_STYLE["endpoint"]}
+
+            if not tx_name:
+                tx_name = _DEFAULT_TRANSACTION_NAME
+                tx_info = {"source": TransactionSource.ROUTE}
+
+            event.update(
+                {
+                    "request": request_info,
+                    "transaction": tx_name,
+                    "transaction_info": tx_info,
+                }
+            )
+            return event
+
+        sentry_scope._name = LitestarIntegration.identifier
+        sentry_scope.add_event_processor(event_processor)
+
+        return await old_handle(self, scope, receive, send)
+
+    HTTPRoute.handle = handle_wrapper
+
+
+def retrieve_user_from_scope(scope):
+    # type: (LitestarScope) -> Optional[dict[str, Any]]
+    scope_user = scope.get("user")
+    if isinstance(scope_user, dict):
+        return scope_user
+    if hasattr(scope_user, "asdict"):  # dataclasses
+        return scope_user.asdict()
+
+    return None
+
+
+@ensure_integration_enabled(LitestarIntegration)
+def exception_handler(exc, scope):
+    # type: (Exception, LitestarScope) -> None
+    user_info = None  # type: Optional[dict[str, Any]]
+    if should_send_default_pii():
+        user_info = retrieve_user_from_scope(scope)
+    if user_info and isinstance(user_info, dict):
+        sentry_scope = sentry_sdk.get_isolation_scope()
+        sentry_scope.set_user(user_info)
+
+    if isinstance(exc, HTTPException):
+        integration = sentry_sdk.get_client().get_integration(LitestarIntegration)
+        if (
+            integration is not None
+            and exc.status_code not in integration.failed_request_status_codes
+        ):
+            return
+
+    event, hint = event_from_exception(
+        exc,
+        client_options=sentry_sdk.get_client().options,
+        mechanism={"type": LitestarIntegration.identifier, "handled": False},
+    )
+
+    sentry_sdk.capture_event(event, hint=hint)
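+
+
+# A minimal usage sketch (hedged: assumes the standard `sentry_sdk.init` entry
+# point; `failed_request_status_codes` is the option defined above):
+#
+#     import sentry_sdk
+#     from sentry_sdk.integrations.litestar import LitestarIntegration
+#
+#     sentry_sdk.init(
+#         dsn="...",
+#         integrations=[LitestarIntegration(failed_request_status_codes={500, 503})],
+#     )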
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/logging.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/logging.py
new file mode 100644
index 00000000..3777381b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/logging.py
@@ -0,0 +1,298 @@
+import logging
+from datetime import datetime, timezone
+from fnmatch import fnmatch
+
+import sentry_sdk
+from sentry_sdk.utils import (
+    to_string,
+    event_from_exception,
+    current_stacktrace,
+    capture_internal_exceptions,
+)
+from sentry_sdk.integrations import Integration
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from collections.abc import MutableMapping
+    from logging import LogRecord
+    from typing import Any
+    from typing import Dict
+    from typing import Optional
+
+DEFAULT_LEVEL = logging.INFO
+DEFAULT_EVENT_LEVEL = logging.ERROR
+LOGGING_TO_EVENT_LEVEL = {
+    logging.NOTSET: "notset",
+    logging.DEBUG: "debug",
+    logging.INFO: "info",
+    logging.WARN: "warning",  # WARN is same a WARNING
+    logging.WARNING: "warning",
+    logging.ERROR: "error",
+    logging.FATAL: "fatal",
+    logging.CRITICAL: "fatal",  # CRITICAL is same as FATAL
+}
+
+# Capturing events from those loggers causes recursion errors. We cannot
+# allow the user to create events from those loggers under any circumstances.
+#
+# Note: Ignoring by logger name here is better than mucking with thread-locals.
+# We do not necessarily know whether thread-locals work 100% correctly in the user's environment.
+_IGNORED_LOGGERS = set(
+    ["sentry_sdk.errors", "urllib3.connectionpool", "urllib3.connection"]
+)
+
+
+def ignore_logger(
+    name,  # type: str
+):
+    # type: (...) -> None
+    """This disables recording (both in breadcrumbs and as events) calls to
+    a logger of a specific name.  Among other uses, many of our integrations
+    use this to prevent their actions being recorded as breadcrumbs. Exposed
+    to users as a way to quiet spammy loggers.
+
+    :param name: The name of the logger to ignore (same string you would pass to ``logging.getLogger``).
+    """
+    _IGNORED_LOGGERS.add(name)
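+
+
+# A hedged usage example: silence a chatty third-party logger entirely
+# (no breadcrumbs, no events), using the same name you would pass to
+# `logging.getLogger`:
+#
+#     ignore_logger("some.chatty.logger")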
+
+
+class LoggingIntegration(Integration):
+    identifier = "logging"
+
+    def __init__(self, level=DEFAULT_LEVEL, event_level=DEFAULT_EVENT_LEVEL):
+        # type: (Optional[int], Optional[int]) -> None
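+        # Records at or above `level` become breadcrumbs and records at or
+        # above `event_level` become events; passing None disables the
+        # corresponding handler (see `_handle_record` below).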
+        self._handler = None
+        self._breadcrumb_handler = None
+
+        if level is not None:
+            self._breadcrumb_handler = BreadcrumbHandler(level=level)
+
+        if event_level is not None:
+            self._handler = EventHandler(level=event_level)
+
+    def _handle_record(self, record):
+        # type: (LogRecord) -> None
+        if self._handler is not None and record.levelno >= self._handler.level:
+            self._handler.handle(record)
+
+        if (
+            self._breadcrumb_handler is not None
+            and record.levelno >= self._breadcrumb_handler.level
+        ):
+            self._breadcrumb_handler.handle(record)
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        old_callhandlers = logging.Logger.callHandlers
+
+        def sentry_patched_callhandlers(self, record):
+            # type: (Any, LogRecord) -> Any
+            # keeping a local reference because the
+            # global might be discarded on shutdown
+            ignored_loggers = _IGNORED_LOGGERS
+
+            try:
+                return old_callhandlers(self, record)
+            finally:
+                # This check is done twice: once here, before we even
+                # fetch the integration, and again inside the handlers.
+                # Otherwise we would have a high chance of running into a
+                # recursion error while the integration is resolved
+                # (resolving it is also slower).
+                if ignored_loggers is not None and record.name not in ignored_loggers:
+                    integration = sentry_sdk.get_client().get_integration(
+                        LoggingIntegration
+                    )
+                    if integration is not None:
+                        integration._handle_record(record)
+
+        logging.Logger.callHandlers = sentry_patched_callhandlers  # type: ignore
+
+
+class _BaseHandler(logging.Handler):
+    COMMON_RECORD_ATTRS = frozenset(
+        (
+            "args",
+            "created",
+            "exc_info",
+            "exc_text",
+            "filename",
+            "funcName",
+            "levelname",
+            "levelno",
+            "linenno",
+            "lineno",
+            "message",
+            "module",
+            "msecs",
+            "msg",
+            "name",
+            "pathname",
+            "process",
+            "processName",
+            "relativeCreated",
+            "stack",
+            "tags",
+            "taskName",
+            "thread",
+            "threadName",
+            "stack_info",
+        )
+    )
+
+    def _can_record(self, record):
+        # type: (LogRecord) -> bool
+        """Prevents ignored loggers from recording"""
+        for logger in _IGNORED_LOGGERS:
+            if fnmatch(record.name, logger):
+                return False
+        return True
+
+    def _logging_to_event_level(self, record):
+        # type: (LogRecord) -> str
+        return LOGGING_TO_EVENT_LEVEL.get(
+            record.levelno, record.levelname.lower() if record.levelname else ""
+        )
+
+    def _extra_from_record(self, record):
+        # type: (LogRecord) -> MutableMapping[str, object]
+        return {
+            k: v
+            for k, v in vars(record).items()
+            if k not in self.COMMON_RECORD_ATTRS
+            and (not isinstance(k, str) or not k.startswith("_"))
+        }
+
+
+class EventHandler(_BaseHandler):
+    """
+    A logging handler that emits Sentry events for each log record.
+
+    Note that you do not have to use this class if the logging integration is enabled, which it is by default.
+    """
+
+    def emit(self, record):
+        # type: (LogRecord) -> Any
+        with capture_internal_exceptions():
+            self.format(record)
+            return self._emit(record)
+
+    def _emit(self, record):
+        # type: (LogRecord) -> None
+        if not self._can_record(record):
+            return
+
+        client = sentry_sdk.get_client()
+        if not client.is_active():
+            return
+
+        client_options = client.options
+
+        # exc_info might be None or (None, None, None)
+        #
+        # exc_info may also be any falsy value due to Python stdlib being
+        # liberal with what it receives and Celery's billiard being "liberal"
+        # with what it sends. See
+        # https://github.com/getsentry/sentry-python/issues/904
+        if record.exc_info and record.exc_info[0] is not None:
+            event, hint = event_from_exception(
+                record.exc_info,
+                client_options=client_options,
+                mechanism={"type": "logging", "handled": True},
+            )
+        elif (record.exc_info and record.exc_info[0] is None) or record.stack_info:
+            event = {}
+            hint = {}
+            with capture_internal_exceptions():
+                event["threads"] = {
+                    "values": [
+                        {
+                            "stacktrace": current_stacktrace(
+                                include_local_variables=client_options[
+                                    "include_local_variables"
+                                ],
+                                max_value_length=client_options["max_value_length"],
+                            ),
+                            "crashed": False,
+                            "current": True,
+                        }
+                    ]
+                }
+        else:
+            event = {}
+            hint = {}
+
+        hint["log_record"] = record
+
+        level = self._logging_to_event_level(record)
+        if level in {"debug", "info", "warning", "error", "critical", "fatal"}:
+            event["level"] = level  # type: ignore[typeddict-item]
+        event["logger"] = record.name
+
+        # Log records from `warnings` module as separate issues
+        record_captured_from_warnings_module = (
+            record.name == "py.warnings" and record.msg == "%s"
+        )
+        if record_captured_from_warnings_module:
+            # use the actual message and not "%s" as the message
+            # this prevents grouping all warnings under one "%s" issue
+            msg = record.args[0]  # type: ignore
+
+            event["logentry"] = {
+                "message": msg,
+                "params": (),
+            }
+
+        else:
+            event["logentry"] = {
+                "message": to_string(record.msg),
+                "params": (
+                    tuple(str(arg) if arg is None else arg for arg in record.args)
+                    if record.args
+                    else ()
+                ),
+            }
+
+        event["extra"] = self._extra_from_record(record)
+
+        sentry_sdk.capture_event(event, hint=hint)
+
+
+# Legacy name
+SentryHandler = EventHandler
+
+
+class BreadcrumbHandler(_BaseHandler):
+    """
+    A logging handler that records breadcrumbs for each log record.
+
+    Note that you do not have to use this class if the logging integration is enabled, which it is by default.
+    """
+
+    def emit(self, record):
+        # type: (LogRecord) -> Any
+        with capture_internal_exceptions():
+            self.format(record)
+            return self._emit(record)
+
+    def _emit(self, record):
+        # type: (LogRecord) -> None
+        if not self._can_record(record):
+            return
+
+        sentry_sdk.add_breadcrumb(
+            self._breadcrumb_from_record(record), hint={"log_record": record}
+        )
+
+    def _breadcrumb_from_record(self, record):
+        # type: (LogRecord) -> Dict[str, Any]
+        return {
+            "type": "log",
+            "level": self._logging_to_event_level(record),
+            "category": record.name,
+            "message": record.message,
+            "timestamp": datetime.fromtimestamp(record.created, timezone.utc),
+            "data": self._extra_from_record(record),
+        }
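+
+
+# A minimal usage sketch (hedged: assumes the standard `sentry_sdk.init` entry
+# point; the integration is enabled by default, so this is only needed to
+# change the thresholds):
+#
+#     import logging
+#     import sentry_sdk
+#     from sentry_sdk.integrations.logging import LoggingIntegration
+#
+#     sentry_sdk.init(
+#         dsn="...",
+#         integrations=[
+#             LoggingIntegration(
+#                 level=logging.INFO,         # breadcrumbs from INFO and up
+#                 event_level=logging.ERROR,  # events from ERROR and up
+#             )
+#         ],
+#     )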
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/loguru.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/loguru.py
new file mode 100644
index 00000000..5b76ea81
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/loguru.py
@@ -0,0 +1,130 @@
+import enum
+
+from sentry_sdk.integrations import Integration, DidNotEnable
+from sentry_sdk.integrations.logging import (
+    BreadcrumbHandler,
+    EventHandler,
+    _BaseHandler,
+)
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from logging import LogRecord
+    from typing import Optional, Tuple, Any
+
+try:
+    import loguru
+    from loguru import logger
+    from loguru._defaults import LOGURU_FORMAT as DEFAULT_FORMAT
+except ImportError:
+    raise DidNotEnable("LOGURU is not installed")
+
+
+class LoggingLevels(enum.IntEnum):
+    TRACE = 5
+    DEBUG = 10
+    INFO = 20
+    SUCCESS = 25
+    WARNING = 30
+    ERROR = 40
+    CRITICAL = 50
+
+
+SENTRY_LEVEL_FROM_LOGURU_LEVEL = {
+    "TRACE": "DEBUG",
+    "DEBUG": "DEBUG",
+    "INFO": "INFO",
+    "SUCCESS": "INFO",
+    "WARNING": "WARNING",
+    "ERROR": "ERROR",
+    "CRITICAL": "CRITICAL",
+}
+
+DEFAULT_LEVEL = LoggingLevels.INFO.value
+DEFAULT_EVENT_LEVEL = LoggingLevels.ERROR.value
+# We need to save the handlers to be able to remove them later
+# in tests (they call `LoguruIntegration.__init__` multiple times,
+# and we can't use `setup_once` because it's called before
+# we get the configuration).
+_ADDED_HANDLERS = (None, None)  # type: Tuple[Optional[int], Optional[int]]
+
+
+class LoguruIntegration(Integration):
+    identifier = "loguru"
+
+    def __init__(
+        self,
+        level=DEFAULT_LEVEL,
+        event_level=DEFAULT_EVENT_LEVEL,
+        breadcrumb_format=DEFAULT_FORMAT,
+        event_format=DEFAULT_FORMAT,
+    ):
+        # type: (Optional[int], Optional[int], str | loguru.FormatFunction, str | loguru.FormatFunction) -> None
+        global _ADDED_HANDLERS
+        breadcrumb_handler, event_handler = _ADDED_HANDLERS
+
+        if breadcrumb_handler is not None:
+            logger.remove(breadcrumb_handler)
+            breadcrumb_handler = None
+        if event_handler is not None:
+            logger.remove(event_handler)
+            event_handler = None
+
+        if level is not None:
+            breadcrumb_handler = logger.add(
+                LoguruBreadcrumbHandler(level=level),
+                level=level,
+                format=breadcrumb_format,
+            )
+
+        if event_level is not None:
+            event_handler = logger.add(
+                LoguruEventHandler(level=event_level),
+                level=event_level,
+                format=event_format,
+            )
+
+        _ADDED_HANDLERS = (breadcrumb_handler, event_handler)
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        pass  # we do everything in __init__
+
+
+class _LoguruBaseHandler(_BaseHandler):
+    def _logging_to_event_level(self, record):
+        # type: (LogRecord) -> str
+        try:
+            return SENTRY_LEVEL_FROM_LOGURU_LEVEL[
+                LoggingLevels(record.levelno).name
+            ].lower()
+        except (ValueError, KeyError):
+            return record.levelname.lower() if record.levelname else ""
+
+
+class LoguruEventHandler(_LoguruBaseHandler, EventHandler):
+    """Modified version of :class:`sentry_sdk.integrations.logging.EventHandler` to use loguru's level names."""
+
+    def __init__(self, *args, **kwargs):
+        # type: (*Any, **Any) -> None
+        if kwargs.get("level"):
+            kwargs["level"] = SENTRY_LEVEL_FROM_LOGURU_LEVEL.get(
+                kwargs.get("level", ""), DEFAULT_LEVEL
+            )
+
+        super().__init__(*args, **kwargs)
+
+
+class LoguruBreadcrumbHandler(_LoguruBaseHandler, BreadcrumbHandler):
+    """Modified version of :class:`sentry_sdk.integrations.logging.BreadcrumbHandler` to use loguru's level names."""
+
+    def __init__(self, *args, **kwargs):
+        # type: (*Any, **Any) -> None
+        if kwargs.get("level"):
+            kwargs["level"] = SENTRY_LEVEL_FROM_LOGURU_LEVEL.get(
+                kwargs.get("level", ""), DEFAULT_LEVEL
+            )
+
+        super().__init__(*args, **kwargs)
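+
+
+# A minimal usage sketch (hedged: assumes the standard `sentry_sdk.init` entry
+# point; the levels correspond to the `LoggingLevels` values defined above):
+#
+#     import sentry_sdk
+#     from sentry_sdk.integrations.loguru import LoguruIntegration, LoggingLevels
+#
+#     sentry_sdk.init(
+#         dsn="...",
+#         integrations=[
+#             LoguruIntegration(
+#                 level=LoggingLevels.INFO.value,         # breadcrumbs
+#                 event_level=LoggingLevels.ERROR.value,  # events
+#             )
+#         ],
+#     )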
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/modules.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/modules.py
new file mode 100644
index 00000000..ce3ee786
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/modules.py
@@ -0,0 +1,29 @@
+import sentry_sdk
+from sentry_sdk.integrations import Integration
+from sentry_sdk.scope import add_global_event_processor
+from sentry_sdk.utils import _get_installed_modules
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+    from sentry_sdk._types import Event
+
+
+class ModulesIntegration(Integration):
+    identifier = "modules"
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        @add_global_event_processor
+        def processor(event, hint):
+            # type: (Event, Any) -> Event
+            if event.get("type") == "transaction":
+                return event
+
+            if sentry_sdk.get_client().get_integration(ModulesIntegration) is None:
+                return event
+
+            event["modules"] = _get_installed_modules()
+            return event
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/openai.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/openai.py
new file mode 100644
index 00000000..61d335b1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/openai.py
@@ -0,0 +1,429 @@
+from functools import wraps
+
+import sentry_sdk
+from sentry_sdk import consts
+from sentry_sdk.ai.monitoring import record_token_usage
+from sentry_sdk.ai.utils import set_data_normalized
+from sentry_sdk.consts import SPANDATA
+from sentry_sdk.integrations import DidNotEnable, Integration
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.utils import (
+    capture_internal_exceptions,
+    event_from_exception,
+)
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any, Iterable, List, Optional, Callable, AsyncIterator, Iterator
+    from sentry_sdk.tracing import Span
+
+try:
+    from openai.resources.chat.completions import Completions, AsyncCompletions
+    from openai.resources import Embeddings, AsyncEmbeddings
+
+    if TYPE_CHECKING:
+        from openai.types.chat import ChatCompletionMessageParam, ChatCompletionChunk
+except ImportError:
+    raise DidNotEnable("OpenAI not installed")
+
+
+class OpenAIIntegration(Integration):
+    identifier = "openai"
+    origin = f"auto.ai.{identifier}"
+
+    def __init__(self, include_prompts=True, tiktoken_encoding_name=None):
+        # type: (OpenAIIntegration, bool, Optional[str]) -> None
+        self.include_prompts = include_prompts
+
+        self.tiktoken_encoding = None
+        if tiktoken_encoding_name is not None:
+            import tiktoken  # type: ignore
+
+            self.tiktoken_encoding = tiktoken.get_encoding(tiktoken_encoding_name)
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        Completions.create = _wrap_chat_completion_create(Completions.create)
+        Embeddings.create = _wrap_embeddings_create(Embeddings.create)
+
+        AsyncCompletions.create = _wrap_async_chat_completion_create(
+            AsyncCompletions.create
+        )
+        AsyncEmbeddings.create = _wrap_async_embeddings_create(AsyncEmbeddings.create)
+
+    def count_tokens(self, s):
+        # type: (OpenAIIntegration, str) -> int
+        if self.tiktoken_encoding is not None:
+            return len(self.tiktoken_encoding.encode_ordinary(s))
+        return 0
+
+
+def _capture_exception(exc):
+    # type: (Any) -> None
+    event, hint = event_from_exception(
+        exc,
+        client_options=sentry_sdk.get_client().options,
+        mechanism={"type": "openai", "handled": False},
+    )
+    sentry_sdk.capture_event(event, hint=hint)
+
+
+def _calculate_chat_completion_usage(
+    messages, response, span, streaming_message_responses, count_tokens
+):
+    # type: (Iterable[ChatCompletionMessageParam], Any, Span, Optional[List[str]], Callable[..., Any]) -> None
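+    # Prefer the token counts reported by the API in `response.usage`; fall
+    # back to counting tokens locally (via the integration's tiktoken-based
+    # `count_tokens`) when the response did not report them. Zero counts are
+    # converted to None at the end so that missing data is not recorded as 0.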
+    completion_tokens = 0  # type: Optional[int]
+    prompt_tokens = 0  # type: Optional[int]
+    total_tokens = 0  # type: Optional[int]
+    if hasattr(response, "usage"):
+        if hasattr(response.usage, "completion_tokens") and isinstance(
+            response.usage.completion_tokens, int
+        ):
+            completion_tokens = response.usage.completion_tokens
+        if hasattr(response.usage, "prompt_tokens") and isinstance(
+            response.usage.prompt_tokens, int
+        ):
+            prompt_tokens = response.usage.prompt_tokens
+        if hasattr(response.usage, "total_tokens") and isinstance(
+            response.usage.total_tokens, int
+        ):
+            total_tokens = response.usage.total_tokens
+
+    if prompt_tokens == 0:
+        for message in messages:
+            if "content" in message:
+                prompt_tokens += count_tokens(message["content"])
+
+    if completion_tokens == 0:
+        if streaming_message_responses is not None:
+            for message in streaming_message_responses:
+                completion_tokens += count_tokens(message)
+        elif hasattr(response, "choices"):
+            for choice in response.choices:
+                if hasattr(choice, "message"):
+                    completion_tokens += count_tokens(choice.message)
+
+    if prompt_tokens == 0:
+        prompt_tokens = None
+    if completion_tokens == 0:
+        completion_tokens = None
+    if total_tokens == 0:
+        total_tokens = None
+    record_token_usage(span, prompt_tokens, completion_tokens, total_tokens)
+
+
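+# The `_new_*_common` helpers below are generators shared by the sync and
+# async wrappers: they first yield `(f, args, kwargs)` so the caller can
+# invoke the original function (awaiting it if necessary), then receive the
+# result via `gen.send(result)` and finish the instrumentation. This keeps a
+# single copy of the span logic for both code paths.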
+def _new_chat_completion_common(f, *args, **kwargs):
+    # type: (Any, *Any, **Any) -> Any
+    integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
+    if integration is None:
+        return f(*args, **kwargs)
+
+    if "messages" not in kwargs:
+        # invalid call (in all versions of openai); let it return an error
+        return f(*args, **kwargs)
+
+    try:
+        iter(kwargs["messages"])
+    except TypeError:
+        # invalid call (in all versions); messages must be iterable
+        return f(*args, **kwargs)
+
+    kwargs["messages"] = list(kwargs["messages"])
+    messages = kwargs["messages"]
+    model = kwargs.get("model")
+    streaming = kwargs.get("stream")
+
+    span = sentry_sdk.start_span(
+        op=consts.OP.OPENAI_CHAT_COMPLETIONS_CREATE,
+        name="Chat Completion",
+        origin=OpenAIIntegration.origin,
+    )
+    span.__enter__()
+
+    res = yield f, args, kwargs
+
+    with capture_internal_exceptions():
+        if should_send_default_pii() and integration.include_prompts:
+            set_data_normalized(span, SPANDATA.AI_INPUT_MESSAGES, messages)
+
+        set_data_normalized(span, SPANDATA.AI_MODEL_ID, model)
+        set_data_normalized(span, SPANDATA.AI_STREAMING, streaming)
+
+        if hasattr(res, "choices"):
+            if should_send_default_pii() and integration.include_prompts:
+                set_data_normalized(
+                    span,
+                    "ai.responses",
+                    list(map(lambda x: x.message, res.choices)),
+                )
+            _calculate_chat_completion_usage(
+                messages, res, span, None, integration.count_tokens
+            )
+            span.__exit__(None, None, None)
+        elif hasattr(res, "_iterator"):
+            data_buf: list[list[str]] = []  # one for each choice
+
+            old_iterator = res._iterator
+
+            def new_iterator():
+                # type: () -> Iterator[ChatCompletionChunk]
+                with capture_internal_exceptions():
+                    for x in old_iterator:
+                        if hasattr(x, "choices"):
+                            choice_index = 0
+                            for choice in x.choices:
+                                if hasattr(choice, "delta") and hasattr(
+                                    choice.delta, "content"
+                                ):
+                                    content = choice.delta.content
+                                    if len(data_buf) <= choice_index:
+                                        data_buf.append([])
+                                    data_buf[choice_index].append(content or "")
+                                choice_index += 1
+                        yield x
+                    if len(data_buf) > 0:
+                        all_responses = list(
+                            map(lambda chunk: "".join(chunk), data_buf)
+                        )
+                        if should_send_default_pii() and integration.include_prompts:
+                            set_data_normalized(
+                                span, SPANDATA.AI_RESPONSES, all_responses
+                            )
+                        _calculate_chat_completion_usage(
+                            messages,
+                            res,
+                            span,
+                            all_responses,
+                            integration.count_tokens,
+                        )
+                span.__exit__(None, None, None)
+
+            async def new_iterator_async():
+                # type: () -> AsyncIterator[ChatCompletionChunk]
+                with capture_internal_exceptions():
+                    async for x in old_iterator:
+                        if hasattr(x, "choices"):
+                            choice_index = 0
+                            for choice in x.choices:
+                                if hasattr(choice, "delta") and hasattr(
+                                    choice.delta, "content"
+                                ):
+                                    content = choice.delta.content
+                                    if len(data_buf) <= choice_index:
+                                        data_buf.append([])
+                                    data_buf[choice_index].append(content or "")
+                                choice_index += 1
+                        yield x
+                    if len(data_buf) > 0:
+                        all_responses = list(
+                            map(lambda chunk: "".join(chunk), data_buf)
+                        )
+                        if should_send_default_pii() and integration.include_prompts:
+                            set_data_normalized(
+                                span, SPANDATA.AI_RESPONSES, all_responses
+                            )
+                        _calculate_chat_completion_usage(
+                            messages,
+                            res,
+                            span,
+                            all_responses,
+                            integration.count_tokens,
+                        )
+                span.__exit__(None, None, None)
+
+            if str(type(res._iterator)) == "<class 'async_generator'>":
+                res._iterator = new_iterator_async()
+            else:
+                res._iterator = new_iterator()
+
+        else:
+            set_data_normalized(span, "unknown_response", True)
+            span.__exit__(None, None, None)
+    return res
+
+
+def _wrap_chat_completion_create(f):
+    # type: (Callable[..., Any]) -> Callable[..., Any]
+    def _execute_sync(f, *args, **kwargs):
+        # type: (Any, *Any, **Any) -> Any
+        gen = _new_chat_completion_common(f, *args, **kwargs)
+
+        try:
+            f, args, kwargs = next(gen)
+        except StopIteration as e:
+            return e.value
+
+        try:
+            try:
+                result = f(*args, **kwargs)
+            except Exception as e:
+                _capture_exception(e)
+                raise e from None
+
+            return gen.send(result)
+        except StopIteration as e:
+            return e.value
+
+    @wraps(f)
+    def _sentry_patched_create_sync(*args, **kwargs):
+        # type: (*Any, **Any) -> Any
+        integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
+        if integration is None or "messages" not in kwargs:
+            # no "messages" means invalid call (in all versions of openai), let it return error
+            return f(*args, **kwargs)
+
+        return _execute_sync(f, *args, **kwargs)
+
+    return _sentry_patched_create_sync
+
+
+def _wrap_async_chat_completion_create(f):
+    # type: (Callable[..., Any]) -> Callable[..., Any]
+    async def _execute_async(f, *args, **kwargs):
+        # type: (Any, *Any, **Any) -> Any
+        gen = _new_chat_completion_common(f, *args, **kwargs)
+
+        try:
+            f, args, kwargs = next(gen)
+        except StopIteration as e:
+            return await e.value
+
+        try:
+            try:
+                result = await f(*args, **kwargs)
+            except Exception as e:
+                _capture_exception(e)
+                raise e from None
+
+            return gen.send(result)
+        except StopIteration as e:
+            return e.value
+
+    @wraps(f)
+    async def _sentry_patched_create_async(*args, **kwargs):
+        # type: (*Any, **Any) -> Any
+        integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
+        if integration is None or "messages" not in kwargs:
+            # no "messages" means invalid call (in all versions of openai), let it return error
+            return await f(*args, **kwargs)
+
+        return await _execute_async(f, *args, **kwargs)
+
+    return _sentry_patched_create_async
+
+
+def _new_embeddings_create_common(f, *args, **kwargs):
+    # type: (Any, *Any, **Any) -> Any
+    integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
+    if integration is None:
+        return f(*args, **kwargs)
+
+    with sentry_sdk.start_span(
+        op=consts.OP.OPENAI_EMBEDDINGS_CREATE,
+        description="OpenAI Embedding Creation",
+        origin=OpenAIIntegration.origin,
+    ) as span:
+        if "input" in kwargs and (
+            should_send_default_pii() and integration.include_prompts
+        ):
+            if isinstance(kwargs["input"], str):
+                set_data_normalized(span, "ai.input_messages", [kwargs["input"]])
+            elif (
+                isinstance(kwargs["input"], list)
+                and len(kwargs["input"]) > 0
+                and isinstance(kwargs["input"][0], str)
+            ):
+                set_data_normalized(span, "ai.input_messages", kwargs["input"])
+        if "model" in kwargs:
+            set_data_normalized(span, "ai.model_id", kwargs["model"])
+
+        response = yield f, args, kwargs
+
+        prompt_tokens = 0
+        total_tokens = 0
+        if hasattr(response, "usage"):
+            if hasattr(response.usage, "prompt_tokens") and isinstance(
+                response.usage.prompt_tokens, int
+            ):
+                prompt_tokens = response.usage.prompt_tokens
+            if hasattr(response.usage, "total_tokens") and isinstance(
+                response.usage.total_tokens, int
+            ):
+                total_tokens = response.usage.total_tokens
+
+        if prompt_tokens == 0:
+            prompt_tokens = integration.count_tokens(kwargs["input"] or "")
+
+        record_token_usage(span, prompt_tokens, None, total_tokens or prompt_tokens)
+
+        return response
+
+
+def _wrap_embeddings_create(f):
+    # type: (Any) -> Any
+    def _execute_sync(f, *args, **kwargs):
+        # type: (Any, *Any, **Any) -> Any
+        gen = _new_embeddings_create_common(f, *args, **kwargs)
+
+        try:
+            f, args, kwargs = next(gen)
+        except StopIteration as e:
+            return e.value
+
+        try:
+            try:
+                result = f(*args, **kwargs)
+            except Exception as e:
+                _capture_exception(e)
+                raise e from None
+
+            return gen.send(result)
+        except StopIteration as e:
+            return e.value
+
+    @wraps(f)
+    def _sentry_patched_create_sync(*args, **kwargs):
+        # type: (*Any, **Any) -> Any
+        integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
+        if integration is None:
+            return f(*args, **kwargs)
+
+        return _execute_sync(f, *args, **kwargs)
+
+    return _sentry_patched_create_sync
+
+
+def _wrap_async_embeddings_create(f):
+    # type: (Any) -> Any
+    async def _execute_async(f, *args, **kwargs):
+        # type: (Any, *Any, **Any) -> Any
+        gen = _new_embeddings_create_common(f, *args, **kwargs)
+
+        try:
+            f, args, kwargs = next(gen)
+        except StopIteration as e:
+            return await e.value
+
+        try:
+            try:
+                result = await f(*args, **kwargs)
+            except Exception as e:
+                _capture_exception(e)
+                raise e from None
+
+            return gen.send(result)
+        except StopIteration as e:
+            return e.value
+
+    @wraps(f)
+    async def _sentry_patched_create_async(*args, **kwargs):
+        # type: (*Any, **Any) -> Any
+        integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
+        if integration is None:
+            return await f(*args, **kwargs)
+
+        return await _execute_async(f, *args, **kwargs)
+
+    return _sentry_patched_create_async
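+
+
+# A minimal usage sketch (hedged: assumes the standard `sentry_sdk.init` entry
+# point; both options are defined in `OpenAIIntegration.__init__` above):
+#
+#     import sentry_sdk
+#     from sentry_sdk.integrations.openai import OpenAIIntegration
+#
+#     sentry_sdk.init(
+#         dsn="...",
+#         send_default_pii=True,  # required for prompts/responses to be recorded
+#         integrations=[
+#             OpenAIIntegration(
+#                 include_prompts=True,
+#                 tiktoken_encoding_name="cl100k_base",  # optional local token counting
+#             )
+#         ],
+#     )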
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/openfeature.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/openfeature.py
new file mode 100644
index 00000000..bf66b94e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/openfeature.py
@@ -0,0 +1,39 @@
+from typing import TYPE_CHECKING
+import sentry_sdk
+
+from sentry_sdk.integrations import DidNotEnable, Integration
+
+try:
+    from openfeature import api
+    from openfeature.hook import Hook
+
+    if TYPE_CHECKING:
+        from openfeature.flag_evaluation import FlagEvaluationDetails
+        from openfeature.hook import HookContext, HookHints
+except ImportError:
+    raise DidNotEnable("OpenFeature is not installed")
+
+
+class OpenFeatureIntegration(Integration):
+    identifier = "openfeature"
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        # Register the hook within the global openfeature hooks list.
+        api.add_hooks(hooks=[OpenFeatureHook()])
+
+
+class OpenFeatureHook(Hook):
+
+    def after(self, hook_context, details, hints):
+        # type: (HookContext, FlagEvaluationDetails[bool], HookHints) -> None
+        if isinstance(details.value, bool):
+            flags = sentry_sdk.get_current_scope().flags
+            flags.set(details.flag_key, details.value)
+
+    def error(self, hook_context, exception, hints):
+        # type: (HookContext, Exception, HookHints) -> None
+        if isinstance(hook_context.default_value, bool):
+            flags = sentry_sdk.get_current_scope().flags
+            flags.set(hook_context.flag_key, hook_context.default_value)
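+
+
+# A minimal usage sketch (hedged: assumes the standard `sentry_sdk.init` entry
+# point and an OpenFeature provider configured elsewhere):
+#
+#     import sentry_sdk
+#     from sentry_sdk.integrations.openfeature import OpenFeatureIntegration
+#
+#     sentry_sdk.init(
+#         dsn="...",
+#         integrations=[OpenFeatureIntegration()],
+#     )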
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/opentelemetry/__init__.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/opentelemetry/__init__.py
new file mode 100644
index 00000000..3c4c1a68
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/opentelemetry/__init__.py
@@ -0,0 +1,7 @@
+from sentry_sdk.integrations.opentelemetry.span_processor import SentrySpanProcessor
+from sentry_sdk.integrations.opentelemetry.propagator import SentryPropagator
+
+__all__ = [
+    "SentryPropagator",
+    "SentrySpanProcessor",
+]
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/opentelemetry/consts.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/opentelemetry/consts.py
new file mode 100644
index 00000000..ec493449
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/opentelemetry/consts.py
@@ -0,0 +1,5 @@
+from opentelemetry.context import create_key
+
+
+SENTRY_TRACE_KEY = create_key("sentry-trace")
+SENTRY_BAGGAGE_KEY = create_key("sentry-baggage")
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/opentelemetry/integration.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/opentelemetry/integration.py
new file mode 100644
index 00000000..43e0396c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/opentelemetry/integration.py
@@ -0,0 +1,58 @@
+"""
+IMPORTANT: The contents of this file are part of a proof of concept and as such
+are experimental and not suitable for production use. They may be changed or
+removed at any time without prior notice.
+"""
+
+from sentry_sdk.integrations import DidNotEnable, Integration
+from sentry_sdk.integrations.opentelemetry.propagator import SentryPropagator
+from sentry_sdk.integrations.opentelemetry.span_processor import SentrySpanProcessor
+from sentry_sdk.utils import logger
+
+try:
+    from opentelemetry import trace
+    from opentelemetry.propagate import set_global_textmap
+    from opentelemetry.sdk.trace import TracerProvider
+except ImportError:
+    raise DidNotEnable("opentelemetry not installed")
+
+try:
+    from opentelemetry.instrumentation.django import DjangoInstrumentor  # type: ignore[import-not-found]
+except ImportError:
+    DjangoInstrumentor = None
+
+
+CONFIGURABLE_INSTRUMENTATIONS = {
+    DjangoInstrumentor: {"is_sql_commentor_enabled": True},
+}
+
+
+class OpenTelemetryIntegration(Integration):
+    identifier = "opentelemetry"
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        logger.warning(
+            "[OTel] Initializing highly experimental OpenTelemetry support. "
+            "Use at your own risk."
+        )
+
+        _setup_sentry_tracing()
+        # _setup_instrumentors()
+
+        logger.debug("[OTel] Finished setting up OpenTelemetry integration")
+
+
+def _setup_sentry_tracing():
+    # type: () -> None
+    provider = TracerProvider()
+    provider.add_span_processor(SentrySpanProcessor())
+    trace.set_tracer_provider(provider)
+    set_global_textmap(SentryPropagator())
+
+
+def _setup_instrumentors():
+    # type: () -> None
+    for instrumentor, kwargs in CONFIGURABLE_INSTRUMENTATIONS.items():
+        instrumentor().instrument(**kwargs)
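+
+
+# A minimal usage sketch (hedged: this module itself warns that the support is
+# experimental; `instrumenter="otel"` hands span creation over to
+# OpenTelemetry, matching the `INSTRUMENTER.OTEL` check elsewhere in this
+# package):
+#
+#     import sentry_sdk
+#     from sentry_sdk.integrations.opentelemetry.integration import (
+#         OpenTelemetryIntegration,
+#     )
+#
+#     sentry_sdk.init(
+#         dsn="...",
+#         instrumenter="otel",
+#         integrations=[OpenTelemetryIntegration()],
+#     )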
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/opentelemetry/propagator.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/opentelemetry/propagator.py
new file mode 100644
index 00000000..b84d582d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/opentelemetry/propagator.py
@@ -0,0 +1,117 @@
+from opentelemetry import trace
+from opentelemetry.context import (
+    Context,
+    get_current,
+    set_value,
+)
+from opentelemetry.propagators.textmap import (
+    CarrierT,
+    Getter,
+    Setter,
+    TextMapPropagator,
+    default_getter,
+    default_setter,
+)
+from opentelemetry.trace import (
+    NonRecordingSpan,
+    SpanContext,
+    TraceFlags,
+)
+
+from sentry_sdk.integrations.opentelemetry.consts import (
+    SENTRY_BAGGAGE_KEY,
+    SENTRY_TRACE_KEY,
+)
+from sentry_sdk.integrations.opentelemetry.span_processor import (
+    SentrySpanProcessor,
+)
+from sentry_sdk.tracing import (
+    BAGGAGE_HEADER_NAME,
+    SENTRY_TRACE_HEADER_NAME,
+)
+from sentry_sdk.tracing_utils import Baggage, extract_sentrytrace_data
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Optional, Set
+
+
+class SentryPropagator(TextMapPropagator):
+    """
+    Propagates tracing headers for Sentry's tracing system in a way OTel understands.
+    """
+
+    def extract(self, carrier, context=None, getter=default_getter):
+        # type: (CarrierT, Optional[Context], Getter[CarrierT]) -> Context
+        if context is None:
+            context = get_current()
+
+        sentry_trace = getter.get(carrier, SENTRY_TRACE_HEADER_NAME)
+        if not sentry_trace:
+            return context
+
+        sentrytrace = extract_sentrytrace_data(sentry_trace[0])
+        if not sentrytrace:
+            return context
+
+        context = set_value(SENTRY_TRACE_KEY, sentrytrace, context)
+
+        trace_id, span_id = sentrytrace["trace_id"], sentrytrace["parent_span_id"]
+
+        span_context = SpanContext(
+            trace_id=int(trace_id, 16),  # type: ignore
+            span_id=int(span_id, 16),  # type: ignore
+            # we simulate a sampled trace on the otel side and leave the sampling to sentry
+            trace_flags=TraceFlags(TraceFlags.SAMPLED),
+            is_remote=True,
+        )
+
+        baggage_header = getter.get(carrier, BAGGAGE_HEADER_NAME)
+
+        if baggage_header:
+            baggage = Baggage.from_incoming_header(baggage_header[0])
+        else:
+            # If there's an incoming sentry-trace but no incoming baggage header,
+            # for instance in traces coming from older SDKs,
+            # baggage will be empty and frozen and won't be populated as head SDK.
+            baggage = Baggage(sentry_items={})
+
+        baggage.freeze()
+        context = set_value(SENTRY_BAGGAGE_KEY, baggage, context)
+
+        span = NonRecordingSpan(span_context)
+        modified_context = trace.set_span_in_context(span, context)
+        return modified_context
+
+    def inject(self, carrier, context=None, setter=default_setter):
+        # type: (CarrierT, Optional[Context], Setter[CarrierT]) -> None
+        if context is None:
+            context = get_current()
+
+        current_span = trace.get_current_span(context)
+        current_span_context = current_span.get_span_context()
+
+        if not current_span_context.is_valid:
+            return
+
+        span_id = trace.format_span_id(current_span_context.span_id)
+
+        span_map = SentrySpanProcessor().otel_span_map
+        sentry_span = span_map.get(span_id, None)
+        if not sentry_span:
+            return
+
+        setter.set(carrier, SENTRY_TRACE_HEADER_NAME, sentry_span.to_traceparent())
+
+        if sentry_span.containing_transaction:
+            baggage = sentry_span.containing_transaction.get_baggage()
+            if baggage:
+                baggage_data = baggage.serialize()
+                if baggage_data:
+                    setter.set(carrier, BAGGAGE_HEADER_NAME, baggage_data)
+
+    @property
+    def fields(self):
+        # type: () -> Set[str]
+        return {SENTRY_TRACE_HEADER_NAME, BAGGAGE_HEADER_NAME}
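+
+
+# An illustrative sketch of what installing this propagator does; `inject`
+# and `set_global_textmap` come from `opentelemetry.propagate`, and the
+# carrier is any dict-like headers object:
+#
+#     from opentelemetry.propagate import inject, set_global_textmap
+#
+#     set_global_textmap(SentryPropagator())
+#     headers = {}
+#     inject(headers)
+#     # `headers` now carries "sentry-trace" (and "baggage") if the active
+#     # span is known to SentrySpanProcessor; otherwise inject() is a no-op,
+#     # matching the early returns in SentryPropagator.inject above.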
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/opentelemetry/span_processor.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/opentelemetry/span_processor.py
new file mode 100644
index 00000000..e00562a5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/opentelemetry/span_processor.py
@@ -0,0 +1,391 @@
+from datetime import datetime, timezone
+from time import time
+from typing import TYPE_CHECKING, cast
+
+from opentelemetry.context import get_value
+from opentelemetry.sdk.trace import SpanProcessor, ReadableSpan as OTelSpan
+from opentelemetry.semconv.trace import SpanAttributes
+from opentelemetry.trace import (
+    format_span_id,
+    format_trace_id,
+    get_current_span,
+    SpanKind,
+)
+from opentelemetry.trace.span import (
+    INVALID_SPAN_ID,
+    INVALID_TRACE_ID,
+)
+from sentry_sdk import get_client, start_transaction
+from sentry_sdk.consts import INSTRUMENTER, SPANSTATUS
+from sentry_sdk.integrations.opentelemetry.consts import (
+    SENTRY_BAGGAGE_KEY,
+    SENTRY_TRACE_KEY,
+)
+from sentry_sdk.scope import add_global_event_processor
+from sentry_sdk.tracing import Transaction, Span as SentrySpan
+from sentry_sdk.utils import Dsn
+
+from urllib3.util import parse_url as urlparse
+
+if TYPE_CHECKING:
+    from typing import Any, Optional, Union
+    from opentelemetry import context as context_api
+    from sentry_sdk._types import Event, Hint
+
+OPEN_TELEMETRY_CONTEXT = "otel"
+SPAN_MAX_TIME_OPEN_MINUTES = 10
+SPAN_ORIGIN = "auto.otel"
+
+
+def link_trace_context_to_error_event(event, otel_span_map):
+    # type: (Event, dict[str, Union[Transaction, SentrySpan]]) -> Event
+    client = get_client()
+
+    if client.options["instrumenter"] != INSTRUMENTER.OTEL:
+        return event
+
+    if event.get("type") == "transaction":
+        return event
+
+    otel_span = get_current_span()
+    if not otel_span:
+        return event
+
+    ctx = otel_span.get_span_context()
+
+    if ctx.trace_id == INVALID_TRACE_ID or ctx.span_id == INVALID_SPAN_ID:
+        return event
+
+    sentry_span = otel_span_map.get(format_span_id(ctx.span_id), None)
+    if not sentry_span:
+        return event
+
+    contexts = event.setdefault("contexts", {})
+    contexts.setdefault("trace", {}).update(sentry_span.get_trace_context())
+
+    return event
+
+
+class SentrySpanProcessor(SpanProcessor):
+    """
+    Converts OTel spans into Sentry spans so they can be sent to the Sentry backend.
+    """
+
+    # The mapping from otel span ids to sentry spans
+    otel_span_map = {}  # type: dict[str, Union[Transaction, SentrySpan]]
+
+    # The currently open spans. Elements will be discarded after SPAN_MAX_TIME_OPEN_MINUTES
+    open_spans = {}  # type: dict[int, set[str]]
+
+    def __new__(cls):
+        # type: () -> SentrySpanProcessor
+        if not hasattr(cls, "instance"):
+            cls.instance = super().__new__(cls)
+
+        return cls.instance
+
+    def __init__(self):
+        # type: () -> None
+        @add_global_event_processor
+        def global_event_processor(event, hint):
+            # type: (Event, Hint) -> Event
+            return link_trace_context_to_error_event(event, self.otel_span_map)
+
+    def _prune_old_spans(self):
+        # type: (SentrySpanProcessor) -> None
+        """
+        Prune spans that have been open for too long.
+        """
+        current_time_minutes = int(time() / 60)
+        for span_start_minutes in list(
+            self.open_spans.keys()
+        ):  # list() because we mutate the dict while iterating
+            # prune empty open spans buckets
+            if self.open_spans[span_start_minutes] == set():
+                self.open_spans.pop(span_start_minutes)
+
+            # prune old buckets
+            elif current_time_minutes - span_start_minutes > SPAN_MAX_TIME_OPEN_MINUTES:
+                for span_id in self.open_spans.pop(span_start_minutes):
+                    self.otel_span_map.pop(span_id, None)
+
+    def on_start(self, otel_span, parent_context=None):
+        # type: (OTelSpan, Optional[context_api.Context]) -> None
+        client = get_client()
+
+        if not client.dsn:
+            return
+
+        try:
+            _ = Dsn(client.dsn)
+        except Exception:
+            return
+
+        if client.options["instrumenter"] != INSTRUMENTER.OTEL:
+            return
+
+        if not otel_span.get_span_context().is_valid:
+            return
+
+        if self._is_sentry_span(otel_span):
+            return
+
+        trace_data = self._get_trace_data(otel_span, parent_context)
+
+        parent_span_id = trace_data["parent_span_id"]
+        sentry_parent_span = (
+            self.otel_span_map.get(parent_span_id) if parent_span_id else None
+        )
+
+        start_timestamp = None
+        if otel_span.start_time is not None:
+            start_timestamp = datetime.fromtimestamp(
+                otel_span.start_time / 1e9, timezone.utc
+            )  # OTel spans have nanosecond precision
+
+        sentry_span = None
+        if sentry_parent_span:
+            sentry_span = sentry_parent_span.start_child(
+                span_id=trace_data["span_id"],
+                name=otel_span.name,
+                start_timestamp=start_timestamp,
+                instrumenter=INSTRUMENTER.OTEL,
+                origin=SPAN_ORIGIN,
+            )
+        else:
+            sentry_span = start_transaction(
+                name=otel_span.name,
+                span_id=trace_data["span_id"],
+                parent_span_id=parent_span_id,
+                trace_id=trace_data["trace_id"],
+                baggage=trace_data["baggage"],
+                start_timestamp=start_timestamp,
+                instrumenter=INSTRUMENTER.OTEL,
+                origin=SPAN_ORIGIN,
+            )
+
+        self.otel_span_map[trace_data["span_id"]] = sentry_span
+
+        if otel_span.start_time is not None:
+            span_start_in_minutes = int(
+                otel_span.start_time / 1e9 / 60
+            )  # OTel spans have nanosecond precision
+            self.open_spans.setdefault(span_start_in_minutes, set()).add(
+                trace_data["span_id"]
+            )
+
+        self._prune_old_spans()
+
+    def on_end(self, otel_span):
+        # type: (OTelSpan) -> None
+        client = get_client()
+
+        if client.options["instrumenter"] != INSTRUMENTER.OTEL:
+            return
+
+        span_context = otel_span.get_span_context()
+        if not span_context.is_valid:
+            return
+
+        span_id = format_span_id(span_context.span_id)
+        sentry_span = self.otel_span_map.pop(span_id, None)
+        if not sentry_span:
+            return
+
+        sentry_span.op = otel_span.name
+
+        self._update_span_with_otel_status(sentry_span, otel_span)
+
+        if isinstance(sentry_span, Transaction):
+            sentry_span.name = otel_span.name
+            sentry_span.set_context(
+                OPEN_TELEMETRY_CONTEXT, self._get_otel_context(otel_span)
+            )
+            self._update_transaction_with_otel_data(sentry_span, otel_span)
+
+        else:
+            self._update_span_with_otel_data(sentry_span, otel_span)
+
+        end_timestamp = None
+        if otel_span.end_time is not None:
+            end_timestamp = datetime.fromtimestamp(
+                otel_span.end_time / 1e9, timezone.utc
+            )  # OTel spans have nanosecond precision
+
+        sentry_span.finish(end_timestamp=end_timestamp)
+
+        if otel_span.start_time is not None:
+            span_start_in_minutes = int(
+                otel_span.start_time / 1e9 / 60
+            )  # OTel spans have nanosecond precision
+            self.open_spans.setdefault(span_start_in_minutes, set()).discard(span_id)
+
+        self._prune_old_spans()
+
+    def _is_sentry_span(self, otel_span):
+        # type: (OTelSpan) -> bool
+        """
+        Break an infinite loop:
+        HTTP requests to Sentry are caught by OTel and would be sent to Sentry again.
+        """
+        otel_span_url = None
+        if otel_span.attributes is not None:
+            otel_span_url = otel_span.attributes.get(SpanAttributes.HTTP_URL)
+        otel_span_url = cast("Optional[str]", otel_span_url)
+
+        dsn_url = None
+        client = get_client()
+        if client.dsn:
+            dsn_url = Dsn(client.dsn).netloc
+
+        if otel_span_url and dsn_url and dsn_url in otel_span_url:
+            return True
+
+        return False
+
+    def _get_otel_context(self, otel_span):
+        # type: (OTelSpan) -> dict[str, Any]
+        """
+        Returns the OTel context for Sentry.
+        See: https://develop.sentry.dev/sdk/performance/opentelemetry/#step-5-add-opentelemetry-context
+        """
+        ctx = {}
+
+        if otel_span.attributes:
+            ctx["attributes"] = dict(otel_span.attributes)
+
+        if otel_span.resource.attributes:
+            ctx["resource"] = dict(otel_span.resource.attributes)
+
+        return ctx
+
+    def _get_trace_data(self, otel_span, parent_context):
+        # type: (OTelSpan, Optional[context_api.Context]) -> dict[str, Any]
+        """
+        Extracts tracing information from one OTel span and its parent OTel context.
+        """
+        trace_data = {}  # type: dict[str, Any]
+        span_context = otel_span.get_span_context()
+
+        span_id = format_span_id(span_context.span_id)
+        trace_data["span_id"] = span_id
+
+        trace_id = format_trace_id(span_context.trace_id)
+        trace_data["trace_id"] = trace_id
+
+        parent_span_id = (
+            format_span_id(otel_span.parent.span_id) if otel_span.parent else None
+        )
+        trace_data["parent_span_id"] = parent_span_id
+
+        sentry_trace_data = get_value(SENTRY_TRACE_KEY, parent_context)
+        sentry_trace_data = cast("dict[str, Union[str, bool, None]]", sentry_trace_data)
+        trace_data["parent_sampled"] = (
+            sentry_trace_data["parent_sampled"] if sentry_trace_data else None
+        )
+
+        baggage = get_value(SENTRY_BAGGAGE_KEY, parent_context)
+        trace_data["baggage"] = baggage
+
+        return trace_data
+
+    def _update_span_with_otel_status(self, sentry_span, otel_span):
+        # type: (SentrySpan, OTelSpan) -> None
+        """
+        Set the Sentry span status from the OTel span
+        """
+        if otel_span.status.is_unset:
+            return
+
+        if otel_span.status.is_ok:
+            sentry_span.set_status(SPANSTATUS.OK)
+            return
+
+        sentry_span.set_status(SPANSTATUS.INTERNAL_ERROR)
+
+    def _update_span_with_otel_data(self, sentry_span, otel_span):
+        # type: (SentrySpan, OTelSpan) -> None
+        """
+        Convert OTel span data and update the Sentry span with it.
+        This should eventually happen on the server when ingesting the spans.
+        """
+        sentry_span.set_data("otel.kind", otel_span.kind)
+
+        op = otel_span.name
+        description = otel_span.name
+
+        if otel_span.attributes is not None:
+            for key, val in otel_span.attributes.items():
+                sentry_span.set_data(key, val)
+
+            http_method = otel_span.attributes.get(SpanAttributes.HTTP_METHOD)
+            http_method = cast("Optional[str]", http_method)
+
+            db_query = otel_span.attributes.get(SpanAttributes.DB_SYSTEM)
+
+            if http_method:
+                op = "http"
+
+                if otel_span.kind == SpanKind.SERVER:
+                    op += ".server"
+                elif otel_span.kind == SpanKind.CLIENT:
+                    op += ".client"
+
+                description = http_method
+
+                peer_name = otel_span.attributes.get(SpanAttributes.NET_PEER_NAME, None)
+                if peer_name:
+                    description += " {}".format(peer_name)
+
+                target = otel_span.attributes.get(SpanAttributes.HTTP_TARGET, None)
+                if target:
+                    description += " {}".format(target)
+
+                if not peer_name and not target:
+                    url = otel_span.attributes.get(SpanAttributes.HTTP_URL, None)
+                    url = cast("Optional[str]", url)
+                    if url:
+                        parsed_url = urlparse(url)
+                        url = "{}://{}{}".format(
+                            parsed_url.scheme, parsed_url.netloc, parsed_url.path
+                        )
+                        description += " {}".format(url)
+
+                status_code = otel_span.attributes.get(
+                    SpanAttributes.HTTP_STATUS_CODE, None
+                )
+                status_code = cast("Optional[int]", status_code)
+                if status_code:
+                    sentry_span.set_http_status(status_code)
+
+            elif db_query:
+                op = "db"
+                statement = otel_span.attributes.get(SpanAttributes.DB_STATEMENT, None)
+                statement = cast("Optional[str]", statement)
+                if statement:
+                    description = statement
+
+        sentry_span.op = op
+        sentry_span.description = description
+
+    def _update_transaction_with_otel_data(self, sentry_span, otel_span):
+        # type: (SentrySpan, OTelSpan) -> None
+        if otel_span.attributes is None:
+            return
+
+        http_method = otel_span.attributes.get(SpanAttributes.HTTP_METHOD)
+
+        if http_method:
+            status_code = otel_span.attributes.get(SpanAttributes.HTTP_STATUS_CODE)
+            status_code = cast("Optional[int]", status_code)
+            if status_code:
+                sentry_span.set_http_status(status_code)
+
+            op = "http"
+
+            if otel_span.kind == SpanKind.SERVER:
+                op += ".server"
+            elif otel_span.kind == SpanKind.CLIENT:
+                op += ".client"
+
+            sentry_span.op = op
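+
+
+# A small illustrative note: __new__ above makes SentrySpanProcessor a
+# process-wide singleton, which is how SentryPropagator.inject can reach the
+# same otel_span_map without holding a reference to this processor:
+#
+#     assert SentrySpanProcessor() is SentrySpanProcessor()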
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/pure_eval.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/pure_eval.py
new file mode 100644
index 00000000..c1c3d638
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/pure_eval.py
@@ -0,0 +1,139 @@
+import ast
+
+import sentry_sdk
+from sentry_sdk import serializer
+from sentry_sdk.integrations import Integration, DidNotEnable
+from sentry_sdk.scope import add_global_event_processor
+from sentry_sdk.utils import walk_exception_chain, iter_stacks
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Optional, Dict, Any, Tuple, List
+    from types import FrameType
+
+    from sentry_sdk._types import Event, Hint
+
+try:
+    import executing
+except ImportError:
+    raise DidNotEnable("executing is not installed")
+
+try:
+    import pure_eval
+except ImportError:
+    raise DidNotEnable("pure_eval is not installed")
+
+try:
+    # Used implicitly, just testing it's available
+    import asttokens  # noqa
+except ImportError:
+    raise DidNotEnable("asttokens is not installed")
+
+
+class PureEvalIntegration(Integration):
+    identifier = "pure_eval"
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+
+        @add_global_event_processor
+        def add_executing_info(event, hint):
+            # type: (Event, Optional[Hint]) -> Optional[Event]
+            if sentry_sdk.get_client().get_integration(PureEvalIntegration) is None:
+                return event
+
+            if hint is None:
+                return event
+
+            exc_info = hint.get("exc_info", None)
+
+            if exc_info is None:
+                return event
+
+            exception = event.get("exception", None)
+
+            if exception is None:
+                return event
+
+            values = exception.get("values", None)
+
+            if values is None:
+                return event
+
+            for exception, (_exc_type, _exc_value, exc_tb) in zip(
+                reversed(values), walk_exception_chain(exc_info)
+            ):
+                sentry_frames = [
+                    frame
+                    for frame in exception.get("stacktrace", {}).get("frames", [])
+                    if frame.get("function")
+                ]
+                tbs = list(iter_stacks(exc_tb))
+                if len(sentry_frames) != len(tbs):
+                    continue
+
+                for sentry_frame, tb in zip(sentry_frames, tbs):
+                    sentry_frame["vars"] = (
+                        pure_eval_frame(tb.tb_frame) or sentry_frame["vars"]
+                    )
+            return event
+
+
+def pure_eval_frame(frame):
+    # type: (FrameType) -> Dict[str, Any]
+    source = executing.Source.for_frame(frame)
+    if not source.tree:
+        return {}
+
+    statements = source.statements_at_line(frame.f_lineno)
+    if not statements:
+        return {}
+
+    scope = stmt = list(statements)[0]
+    while True:
+        # Get the parent first in case the original statement is already
+        # a function definition, e.g. if we're calling a decorator.
+        # In that case we still want the surrounding scope, not that function.
+        scope = scope.parent
+        if isinstance(scope, (ast.FunctionDef, ast.ClassDef, ast.Module)):
+            break
+
+    evaluator = pure_eval.Evaluator.from_frame(frame)
+    expressions = evaluator.interesting_expressions_grouped(scope)
+
+    def closeness(expression):
+        # type: (Tuple[List[Any], Any]) -> Tuple[int, int]
+        # Prioritise expressions with a node closer to the statement executed
+        # without being after that statement
+        # A higher return value is better - the expression will appear
+        # earlier in the list of values and is less likely to be trimmed
+        nodes, _value = expression
+
+        def start(n):
+            # type: (ast.expr) -> Tuple[int, int]
+            return (n.lineno, n.col_offset)
+
+        nodes_before_stmt = [
+            node for node in nodes if start(node) < stmt.last_token.end  # type: ignore
+        ]
+        if nodes_before_stmt:
+            # The position of the last node before or in the statement
+            return max(start(node) for node in nodes_before_stmt)
+        else:
+            # The position of the first node after the statement
+            # Negative means it's always lower priority than nodes that come before
+            # Less negative means closer to the statement and higher priority
+            lineno, col_offset = min(start(node) for node in nodes)
+            return (-lineno, -col_offset)
+
+    # This adds the first_token and last_token attributes to nodes
+    atok = source.asttokens()
+
+    expressions.sort(key=closeness, reverse=True)
+    vars = {
+        atok.get_text(nodes[0]): value
+        for nodes, value in expressions[: serializer.MAX_DATABAG_BREADTH]
+    }
+    return serializer.serialize(vars, is_vars=True)
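+
+
+# An illustrative sketch of what pure_eval_frame produces; the names in the
+# demo function are arbitrary:
+#
+#     import inspect
+#
+#     def _demo():
+#         user = {"id": 42}
+#         count = len(user)
+#         print(pure_eval_frame(inspect.currentframe()))
+#
+#     _demo()  # prints serialized values for expressions such as `user`, `count`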
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/pymongo.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/pymongo.py
new file mode 100644
index 00000000..f65ad736
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/pymongo.py
@@ -0,0 +1,214 @@
+import copy
+import json
+
+import sentry_sdk
+from sentry_sdk.consts import SPANSTATUS, SPANDATA, OP
+from sentry_sdk.integrations import DidNotEnable, Integration
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.tracing import Span
+from sentry_sdk.utils import capture_internal_exceptions
+
+try:
+    from pymongo import monitoring
+except ImportError:
+    raise DidNotEnable("Pymongo not installed")
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any, Dict, Union
+
+    from pymongo.monitoring import (
+        CommandFailedEvent,
+        CommandStartedEvent,
+        CommandSucceededEvent,
+    )
+
+
+SAFE_COMMAND_ATTRIBUTES = [
+    "insert",
+    "ordered",
+    "find",
+    "limit",
+    "singleBatch",
+    "aggregate",
+    "createIndexes",
+    "indexes",
+    "delete",
+    "findAndModify",
+    "renameCollection",
+    "to",
+    "drop",
+]
+
+
+def _strip_pii(command):
+    # type: (Dict[str, Any]) -> Dict[str, Any]
+    for key in command:
+        is_safe_field = key in SAFE_COMMAND_ATTRIBUTES
+        if is_safe_field:
+            # Skip if safe key
+            continue
+
+        update_db_command = key == "update" and "findAndModify" not in command
+        if update_db_command:
+            # Also skip the "update" db command because it is safe.
+            # There is also an "update" key in the "findAndModify" command, which is NOT safe!
+            continue
+
+        # Special stripping for documents
+        is_document = key == "documents"
+        if is_document:
+            for doc in command[key]:
+                for doc_key in doc:
+                    doc[doc_key] = "%s"
+            continue
+
+        # Special stripping for dict style fields
+        is_dict_field = key in ["filter", "query", "update"]
+        if is_dict_field:
+            for item_key in command[key]:
+                command[key][item_key] = "%s"
+            continue
+
+        # For pipeline fields strip the `$match` dict
+        is_pipeline_field = key == "pipeline"
+        if is_pipeline_field:
+            for pipeline in command[key]:
+                for match_key in pipeline["$match"] if "$match" in pipeline else []:
+                    pipeline["$match"][match_key] = "%s"
+            continue
+
+        # Default stripping
+        command[key] = "%s"
+
+    return command
+
+
+def _get_db_data(event):
+    # type: (Any) -> Dict[str, Any]
+    data = {}
+
+    data[SPANDATA.DB_SYSTEM] = "mongodb"
+
+    db_name = event.database_name
+    if db_name is not None:
+        data[SPANDATA.DB_NAME] = db_name
+
+    server_address = event.connection_id[0]
+    if server_address is not None:
+        data[SPANDATA.SERVER_ADDRESS] = server_address
+
+    server_port = event.connection_id[1]
+    if server_port is not None:
+        data[SPANDATA.SERVER_PORT] = server_port
+
+    return data
+
+
+class CommandTracer(monitoring.CommandListener):
+    def __init__(self):
+        # type: () -> None
+        self._ongoing_operations = {}  # type: Dict[int, Span]
+
+    def _operation_key(self, event):
+        # type: (Union[CommandFailedEvent, CommandStartedEvent, CommandSucceededEvent]) -> int
+        return event.request_id
+
+    def started(self, event):
+        # type: (CommandStartedEvent) -> None
+        if sentry_sdk.get_client().get_integration(PyMongoIntegration) is None:
+            return
+
+        with capture_internal_exceptions():
+            command = dict(copy.deepcopy(event.command))
+
+            command.pop("$db", None)
+            command.pop("$clusterTime", None)
+            command.pop("$signature", None)
+
+            tags = {
+                "db.name": event.database_name,
+                SPANDATA.DB_SYSTEM: "mongodb",
+                SPANDATA.DB_OPERATION: event.command_name,
+                SPANDATA.DB_MONGODB_COLLECTION: command.get(event.command_name),
+            }
+
+            try:
+                tags["net.peer.name"] = event.connection_id[0]
+                tags["net.peer.port"] = str(event.connection_id[1])
+            except TypeError:
+                pass
+
+            data = {"operation_ids": {}}  # type: Dict[str, Any]
+            data["operation_ids"]["operation"] = event.operation_id
+            data["operation_ids"]["request"] = event.request_id
+
+            data.update(_get_db_data(event))
+
+            try:
+                lsid = command.pop("lsid")["id"]
+                data["operation_ids"]["session"] = str(lsid)
+            except KeyError:
+                pass
+
+            if not should_send_default_pii():
+                command = _strip_pii(command)
+
+            query = json.dumps(command, default=str)
+            span = sentry_sdk.start_span(
+                op=OP.DB,
+                name=query,
+                origin=PyMongoIntegration.origin,
+            )
+
+            for tag, value in tags.items():
+                # set the tag for backwards-compatibility.
+                # TODO: remove the set_tag call in the next major release!
+                span.set_tag(tag, value)
+
+                span.set_data(tag, value)
+
+            for key, value in data.items():
+                span.set_data(key, value)
+
+            with capture_internal_exceptions():
+                sentry_sdk.add_breadcrumb(
+                    message=query, category="query", type=OP.DB, data=tags
+                )
+
+            self._ongoing_operations[self._operation_key(event)] = span.__enter__()
+
+    def failed(self, event):
+        # type: (CommandFailedEvent) -> None
+        if sentry_sdk.get_client().get_integration(PyMongoIntegration) is None:
+            return
+
+        try:
+            span = self._ongoing_operations.pop(self._operation_key(event))
+            span.set_status(SPANSTATUS.INTERNAL_ERROR)
+            span.__exit__(None, None, None)
+        except KeyError:
+            return
+
+    def succeeded(self, event):
+        # type: (CommandSucceededEvent) -> None
+        if sentry_sdk.get_client().get_integration(PyMongoIntegration) is None:
+            return
+
+        try:
+            span = self._ongoing_operations.pop(self._operation_key(event))
+            span.set_status(SPANSTATUS.OK)
+            span.__exit__(None, None, None)
+        except KeyError:
+            pass
+
+
+class PyMongoIntegration(Integration):
+    identifier = "pymongo"
+    origin = f"auto.db.{identifier}"
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        monitoring.register(CommandTracer())
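+
+
+# A worked example of the PII stripping above (illustrative): safe keys keep
+# their values, dict-style fields have their values masked, and any other key
+# is replaced outright:
+#
+#     _strip_pii({"find": "users", "filter": {"email": "a@b.c"}, "comment": "x"})
+#     # -> {"find": "users", "filter": {"email": "%s"}, "comment": "%s"}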
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/pyramid.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/pyramid.py
new file mode 100644
index 00000000..d1475ada
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/pyramid.py
@@ -0,0 +1,229 @@
+import functools
+import os
+import sys
+import weakref
+
+import sentry_sdk
+from sentry_sdk.integrations import Integration, DidNotEnable
+from sentry_sdk.integrations._wsgi_common import RequestExtractor
+from sentry_sdk.integrations.wsgi import SentryWsgiMiddleware
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.tracing import SOURCE_FOR_STYLE
+from sentry_sdk.utils import (
+    capture_internal_exceptions,
+    ensure_integration_enabled,
+    event_from_exception,
+    reraise,
+)
+
+try:
+    from pyramid.httpexceptions import HTTPException
+    from pyramid.request import Request
+except ImportError:
+    raise DidNotEnable("Pyramid not installed")
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from pyramid.response import Response
+    from typing import Any
+    from sentry_sdk.integrations.wsgi import _ScopedResponse
+    from typing import Callable
+    from typing import Dict
+    from typing import Optional
+    from webob.cookies import RequestCookies
+    from webob.request import _FieldStorageWithFile
+
+    from sentry_sdk.utils import ExcInfo
+    from sentry_sdk._types import Event, EventProcessor
+
+
+if getattr(Request, "authenticated_userid", None):
+
+    def authenticated_userid(request):
+        # type: (Request) -> Optional[Any]
+        return request.authenticated_userid
+
+else:
+    # bw-compat for pyramid < 1.5
+    from pyramid.security import authenticated_userid  # type: ignore
+
+
+TRANSACTION_STYLE_VALUES = ("route_name", "route_pattern")
+
+
+class PyramidIntegration(Integration):
+    identifier = "pyramid"
+    origin = f"auto.http.{identifier}"
+
+    transaction_style = ""
+
+    def __init__(self, transaction_style="route_name"):
+        # type: (str) -> None
+        if transaction_style not in TRANSACTION_STYLE_VALUES:
+            raise ValueError(
+                "Invalid value for transaction_style: %s (must be in %s)"
+                % (transaction_style, TRANSACTION_STYLE_VALUES)
+            )
+        self.transaction_style = transaction_style
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        from pyramid import router
+
+        old_call_view = router._call_view
+
+        @functools.wraps(old_call_view)
+        def sentry_patched_call_view(registry, request, *args, **kwargs):
+            # type: (Any, Request, *Any, **Any) -> Response
+            integration = sentry_sdk.get_client().get_integration(PyramidIntegration)
+            if integration is None:
+                return old_call_view(registry, request, *args, **kwargs)
+
+            _set_transaction_name_and_source(
+                sentry_sdk.get_current_scope(), integration.transaction_style, request
+            )
+            scope = sentry_sdk.get_isolation_scope()
+            scope.add_event_processor(
+                _make_event_processor(weakref.ref(request), integration)
+            )
+
+            return old_call_view(registry, request, *args, **kwargs)
+
+        router._call_view = sentry_patched_call_view
+
+        if hasattr(Request, "invoke_exception_view"):
+            old_invoke_exception_view = Request.invoke_exception_view
+
+            def sentry_patched_invoke_exception_view(self, *args, **kwargs):
+                # type: (Request, *Any, **Any) -> Any
+                rv = old_invoke_exception_view(self, *args, **kwargs)
+
+                if (
+                    self.exc_info
+                    and all(self.exc_info)
+                    and rv.status_int == 500
+                    and sentry_sdk.get_client().get_integration(PyramidIntegration)
+                    is not None
+                ):
+                    _capture_exception(self.exc_info)
+
+                return rv
+
+            Request.invoke_exception_view = sentry_patched_invoke_exception_view
+
+        old_wsgi_call = router.Router.__call__
+
+        @ensure_integration_enabled(PyramidIntegration, old_wsgi_call)
+        def sentry_patched_wsgi_call(self, environ, start_response):
+            # type: (Any, Dict[str, str], Callable[..., Any]) -> _ScopedResponse
+            def sentry_patched_inner_wsgi_call(environ, start_response):
+                # type: (Dict[str, Any], Callable[..., Any]) -> Any
+                try:
+                    return old_wsgi_call(self, environ, start_response)
+                except Exception:
+                    einfo = sys.exc_info()
+                    _capture_exception(einfo)
+                    reraise(*einfo)
+
+            middleware = SentryWsgiMiddleware(
+                sentry_patched_inner_wsgi_call,
+                span_origin=PyramidIntegration.origin,
+            )
+            return middleware(environ, start_response)
+
+        router.Router.__call__ = sentry_patched_wsgi_call
+
+
+@ensure_integration_enabled(PyramidIntegration)
+def _capture_exception(exc_info):
+    # type: (ExcInfo) -> None
+    if exc_info[0] is None or issubclass(exc_info[0], HTTPException):
+        return
+
+    event, hint = event_from_exception(
+        exc_info,
+        client_options=sentry_sdk.get_client().options,
+        mechanism={"type": "pyramid", "handled": False},
+    )
+
+    sentry_sdk.capture_event(event, hint=hint)
+
+
+def _set_transaction_name_and_source(scope, transaction_style, request):
+    # type: (sentry_sdk.Scope, str, Request) -> None
+    try:
+        name_for_style = {
+            "route_name": request.matched_route.name,
+            "route_pattern": request.matched_route.pattern,
+        }
+        scope.set_transaction_name(
+            name_for_style[transaction_style],
+            source=SOURCE_FOR_STYLE[transaction_style],
+        )
+    except Exception:
+        pass
+
+
+class PyramidRequestExtractor(RequestExtractor):
+    def url(self):
+        # type: () -> str
+        return self.request.path_url
+
+    def env(self):
+        # type: () -> Dict[str, str]
+        return self.request.environ
+
+    def cookies(self):
+        # type: () -> RequestCookies
+        return self.request.cookies
+
+    def raw_data(self):
+        # type: () -> str
+        return self.request.text
+
+    def form(self):
+        # type: () -> Dict[str, str]
+        return {
+            key: value
+            for key, value in self.request.POST.items()
+            if not getattr(value, "filename", None)
+        }
+
+    def files(self):
+        # type: () -> Dict[str, _FieldStorageWithFile]
+        return {
+            key: value
+            for key, value in self.request.POST.items()
+            if getattr(value, "filename", None)
+        }
+
+    def size_of_file(self, postdata):
+        # type: (_FieldStorageWithFile) -> int
+        file = postdata.file
+        try:
+            return os.fstat(file.fileno()).st_size
+        except Exception:
+            return 0
+
+
+def _make_event_processor(weak_request, integration):
+    # type: (Callable[[], Request], PyramidIntegration) -> EventProcessor
+    def pyramid_event_processor(event, hint):
+        # type: (Event, Dict[str, Any]) -> Event
+        request = weak_request()
+        if request is None:
+            return event
+
+        with capture_internal_exceptions():
+            PyramidRequestExtractor(request).extract_into_event(event)
+
+        if should_send_default_pii():
+            with capture_internal_exceptions():
+                user_info = event.setdefault("user", {})
+                user_info.setdefault("id", authenticated_userid(request))
+
+        return event
+
+    return pyramid_event_processor
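+
+
+# A minimal usage sketch, assuming a valid DSN (the one below is a
+# placeholder); transaction_style must be one of TRANSACTION_STYLE_VALUES:
+#
+#     import sentry_sdk
+#     from sentry_sdk.integrations.pyramid import PyramidIntegration
+#
+#     sentry_sdk.init(
+#         dsn="https://examplePublicKey@o0.ingest.sentry.io/0",
+#         integrations=[PyramidIntegration(transaction_style="route_pattern")],
+#     )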
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/quart.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/quart.py
new file mode 100644
index 00000000..51306bb4
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/quart.py
@@ -0,0 +1,237 @@
+import asyncio
+import inspect
+from functools import wraps
+
+import sentry_sdk
+from sentry_sdk.integrations import DidNotEnable, Integration
+from sentry_sdk.integrations._wsgi_common import _filter_headers
+from sentry_sdk.integrations.asgi import SentryAsgiMiddleware
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.tracing import SOURCE_FOR_STYLE
+from sentry_sdk.utils import (
+    capture_internal_exceptions,
+    ensure_integration_enabled,
+    event_from_exception,
+)
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+    from typing import Union
+
+    from sentry_sdk._types import Event, EventProcessor
+
+try:
+    import quart_auth  # type: ignore
+except ImportError:
+    quart_auth = None
+
+try:
+    from quart import (  # type: ignore
+        has_request_context,
+        has_websocket_context,
+        Request,
+        Quart,
+        request,
+        websocket,
+    )
+    from quart.signals import (  # type: ignore
+        got_background_exception,
+        got_request_exception,
+        got_websocket_exception,
+        request_started,
+        websocket_started,
+    )
+except ImportError:
+    raise DidNotEnable("Quart is not installed")
+else:
+    # Quart 0.19 is based on Flask and hence no longer has a Scaffold
+    try:
+        from quart.scaffold import Scaffold  # type: ignore
+    except ImportError:
+        from flask.sansio.scaffold import Scaffold  # type: ignore
+
+TRANSACTION_STYLE_VALUES = ("endpoint", "url")
+
+
+class QuartIntegration(Integration):
+    identifier = "quart"
+    origin = f"auto.http.{identifier}"
+
+    transaction_style = ""
+
+    def __init__(self, transaction_style="endpoint"):
+        # type: (str) -> None
+        if transaction_style not in TRANSACTION_STYLE_VALUES:
+            raise ValueError(
+                "Invalid value for transaction_style: %s (must be in %s)"
+                % (transaction_style, TRANSACTION_STYLE_VALUES)
+            )
+        self.transaction_style = transaction_style
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+
+        request_started.connect(_request_websocket_started)
+        websocket_started.connect(_request_websocket_started)
+        got_background_exception.connect(_capture_exception)
+        got_request_exception.connect(_capture_exception)
+        got_websocket_exception.connect(_capture_exception)
+
+        patch_asgi_app()
+        patch_scaffold_route()
+
+
+def patch_asgi_app():
+    # type: () -> None
+    old_app = Quart.__call__
+
+    async def sentry_patched_asgi_app(self, scope, receive, send):
+        # type: (Any, Any, Any, Any) -> Any
+        if sentry_sdk.get_client().get_integration(QuartIntegration) is None:
+            return await old_app(self, scope, receive, send)
+
+        middleware = SentryAsgiMiddleware(
+            lambda *a, **kw: old_app(self, *a, **kw),
+            span_origin=QuartIntegration.origin,
+        )
+        middleware.__call__ = middleware._run_asgi3
+        return await middleware(scope, receive, send)
+
+    Quart.__call__ = sentry_patched_asgi_app
+
+
+def patch_scaffold_route():
+    # type: () -> None
+    old_route = Scaffold.route
+
+    def _sentry_route(*args, **kwargs):
+        # type: (*Any, **Any) -> Any
+        old_decorator = old_route(*args, **kwargs)
+
+        def decorator(old_func):
+            # type: (Any) -> Any
+
+            if inspect.isfunction(old_func) and not asyncio.iscoroutinefunction(
+                old_func
+            ):
+
+                @wraps(old_func)
+                @ensure_integration_enabled(QuartIntegration, old_func)
+                def _sentry_func(*args, **kwargs):
+                    # type: (*Any, **Any) -> Any
+                    current_scope = sentry_sdk.get_current_scope()
+                    if current_scope.transaction is not None:
+                        current_scope.transaction.update_active_thread()
+
+                    sentry_scope = sentry_sdk.get_isolation_scope()
+                    if sentry_scope.profile is not None:
+                        sentry_scope.profile.update_active_thread_id()
+
+                    return old_func(*args, **kwargs)
+
+                return old_decorator(_sentry_func)
+
+            return old_decorator(old_func)
+
+        return decorator
+
+    Scaffold.route = _sentry_route
+
+
+def _set_transaction_name_and_source(scope, transaction_style, request):
+    # type: (sentry_sdk.Scope, str, Request) -> None
+
+    try:
+        name_for_style = {
+            "url": request.url_rule.rule,
+            "endpoint": request.url_rule.endpoint,
+        }
+        scope.set_transaction_name(
+            name_for_style[transaction_style],
+            source=SOURCE_FOR_STYLE[transaction_style],
+        )
+    except Exception:
+        pass
+
+
+async def _request_websocket_started(app, **kwargs):
+    # type: (Quart, **Any) -> None
+    integration = sentry_sdk.get_client().get_integration(QuartIntegration)
+    if integration is None:
+        return
+
+    if has_request_context():
+        request_websocket = request._get_current_object()
+    if has_websocket_context():
+        request_websocket = websocket._get_current_object()
+
+    # Set the transaction name here, but rely on ASGI middleware
+    # to actually start the transaction
+    _set_transaction_name_and_source(
+        sentry_sdk.get_current_scope(), integration.transaction_style, request_websocket
+    )
+
+    scope = sentry_sdk.get_isolation_scope()
+    evt_processor = _make_request_event_processor(app, request_websocket, integration)
+    scope.add_event_processor(evt_processor)
+
+
+def _make_request_event_processor(app, request, integration):
+    # type: (Quart, Request, QuartIntegration) -> EventProcessor
+    def inner(event, hint):
+        # type: (Event, dict[str, Any]) -> Event
+        # If the request is gone, we are fine not logging its data.
+        # This might happen if the processor is pushed away to
+        # another thread.
+        if request is None:
+            return event
+
+        with capture_internal_exceptions():
+            # TODO: Figure out what to do with request body. Methods on request
+            # are async, but event processors are not.
+
+            request_info = event.setdefault("request", {})
+            request_info["url"] = request.url
+            request_info["query_string"] = request.query_string
+            request_info["method"] = request.method
+            request_info["headers"] = _filter_headers(dict(request.headers))
+
+            if should_send_default_pii():
+                request_info["env"] = {"REMOTE_ADDR": request.access_route[0]}
+                _add_user_to_event(event)
+
+        return event
+
+    return inner
+
+
+async def _capture_exception(sender, exception, **kwargs):
+    # type: (Quart, Union[ValueError, BaseException], **Any) -> None
+    integration = sentry_sdk.get_client().get_integration(QuartIntegration)
+    if integration is None:
+        return
+
+    event, hint = event_from_exception(
+        exception,
+        client_options=sentry_sdk.get_client().options,
+        mechanism={"type": "quart", "handled": False},
+    )
+
+    sentry_sdk.capture_event(event, hint=hint)
+
+
+def _add_user_to_event(event):
+    # type: (Event) -> None
+    if quart_auth is None:
+        return
+
+    user = quart_auth.current_user
+    if user is None:
+        return
+
+    with capture_internal_exceptions():
+        user_info = event.setdefault("user", {})
+
+        user_info["id"] = quart_auth.current_user._auth_id
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/ray.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/ray.py
new file mode 100644
index 00000000..0842b922
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/ray.py
@@ -0,0 +1,141 @@
+import inspect
+import sys
+
+import sentry_sdk
+from sentry_sdk.consts import OP, SPANSTATUS
+from sentry_sdk.integrations import _check_minimum_version, DidNotEnable, Integration
+from sentry_sdk.tracing import TransactionSource
+from sentry_sdk.utils import (
+    event_from_exception,
+    logger,
+    package_version,
+    qualname_from_function,
+    reraise,
+)
+
+try:
+    import ray  # type: ignore[import-not-found]
+except ImportError:
+    raise DidNotEnable("Ray not installed.")
+import functools
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from collections.abc import Callable
+    from typing import Any, Optional
+    from sentry_sdk.utils import ExcInfo
+
+
+def _check_sentry_initialized():
+    # type: () -> None
+    if sentry_sdk.get_client().is_active():
+        return
+
+    logger.debug(
+        "[Tracing] Sentry not initialized in ray cluster worker, performance data will be discarded."
+    )
+
+
+def _patch_ray_remote():
+    # type: () -> None
+    old_remote = ray.remote
+
+    @functools.wraps(old_remote)
+    def new_remote(f, *args, **kwargs):
+        # type: (Callable[..., Any], *Any, **Any) -> Callable[..., Any]
+        if inspect.isclass(f):
+            # Ray Actors
+            # (https://docs.ray.io/en/latest/ray-core/actors.html)
+            # are not supported
+            # (Only Ray Tasks are supported)
+            return old_remote(f, *args, **kwargs)
+
+        def _f(*f_args, _tracing=None, **f_kwargs):
+            # type: (*Any, Optional[dict[str, Any]], **Any) -> Any
+            """
+            Ray Worker
+            """
+            _check_sentry_initialized()
+
+            transaction = sentry_sdk.continue_trace(
+                _tracing or {},
+                op=OP.QUEUE_TASK_RAY,
+                name=qualname_from_function(f),
+                origin=RayIntegration.origin,
+                source=TransactionSource.TASK,
+            )
+
+            with sentry_sdk.start_transaction(transaction) as transaction:
+                try:
+                    result = f(*f_args, **f_kwargs)
+                    transaction.set_status(SPANSTATUS.OK)
+                except Exception:
+                    transaction.set_status(SPANSTATUS.INTERNAL_ERROR)
+                    exc_info = sys.exc_info()
+                    _capture_exception(exc_info)
+                    reraise(*exc_info)
+
+                return result
+
+        rv = old_remote(_f, *args, **kwargs)
+        old_remote_method = rv.remote
+
+        def _remote_method_with_header_propagation(*args, **kwargs):
+            # type: (*Any, **Any) -> Any
+            """
+            Ray Client
+            """
+            with sentry_sdk.start_span(
+                op=OP.QUEUE_SUBMIT_RAY,
+                name=qualname_from_function(f),
+                origin=RayIntegration.origin,
+            ) as span:
+                tracing = {
+                    k: v
+                    for k, v in sentry_sdk.get_current_scope().iter_trace_propagation_headers()
+                }
+                try:
+                    result = old_remote_method(*args, **kwargs, _tracing=tracing)
+                    span.set_status(SPANSTATUS.OK)
+                except Exception:
+                    span.set_status(SPANSTATUS.INTERNAL_ERROR)
+                    exc_info = sys.exc_info()
+                    _capture_exception(exc_info)
+                    reraise(*exc_info)
+
+                return result
+
+        rv.remote = _remote_method_with_header_propagation
+
+        return rv
+
+    ray.remote = new_remote
+
+
+def _capture_exception(exc_info, **kwargs):
+    # type: (ExcInfo, **Any) -> None
+    client = sentry_sdk.get_client()
+
+    event, hint = event_from_exception(
+        exc_info,
+        client_options=client.options,
+        mechanism={
+            "handled": False,
+            "type": RayIntegration.identifier,
+        },
+    )
+    sentry_sdk.capture_event(event, hint=hint)
+
+
+class RayIntegration(Integration):
+    identifier = "ray"
+    origin = f"auto.queue.{identifier}"
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        version = package_version("ray")
+        _check_minimum_version(RayIntegration, version)
+
+        _patch_ray_remote()
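+
+
+# A minimal usage sketch, assuming a valid DSN (placeholder below) and a
+# running Ray cluster; `.remote()` starts a client-side span and the worker
+# continues the trace via the injected `_tracing` headers:
+#
+#     import ray
+#     import sentry_sdk
+#     from sentry_sdk.integrations.ray import RayIntegration
+#
+#     sentry_sdk.init(
+#         dsn="https://examplePublicKey@o0.ingest.sentry.io/0",
+#         traces_sample_rate=1.0,
+#         integrations=[RayIntegration()],
+#     )
+#     ray.init()
+#
+#     @ray.remote
+#     def add(a, b):
+#         return a + b
+#
+#     assert ray.get(add.remote(1, 2)) == 3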
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/__init__.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/__init__.py
new file mode 100644
index 00000000..f4431382
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/__init__.py
@@ -0,0 +1,38 @@
+from sentry_sdk.integrations import Integration, DidNotEnable
+from sentry_sdk.integrations.redis.consts import _DEFAULT_MAX_DATA_SIZE
+from sentry_sdk.integrations.redis.rb import _patch_rb
+from sentry_sdk.integrations.redis.redis import _patch_redis
+from sentry_sdk.integrations.redis.redis_cluster import _patch_redis_cluster
+from sentry_sdk.integrations.redis.redis_py_cluster_legacy import _patch_rediscluster
+from sentry_sdk.utils import logger
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Optional
+
+
+class RedisIntegration(Integration):
+    identifier = "redis"
+
+    def __init__(self, max_data_size=_DEFAULT_MAX_DATA_SIZE, cache_prefixes=None):
+        # type: (int, Optional[list[str]]) -> None
+        self.max_data_size = max_data_size
+        self.cache_prefixes = cache_prefixes if cache_prefixes is not None else []
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        try:
+            from redis import StrictRedis, client
+        except ImportError:
+            raise DidNotEnable("Redis client not installed")
+
+        _patch_redis(StrictRedis, client)
+        _patch_redis_cluster()
+        _patch_rb()
+
+        try:
+            _patch_rediscluster()
+        except Exception:
+            logger.exception("Error occurred while patching `rediscluster` library")
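+
+
+# A minimal usage sketch (placeholder DSN): max_data_size caps how much
+# command data is attached to spans, and cache_prefixes opts matching keys
+# into the cache spans built in modules/caches.py:
+#
+#     import sentry_sdk
+#     from sentry_sdk.integrations.redis import RedisIntegration
+#
+#     sentry_sdk.init(
+#         dsn="https://examplePublicKey@o0.ingest.sentry.io/0",
+#         integrations=[
+#             RedisIntegration(max_data_size=512, cache_prefixes=["mycache:"])
+#         ],
+#     )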
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/_async_common.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/_async_common.py
new file mode 100644
index 00000000..196e85e7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/_async_common.py
@@ -0,0 +1,108 @@
+import sentry_sdk
+from sentry_sdk.consts import OP
+from sentry_sdk.integrations.redis.consts import SPAN_ORIGIN
+from sentry_sdk.integrations.redis.modules.caches import (
+    _compile_cache_span_properties,
+    _set_cache_data,
+)
+from sentry_sdk.integrations.redis.modules.queries import _compile_db_span_properties
+from sentry_sdk.integrations.redis.utils import (
+    _set_client_data,
+    _set_pipeline_data,
+)
+from sentry_sdk.tracing import Span
+from sentry_sdk.utils import capture_internal_exceptions
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from collections.abc import Callable
+    from typing import Any, Union
+    from redis.asyncio.client import Pipeline, StrictRedis
+    from redis.asyncio.cluster import ClusterPipeline, RedisCluster
+
+
+def patch_redis_async_pipeline(
+    pipeline_cls, is_cluster, get_command_args_fn, set_db_data_fn
+):
+    # type: (Union[type[Pipeline[Any]], type[ClusterPipeline[Any]]], bool, Any, Callable[[Span, Any], None]) -> None
+    old_execute = pipeline_cls.execute
+
+    from sentry_sdk.integrations.redis import RedisIntegration
+
+    async def _sentry_execute(self, *args, **kwargs):
+        # type: (Any, *Any, **Any) -> Any
+        if sentry_sdk.get_client().get_integration(RedisIntegration) is None:
+            return await old_execute(self, *args, **kwargs)
+
+        with sentry_sdk.start_span(
+            op=OP.DB_REDIS,
+            name="redis.pipeline.execute",
+            origin=SPAN_ORIGIN,
+        ) as span:
+            with capture_internal_exceptions():
+                set_db_data_fn(span, self)
+                _set_pipeline_data(
+                    span,
+                    is_cluster,
+                    get_command_args_fn,
+                    False if is_cluster else self.is_transaction,
+                    self._command_stack if is_cluster else self.command_stack,
+                )
+
+            return await old_execute(self, *args, **kwargs)
+
+    pipeline_cls.execute = _sentry_execute  # type: ignore
+
+
+def patch_redis_async_client(cls, is_cluster, set_db_data_fn):
+    # type: (Union[type[StrictRedis[Any]], type[RedisCluster[Any]]], bool, Callable[[Span, Any], None]) -> None
+    old_execute_command = cls.execute_command
+
+    from sentry_sdk.integrations.redis import RedisIntegration
+
+    async def _sentry_execute_command(self, name, *args, **kwargs):
+        # type: (Any, str, *Any, **Any) -> Any
+        integration = sentry_sdk.get_client().get_integration(RedisIntegration)
+        if integration is None:
+            return await old_execute_command(self, name, *args, **kwargs)
+
+        cache_properties = _compile_cache_span_properties(
+            name,
+            args,
+            kwargs,
+            integration,
+        )
+
+        cache_span = None
+        if cache_properties["is_cache_key"] and cache_properties["op"] is not None:
+            cache_span = sentry_sdk.start_span(
+                op=cache_properties["op"],
+                name=cache_properties["description"],
+                origin=SPAN_ORIGIN,
+            )
+            cache_span.__enter__()
+
+        db_properties = _compile_db_span_properties(integration, name, args)
+
+        db_span = sentry_sdk.start_span(
+            op=db_properties["op"],
+            name=db_properties["description"],
+            origin=SPAN_ORIGIN,
+        )
+        db_span.__enter__()
+
+        set_db_data_fn(db_span, self)
+        _set_client_data(db_span, is_cluster, name, *args)
+
+        value = await old_execute_command(self, name, *args, **kwargs)
+
+        db_span.__exit__(None, None, None)
+
+        if cache_span:
+            _set_cache_data(cache_span, self, cache_properties, value)
+            cache_span.__exit__(None, None, None)
+
+        return value
+
+    cls.execute_command = _sentry_execute_command  # type: ignore
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/_sync_common.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/_sync_common.py
new file mode 100644
index 00000000..ef10e9e4
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/_sync_common.py
@@ -0,0 +1,113 @@
+import sentry_sdk
+from sentry_sdk.consts import OP
+from sentry_sdk.integrations.redis.consts import SPAN_ORIGIN
+from sentry_sdk.integrations.redis.modules.caches import (
+    _compile_cache_span_properties,
+    _set_cache_data,
+)
+from sentry_sdk.integrations.redis.modules.queries import _compile_db_span_properties
+from sentry_sdk.integrations.redis.utils import (
+    _set_client_data,
+    _set_pipeline_data,
+)
+from sentry_sdk.tracing import Span
+from sentry_sdk.utils import capture_internal_exceptions
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from collections.abc import Callable
+    from typing import Any
+
+
+def patch_redis_pipeline(
+    pipeline_cls,
+    is_cluster,
+    get_command_args_fn,
+    set_db_data_fn,
+):
+    # type: (Any, bool, Any, Callable[[Span, Any], None]) -> None
+    old_execute = pipeline_cls.execute
+
+    from sentry_sdk.integrations.redis import RedisIntegration
+
+    def sentry_patched_execute(self, *args, **kwargs):
+        # type: (Any, *Any, **Any) -> Any
+        if sentry_sdk.get_client().get_integration(RedisIntegration) is None:
+            return old_execute(self, *args, **kwargs)
+
+        with sentry_sdk.start_span(
+            op=OP.DB_REDIS,
+            name="redis.pipeline.execute",
+            origin=SPAN_ORIGIN,
+        ) as span:
+            with capture_internal_exceptions():
+                set_db_data_fn(span, self)
+                _set_pipeline_data(
+                    span,
+                    is_cluster,
+                    get_command_args_fn,
+                    False if is_cluster else self.transaction,
+                    self.command_stack,
+                )
+
+            return old_execute(self, *args, **kwargs)
+
+    pipeline_cls.execute = sentry_patched_execute
+
+
+def patch_redis_client(cls, is_cluster, set_db_data_fn):
+    # type: (Any, bool, Callable[[Span, Any], None]) -> None
+    """
+    This function can be used to instrument custom redis client classes or
+    subclasses.
+    """
+    old_execute_command = cls.execute_command
+
+    from sentry_sdk.integrations.redis import RedisIntegration
+
+    def sentry_patched_execute_command(self, name, *args, **kwargs):
+        # type: (Any, str, *Any, **Any) -> Any
+        integration = sentry_sdk.get_client().get_integration(RedisIntegration)
+        if integration is None:
+            return old_execute_command(self, name, *args, **kwargs)
+
+        cache_properties = _compile_cache_span_properties(
+            name,
+            args,
+            kwargs,
+            integration,
+        )
+
+        cache_span = None
+        if cache_properties["is_cache_key"] and cache_properties["op"] is not None:
+            cache_span = sentry_sdk.start_span(
+                op=cache_properties["op"],
+                name=cache_properties["description"],
+                origin=SPAN_ORIGIN,
+            )
+            cache_span.__enter__()
+
+        db_properties = _compile_db_span_properties(integration, name, args)
+
+        db_span = sentry_sdk.start_span(
+            op=db_properties["op"],
+            name=db_properties["description"],
+            origin=SPAN_ORIGIN,
+        )
+        db_span.__enter__()
+
+        set_db_data_fn(db_span, self)
+        _set_client_data(db_span, is_cluster, name, *args)
+
+        value = old_execute_command(self, name, *args, **kwargs)
+
+        db_span.__exit__(None, None, None)
+
+        if cache_span:
+            _set_cache_data(cache_span, self, cache_properties, value)
+            cache_span.__exit__(None, None, None)
+
+        return value
+
+    cls.execute_command = sentry_patched_execute_command
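As the docstring of `patch_redis_client` notes, the function can instrument custom client classes. A minimal sketch of what that could look like, assuming redis-py is installed; `MyRedis` is a hypothetical subclass, and `_set_db_data` is the helper defined in `modules/queries.py` further down in this diff:

```python
import redis

from sentry_sdk.integrations.redis._sync_common import patch_redis_client
from sentry_sdk.integrations.redis.modules.queries import _set_db_data


class MyRedis(redis.StrictRedis):
    """Hypothetical project-specific client subclass."""


# After this call, every MyRedis.execute_command() is wrapped in a db.redis
# span (plus a cache span when the key matches a configured cache prefix).
patch_redis_client(MyRedis, is_cluster=False, set_db_data_fn=_set_db_data)
```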
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/consts.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/consts.py
new file mode 100644
index 00000000..737e8297
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/consts.py
@@ -0,0 +1,19 @@
+SPAN_ORIGIN = "auto.db.redis"
+
+_SINGLE_KEY_COMMANDS = frozenset(
+    ["decr", "decrby", "get", "incr", "incrby", "pttl", "set", "setex", "setnx", "ttl"],
+)
+_MULTI_KEY_COMMANDS = frozenset(
+    [
+        "del",
+        "touch",
+        "unlink",
+        "mget",
+    ],
+)
+_COMMANDS_INCLUDING_SENSITIVE_DATA = [
+    "auth",
+]
+_MAX_NUM_ARGS = 10  # Trim argument lists to this many values
+_MAX_NUM_COMMANDS = 10  # Trim command lists to this many values
+_DEFAULT_MAX_DATA_SIZE = 1024
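These trimming limits are applied in `utils.py` below: argument lists are cut at `_MAX_NUM_ARGS` and pipeline command lists at `_MAX_NUM_COMMANDS`, while the full count is still reported. A small illustration of the resulting span data shape; the command stack here is fabricated:

```python
from sentry_sdk.integrations.redis.consts import _MAX_NUM_COMMANDS

# A fabricated redis-py style command stack: ((name, *args), options) pairs.
command_stack = [(("SET", f"key{i}", i), {}) for i in range(25)]

redis_commands = {
    "count": len(command_stack),                     # full pipeline size
    "first_ten": command_stack[:_MAX_NUM_COMMANDS],  # only the first ten rendered
}
assert redis_commands["count"] == 25
assert len(redis_commands["first_ten"]) == 10
```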
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/modules/__init__.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/modules/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/modules/__init__.py
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/modules/caches.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/modules/caches.py
new file mode 100644
index 00000000..c6fc19f5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/modules/caches.py
@@ -0,0 +1,121 @@
+"""
+Code used for the Caches module in Sentry
+"""
+
+from sentry_sdk.consts import OP, SPANDATA
+from sentry_sdk.integrations.redis.utils import _get_safe_key, _key_as_string
+from sentry_sdk.utils import capture_internal_exceptions
+
+GET_COMMANDS = ("get", "mget")
+SET_COMMANDS = ("set", "setex")
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from sentry_sdk.integrations.redis import RedisIntegration
+    from sentry_sdk.tracing import Span
+    from typing import Any, Optional
+
+
+def _get_op(name):
+    # type: (str) -> Optional[str]
+    op = None
+    if name.lower() in GET_COMMANDS:
+        op = OP.CACHE_GET
+    elif name.lower() in SET_COMMANDS:
+        op = OP.CACHE_PUT
+
+    return op
+
+
+def _compile_cache_span_properties(redis_command, args, kwargs, integration):
+    # type: (str, tuple[Any, ...], dict[str, Any], RedisIntegration) -> dict[str, Any]
+    key = _get_safe_key(redis_command, args, kwargs)
+    key_as_string = _key_as_string(key)
+    keys_as_string = key_as_string.split(", ")
+
+    is_cache_key = False
+    for prefix in integration.cache_prefixes:
+        for kee in keys_as_string:
+            if kee.startswith(prefix):
+                is_cache_key = True
+                break
+        if is_cache_key:
+            break
+
+    value = None
+    if redis_command.lower() in SET_COMMANDS:
+        value = args[-1]
+
+    properties = {
+        "op": _get_op(redis_command),
+        "description": _get_cache_span_description(
+            redis_command, args, kwargs, integration
+        ),
+        "key": key,
+        "key_as_string": key_as_string,
+        "redis_command": redis_command.lower(),
+        "is_cache_key": is_cache_key,
+        "value": value,
+    }
+
+    return properties
+
+
+def _get_cache_span_description(redis_command, args, kwargs, integration):
+    # type: (str, tuple[Any, ...], dict[str, Any], RedisIntegration) -> str
+    description = _key_as_string(_get_safe_key(redis_command, args, kwargs))
+
+    data_should_be_truncated = (
+        integration.max_data_size and len(description) > integration.max_data_size
+    )
+    if data_should_be_truncated:
+        description = description[: integration.max_data_size - len("...")] + "..."
+
+    return description
+
+
+def _set_cache_data(span, redis_client, properties, return_value):
+    # type: (Span, Any, dict[str, Any], Optional[Any]) -> None
+    with capture_internal_exceptions():
+        span.set_data(SPANDATA.CACHE_KEY, properties["key"])
+
+        if properties["redis_command"] in GET_COMMANDS:
+            if return_value is not None:
+                span.set_data(SPANDATA.CACHE_HIT, True)
+                size = (
+                    len(str(return_value).encode("utf-8"))
+                    if not isinstance(return_value, bytes)
+                    else len(return_value)
+                )
+                span.set_data(SPANDATA.CACHE_ITEM_SIZE, size)
+            else:
+                span.set_data(SPANDATA.CACHE_HIT, False)
+
+        elif properties["redis_command"] in SET_COMMANDS:
+            if properties["value"] is not None:
+                size = (
+                    len(properties["value"].encode("utf-8"))
+                    if not isinstance(properties["value"], bytes)
+                    else len(properties["value"])
+                )
+                span.set_data(SPANDATA.CACHE_ITEM_SIZE, size)
+
+        try:
+            connection_params = redis_client.connection_pool.connection_kwargs
+        except AttributeError:
+            # If it is a cluster, there is no connection_pool attribute so we
+            # need to get the default node from the cluster instance
+            default_node = redis_client.get_default_node()
+            connection_params = {
+                "host": default_node.host,
+                "port": default_node.port,
+            }
+
+        host = connection_params.get("host")
+        if host is not None:
+            span.set_data(SPANDATA.NETWORK_PEER_ADDRESS, host)
+
+        port = connection_params.get("port")
+        if port is not None:
+            span.set_data(SPANDATA.NETWORK_PEER_PORT, port)
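Whether a command also becomes a cache span is driven entirely by the `cache_prefixes` configured on the integration, as `_compile_cache_span_properties` above shows. A hedged configuration sketch; the DSN and prefix are placeholders:

```python
import sentry_sdk
from sentry_sdk.integrations.redis import RedisIntegration

sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder
    traces_sample_rate=1.0,
    integrations=[
        # Keys starting with this prefix are treated as cache keys, so a GET
        # on "myapp:cache:user:42" emits a cache.get span (with hit/miss and
        # item-size data) alongside the usual db.redis span.
        RedisIntegration(cache_prefixes=["myapp:cache:"]),
    ],
)
```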
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/modules/queries.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/modules/queries.py
new file mode 100644
index 00000000..e0d85a4e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/modules/queries.py
@@ -0,0 +1,68 @@
+"""
+Code used for the Queries module in Sentry
+"""
+
+from sentry_sdk.consts import OP, SPANDATA
+from sentry_sdk.integrations.redis.utils import _get_safe_command
+from sentry_sdk.utils import capture_internal_exceptions
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from redis import Redis
+    from sentry_sdk.integrations.redis import RedisIntegration
+    from sentry_sdk.tracing import Span
+    from typing import Any
+
+
+def _compile_db_span_properties(integration, redis_command, args):
+    # type: (RedisIntegration, str, tuple[Any, ...]) -> dict[str, Any]
+    description = _get_db_span_description(integration, redis_command, args)
+
+    properties = {
+        "op": OP.DB_REDIS,
+        "description": description,
+    }
+
+    return properties
+
+
+def _get_db_span_description(integration, command_name, args):
+    # type: (RedisIntegration, str, tuple[Any, ...]) -> str
+    description = command_name
+
+    with capture_internal_exceptions():
+        description = _get_safe_command(command_name, args)
+
+    data_should_be_truncated = (
+        integration.max_data_size and len(description) > integration.max_data_size
+    )
+    if data_should_be_truncated:
+        description = description[: integration.max_data_size - len("...")] + "..."
+
+    return description
+
+
+def _set_db_data_on_span(span, connection_params):
+    # type: (Span, dict[str, Any]) -> None
+    span.set_data(SPANDATA.DB_SYSTEM, "redis")
+
+    db = connection_params.get("db")
+    if db is not None:
+        span.set_data(SPANDATA.DB_NAME, str(db))
+
+    host = connection_params.get("host")
+    if host is not None:
+        span.set_data(SPANDATA.SERVER_ADDRESS, host)
+
+    port = connection_params.get("port")
+    if port is not None:
+        span.set_data(SPANDATA.SERVER_PORT, port)
+
+
+def _set_db_data(span, redis_instance):
+    # type: (Span, Redis[Any]) -> None
+    try:
+        _set_db_data_on_span(span, redis_instance.connection_pool.connection_kwargs)
+    except AttributeError:
+        pass  # connection_kwargs may be missing in some cases
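The truncation rule in `_get_db_span_description` keeps the final description, including the `"..."` suffix, within `max_data_size` (1024 by default, per `_DEFAULT_MAX_DATA_SIZE` in `consts.py`). A standalone illustration of the same arithmetic:

```python
max_data_size = 1024  # the default, see _DEFAULT_MAX_DATA_SIZE in consts.py

description = "SET 'mykey' " + "x" * 5000  # an oversized safe-command string
if max_data_size and len(description) > max_data_size:
    description = description[: max_data_size - len("...")] + "..."

assert len(description) == max_data_size
assert description.endswith("...")
```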
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/rb.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/rb.py
new file mode 100644
index 00000000..1b3e2e53
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/rb.py
@@ -0,0 +1,32 @@
+"""
+Instrumentation for Redis Blaster (rb)
+
+https://github.com/getsentry/rb
+"""
+
+from sentry_sdk.integrations.redis._sync_common import patch_redis_client
+from sentry_sdk.integrations.redis.modules.queries import _set_db_data
+
+
+def _patch_rb():
+    # type: () -> None
+    try:
+        import rb.clients  # type: ignore
+    except ImportError:
+        pass
+    else:
+        patch_redis_client(
+            rb.clients.FanoutClient,
+            is_cluster=False,
+            set_db_data_fn=_set_db_data,
+        )
+        patch_redis_client(
+            rb.clients.MappingClient,
+            is_cluster=False,
+            set_db_data_fn=_set_db_data,
+        )
+        patch_redis_client(
+            rb.clients.RoutingClient,
+            is_cluster=False,
+            set_db_data_fn=_set_db_data,
+        )
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/redis.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/redis.py
new file mode 100644
index 00000000..c92958a3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/redis.py
@@ -0,0 +1,69 @@
+"""
+Instrumentation for Redis
+
+https://github.com/redis/redis-py
+"""
+
+from sentry_sdk.integrations.redis._sync_common import (
+    patch_redis_client,
+    patch_redis_pipeline,
+)
+from sentry_sdk.integrations.redis.modules.queries import _set_db_data
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any, Sequence
+
+
+def _get_redis_command_args(command):
+    # type: (Any) -> Sequence[Any]
+    return command[0]
+
+
+def _patch_redis(StrictRedis, client):  # noqa: N803
+    # type: (Any, Any) -> None
+    patch_redis_client(
+        StrictRedis,
+        is_cluster=False,
+        set_db_data_fn=_set_db_data,
+    )
+    patch_redis_pipeline(
+        client.Pipeline,
+        is_cluster=False,
+        get_command_args_fn=_get_redis_command_args,
+        set_db_data_fn=_set_db_data,
+    )
+    try:
+        strict_pipeline = client.StrictPipeline
+    except AttributeError:
+        pass
+    else:
+        patch_redis_pipeline(
+            strict_pipeline,
+            is_cluster=False,
+            get_command_args_fn=_get_redis_command_args,
+            set_db_data_fn=_set_db_data,
+        )
+
+    try:
+        import redis.asyncio
+    except ImportError:
+        pass
+    else:
+        from sentry_sdk.integrations.redis._async_common import (
+            patch_redis_async_client,
+            patch_redis_async_pipeline,
+        )
+
+        patch_redis_async_client(
+            redis.asyncio.client.StrictRedis,
+            is_cluster=False,
+            set_db_data_fn=_set_db_data,
+        )
+        patch_redis_async_pipeline(
+            redis.asyncio.client.Pipeline,
+            False,
+            _get_redis_command_args,
+            set_db_data_fn=_set_db_data,
+        )
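`_get_redis_command_args` relies on the shape of redis-py's `Pipeline.command_stack`, whose entries are `(args, options)` pairs with the command name as the first element of `args`. An illustrative, fabricated entry:

```python
# One fabricated command_stack entry as redis-py builds it internally.
entry = (("SET", "mykey", "value"), {})

args = entry[0]          # what _get_redis_command_args(command) returns
assert args[0] == "SET"  # the command name, later rendered by _get_safe_command
```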
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/redis_cluster.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/redis_cluster.py
new file mode 100644
index 00000000..80cdc723
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/redis_cluster.py
@@ -0,0 +1,99 @@
+"""
+Instrumentation for RedisCluster
+This is part of the main redis-py client.
+
+https://github.com/redis/redis-py/blob/master/redis/cluster.py
+"""
+
+from sentry_sdk.integrations.redis._sync_common import (
+    patch_redis_client,
+    patch_redis_pipeline,
+)
+from sentry_sdk.integrations.redis.modules.queries import _set_db_data_on_span
+from sentry_sdk.integrations.redis.utils import _parse_rediscluster_command
+
+from sentry_sdk.utils import capture_internal_exceptions
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+    from redis import RedisCluster
+    from redis.asyncio.cluster import (
+        RedisCluster as AsyncRedisCluster,
+        ClusterPipeline as AsyncClusterPipeline,
+    )
+    from sentry_sdk.tracing import Span
+
+
+def _set_async_cluster_db_data(span, async_redis_cluster_instance):
+    # type: (Span, AsyncRedisCluster[Any]) -> None
+    default_node = async_redis_cluster_instance.get_default_node()
+    if default_node is not None and default_node.connection_kwargs is not None:
+        _set_db_data_on_span(span, default_node.connection_kwargs)
+
+
+def _set_async_cluster_pipeline_db_data(span, async_redis_cluster_pipeline_instance):
+    # type: (Span, AsyncClusterPipeline[Any]) -> None
+    with capture_internal_exceptions():
+        _set_async_cluster_db_data(
+            span,
+            # The AsyncClusterPipeline has always had a `_client` attribute, but it is
+            # private (and invisible to mypy), so relying on it is potentially fragile - see https://github.com/redis/redis-py/blame/v5.0.0/redis/asyncio/cluster.py#L1386
+            async_redis_cluster_pipeline_instance._client,  # type: ignore[attr-defined]
+        )
+
+
+def _set_cluster_db_data(span, redis_cluster_instance):
+    # type: (Span, RedisCluster[Any]) -> None
+    default_node = redis_cluster_instance.get_default_node()
+
+    if default_node is not None:
+        connection_params = {
+            "host": default_node.host,
+            "port": default_node.port,
+        }
+        _set_db_data_on_span(span, connection_params)
+
+
+def _patch_redis_cluster():
+    # type: () -> None
+    """Patches the cluster module on redis SDK (as opposed to rediscluster library)"""
+    try:
+        from redis import RedisCluster, cluster
+    except ImportError:
+        pass
+    else:
+        patch_redis_client(
+            RedisCluster,
+            is_cluster=True,
+            set_db_data_fn=_set_cluster_db_data,
+        )
+        patch_redis_pipeline(
+            cluster.ClusterPipeline,
+            is_cluster=True,
+            get_command_args_fn=_parse_rediscluster_command,
+            set_db_data_fn=_set_cluster_db_data,
+        )
+
+    try:
+        from redis.asyncio import cluster as async_cluster
+    except ImportError:
+        pass
+    else:
+        from sentry_sdk.integrations.redis._async_common import (
+            patch_redis_async_client,
+            patch_redis_async_pipeline,
+        )
+
+        patch_redis_async_client(
+            async_cluster.RedisCluster,
+            is_cluster=True,
+            set_db_data_fn=_set_async_cluster_db_data,
+        )
+        patch_redis_async_pipeline(
+            async_cluster.ClusterPipeline,
+            is_cluster=True,
+            get_command_args_fn=_parse_rediscluster_command,
+            set_db_data_fn=_set_async_cluster_pipeline_db_data,
+        )
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/redis_py_cluster_legacy.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/redis_py_cluster_legacy.py
new file mode 100644
index 00000000..ad1c2363
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/redis_py_cluster_legacy.py
@@ -0,0 +1,50 @@
+"""
+Instrumentation for redis-py-cluster
+The project redis-py-cluster is EOL and was integrated into redis-py starting from version 4.1.0 (Dec 26, 2021).
+
+https://github.com/grokzen/redis-py-cluster
+"""
+
+from sentry_sdk.integrations.redis._sync_common import (
+    patch_redis_client,
+    patch_redis_pipeline,
+)
+from sentry_sdk.integrations.redis.modules.queries import _set_db_data
+from sentry_sdk.integrations.redis.utils import _parse_rediscluster_command
+
+
+def _patch_rediscluster():
+    # type: () -> None
+    try:
+        import rediscluster  # type: ignore
+    except ImportError:
+        return
+
+    patch_redis_client(
+        rediscluster.RedisCluster,
+        is_cluster=True,
+        set_db_data_fn=_set_db_data,
+    )
+
+    # Up to v1.3.6, the __version__ attribute is a tuple;
+    # from v2.0.0 on, __version__ is a string and VERSION is a tuple.
+    version = getattr(rediscluster, "VERSION", rediscluster.__version__)
+
+    # StrictRedisCluster was introduced in v0.2.0 and removed in v2.0.0
+    # https://github.com/Grokzen/redis-py-cluster/blob/master/docs/release-notes.rst
+    if (0, 2, 0) < version < (2, 0, 0):
+        pipeline_cls = rediscluster.pipeline.StrictClusterPipeline
+        patch_redis_client(
+            rediscluster.StrictRedisCluster,
+            is_cluster=True,
+            set_db_data_fn=_set_db_data,
+        )
+    else:
+        pipeline_cls = rediscluster.pipeline.ClusterPipeline
+
+    patch_redis_pipeline(
+        pipeline_cls,
+        is_cluster=True,
+        get_command_args_fn=_parse_rediscluster_command,
+        set_db_data_fn=_set_db_data,
+    )
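The version probe above leans on `getattr` falling back to the tuple-typed `__version__` on old releases. A minimal stand-in demonstrating the fallback; the class below is fabricated:

```python
class FakeOldRediscluster:
    # redis-py-cluster <= 1.3.6: no VERSION constant, __version__ is a tuple.
    __version__ = (1, 3, 6)


version = getattr(FakeOldRediscluster, "VERSION", FakeOldRediscluster.__version__)
assert version == (1, 3, 6)
assert (0, 2, 0) < version < (2, 0, 0)  # selects StrictClusterPipeline above
```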
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/utils.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/utils.py
new file mode 100644
index 00000000..27fae1e8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/redis/utils.py
@@ -0,0 +1,144 @@
+from sentry_sdk.consts import SPANDATA
+from sentry_sdk.integrations.redis.consts import (
+    _COMMANDS_INCLUDING_SENSITIVE_DATA,
+    _MAX_NUM_ARGS,
+    _MAX_NUM_COMMANDS,
+    _MULTI_KEY_COMMANDS,
+    _SINGLE_KEY_COMMANDS,
+)
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.utils import SENSITIVE_DATA_SUBSTITUTE
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any, Optional, Sequence
+    from sentry_sdk.tracing import Span
+
+
+def _get_safe_command(name, args):
+    # type: (str, Sequence[Any]) -> str
+    command_parts = [name]
+
+    for i, arg in enumerate(args):
+        if i >= _MAX_NUM_ARGS:  # keep at most _MAX_NUM_ARGS values, as documented in consts.py
+            break
+
+        name_low = name.lower()
+
+        if name_low in _COMMANDS_INCLUDING_SENSITIVE_DATA:
+            command_parts.append(SENSITIVE_DATA_SUBSTITUTE)
+            continue
+
+        arg_is_the_key = i == 0
+        if arg_is_the_key:
+            command_parts.append(repr(arg))
+
+        else:
+            if should_send_default_pii():
+                command_parts.append(repr(arg))
+            else:
+                command_parts.append(SENSITIVE_DATA_SUBSTITUTE)
+
+    command = " ".join(command_parts)
+    return command
+
+
+def _safe_decode(key):
+    # type: (Any) -> str
+    if isinstance(key, bytes):
+        try:
+            return key.decode()
+        except UnicodeDecodeError:
+            return ""
+
+    return str(key)
+
+
+def _key_as_string(key):
+    # type: (Any) -> str
+    if isinstance(key, (dict, list, tuple)):
+        key = ", ".join(_safe_decode(x) for x in key)
+    elif isinstance(key, bytes):
+        key = _safe_decode(key)
+    elif key is None:
+        key = ""
+    else:
+        key = str(key)
+
+    return key
+
+
+def _get_safe_key(method_name, args, kwargs):
+    # type: (str, Optional[tuple[Any, ...]], Optional[dict[str, Any]]) -> Optional[tuple[str, ...]]
+    """
+    Gets the key (or keys) from the arguments of the given method call.
+    The method_name can be a Redis command or a Django caching command.
+    """
+    key = None
+
+    if args is not None and method_name.lower() in _MULTI_KEY_COMMANDS:
+        # for example redis "mget"
+        key = tuple(args)
+
+    elif args is not None and len(args) >= 1:
+        # for example django "set_many/get_many" or redis "get"
+        if isinstance(args[0], (dict, list, tuple)):
+            key = tuple(args[0])
+        else:
+            key = (args[0],)
+
+    elif kwargs is not None and "key" in kwargs:
+        # this is a legacy case for older versions of Django
+        if isinstance(kwargs["key"], (list, tuple)):
+            if len(kwargs["key"]) > 0:
+                key = tuple(kwargs["key"])
+        else:
+            if kwargs["key"] is not None:
+                key = (kwargs["key"],)
+
+    return key
+
+
+def _parse_rediscluster_command(command):
+    # type: (Any) -> Sequence[Any]
+    return command.args
+
+
+def _set_pipeline_data(
+    span, is_cluster, get_command_args_fn, is_transaction, command_stack
+):
+    # type: (Span, bool, Any, bool, Sequence[Any]) -> None
+    span.set_tag("redis.is_cluster", is_cluster)
+    span.set_tag("redis.transaction", is_transaction)
+
+    commands = []
+    for i, arg in enumerate(command_stack):
+        if i >= _MAX_NUM_COMMANDS:
+            break
+
+        command = get_command_args_fn(arg)
+        commands.append(_get_safe_command(command[0], command[1:]))
+
+    span.set_data(
+        "redis.commands",
+        {
+            "count": len(command_stack),
+            "first_ten": commands,
+        },
+    )
+
+
+def _set_client_data(span, is_cluster, name, *args):
+    # type: (Span, bool, str, *Any) -> None
+    span.set_tag("redis.is_cluster", is_cluster)
+    if name:
+        span.set_tag("redis.command", name)
+        span.set_tag(SPANDATA.DB_OPERATION, name)
+
+    if name and args:
+        name_low = name.lower()
+        if (name_low in _SINGLE_KEY_COMMANDS) or (
+            name_low in _MULTI_KEY_COMMANDS and len(args) == 1
+        ):
+            span.set_tag("redis.key", args[0])
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/rq.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/rq.py
new file mode 100644
index 00000000..6d7fcf72
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/rq.py
@@ -0,0 +1,161 @@
+import weakref
+
+import sentry_sdk
+from sentry_sdk.consts import OP
+from sentry_sdk.api import continue_trace
+from sentry_sdk.integrations import _check_minimum_version, DidNotEnable, Integration
+from sentry_sdk.integrations.logging import ignore_logger
+from sentry_sdk.tracing import TransactionSource
+from sentry_sdk.utils import (
+    capture_internal_exceptions,
+    ensure_integration_enabled,
+    event_from_exception,
+    format_timestamp,
+    parse_version,
+)
+
+try:
+    from rq.queue import Queue
+    from rq.timeouts import JobTimeoutException
+    from rq.version import VERSION as RQ_VERSION
+    from rq.worker import Worker
+    from rq.job import JobStatus
+except ImportError:
+    raise DidNotEnable("RQ not installed")
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any, Callable
+
+    from sentry_sdk._types import Event, EventProcessor
+    from sentry_sdk.utils import ExcInfo
+
+    from rq.job import Job
+
+
+class RqIntegration(Integration):
+    identifier = "rq"
+    origin = f"auto.queue.{identifier}"
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        version = parse_version(RQ_VERSION)
+        _check_minimum_version(RqIntegration, version)
+
+        old_perform_job = Worker.perform_job
+
+        @ensure_integration_enabled(RqIntegration, old_perform_job)
+        def sentry_patched_perform_job(self, job, *args, **kwargs):
+            # type: (Any, Job, *Queue, **Any) -> bool
+            with sentry_sdk.new_scope() as scope:
+                scope.clear_breadcrumbs()
+                scope.add_event_processor(_make_event_processor(weakref.ref(job)))
+
+                transaction = continue_trace(
+                    job.meta.get("_sentry_trace_headers") or {},
+                    op=OP.QUEUE_TASK_RQ,
+                    name="unknown RQ task",
+                    source=TransactionSource.TASK,
+                    origin=RqIntegration.origin,
+                )
+
+                with capture_internal_exceptions():
+                    transaction.name = job.func_name
+
+                with sentry_sdk.start_transaction(
+                    transaction,
+                    custom_sampling_context={"rq_job": job},
+                ):
+                    rv = old_perform_job(self, job, *args, **kwargs)
+
+            if self.is_horse:
+                # We're inside a forked process and RQ is
+                # about to call `os._exit`. Make sure that our
+                # events get sent out.
+                sentry_sdk.get_client().flush()
+
+            return rv
+
+        Worker.perform_job = sentry_patched_perform_job
+
+        old_handle_exception = Worker.handle_exception
+
+        def sentry_patched_handle_exception(self, job, *exc_info, **kwargs):
+            # type: (Worker, Any, *Any, **Any) -> Any
+            retry = (
+                hasattr(job, "retries_left")
+                and job.retries_left
+                and job.retries_left > 0
+            )
+            failed = job._status == JobStatus.FAILED or job.is_failed
+            if failed and not retry:
+                _capture_exception(exc_info)
+
+            return old_handle_exception(self, job, *exc_info, **kwargs)
+
+        Worker.handle_exception = sentry_patched_handle_exception
+
+        old_enqueue_job = Queue.enqueue_job
+
+        @ensure_integration_enabled(RqIntegration, old_enqueue_job)
+        def sentry_patched_enqueue_job(self, job, **kwargs):
+            # type: (Queue, Any, **Any) -> Any
+            scope = sentry_sdk.get_current_scope()
+            if scope.span is not None:
+                job.meta["_sentry_trace_headers"] = dict(
+                    scope.iter_trace_propagation_headers()
+                )
+
+            return old_enqueue_job(self, job, **kwargs)
+
+        Queue.enqueue_job = sentry_patched_enqueue_job
+
+        ignore_logger("rq.worker")
+
+
+def _make_event_processor(weak_job):
+    # type: (Callable[[], Job]) -> EventProcessor
+    def event_processor(event, hint):
+        # type: (Event, dict[str, Any]) -> Event
+        job = weak_job()
+        if job is not None:
+            with capture_internal_exceptions():
+                extra = event.setdefault("extra", {})
+                rq_job = {
+                    "job_id": job.id,
+                    "func": job.func_name,
+                    "args": job.args,
+                    "kwargs": job.kwargs,
+                    "description": job.description,
+                }
+
+                if job.enqueued_at:
+                    rq_job["enqueued_at"] = format_timestamp(job.enqueued_at)
+                if job.started_at:
+                    rq_job["started_at"] = format_timestamp(job.started_at)
+
+                extra["rq-job"] = rq_job
+
+        if "exc_info" in hint:
+            with capture_internal_exceptions():
+                if issubclass(hint["exc_info"][0], JobTimeoutException):
+                    event["fingerprint"] = ["rq", "JobTimeoutException", job.func_name]
+
+        return event
+
+    return event_processor
+
+
+def _capture_exception(exc_info, **kwargs):
+    # type: (ExcInfo, **Any) -> None
+    client = sentry_sdk.get_client()
+
+    event, hint = event_from_exception(
+        exc_info,
+        client_options=client.options,
+        mechanism={"type": "rq", "handled": False},
+    )
+
+    sentry_sdk.capture_event(event, hint=hint)
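End to end, the patches above propagate traces through the queue: `sentry_patched_enqueue_job` stores the current trace headers in `job.meta`, and `sentry_patched_perform_job` continues that trace in the worker via `continue_trace`. A hedged sketch of the producer side; the DSN and connection details are placeholders:

```python
import sentry_sdk
from redis import Redis
from rq import Queue


def my_task():
    return 42


sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder
    traces_sample_rate=1.0,
)

with sentry_sdk.start_transaction(op="queue.submit", name="enqueue-demo"):
    job = Queue(connection=Redis()).enqueue(my_task)
    # The patched enqueue_job stored the propagation headers on the job;
    # the worker's perform_job will pick them up via continue_trace().
    assert "_sentry_trace_headers" in job.meta
```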
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/rust_tracing.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/rust_tracing.py
new file mode 100644
index 00000000..e4c21181
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/rust_tracing.py
@@ -0,0 +1,284 @@
+"""
+This integration ingests tracing data from native extensions written in Rust.
+
+Using it requires additional setup on the Rust side to accept a
+`RustTracingLayer` Python object and register it with the `tracing-subscriber`
+using an adapter from the `pyo3-python-tracing-subscriber` crate. For example:
+```rust
+#[pyfunction]
+pub fn initialize_tracing(py_impl: Bound<'_, PyAny>) {
+    tracing_subscriber::registry()
+        .with(pyo3_python_tracing_subscriber::PythonCallbackLayerBridge::new(py_impl))
+        .init();
+}
+```
+
+Usage in Python would then look like:
+```python
+sentry_sdk.init(
+    dsn=sentry_dsn,
+    integrations=[
+        RustTracingIntegration(
+            "demo_rust_extension",
+            demo_rust_extension.initialize_tracing,
+            event_type_mapping=event_type_mapping,
+        )
+    ],
+)
+```
+
+Each native extension requires its own integration.
+"""
+
+import json
+from enum import Enum, auto
+from typing import Any, Callable, Dict, Tuple, Optional
+
+import sentry_sdk
+from sentry_sdk.integrations import Integration
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.tracing import Span as SentrySpan
+from sentry_sdk.utils import SENSITIVE_DATA_SUBSTITUTE
+
+TraceState = Optional[Tuple[Optional[SentrySpan], SentrySpan]]
+
+
+class RustTracingLevel(Enum):
+    Trace = "TRACE"
+    Debug = "DEBUG"
+    Info = "INFO"
+    Warn = "WARN"
+    Error = "ERROR"
+
+
+class EventTypeMapping(Enum):
+    Ignore = auto()
+    Exc = auto()
+    Breadcrumb = auto()
+    Event = auto()
+
+
+def tracing_level_to_sentry_level(level):
+    # type: (str) -> sentry_sdk._types.LogLevelStr
+    level = RustTracingLevel(level)
+    if level in (RustTracingLevel.Trace, RustTracingLevel.Debug):
+        return "debug"
+    elif level == RustTracingLevel.Info:
+        return "info"
+    elif level == RustTracingLevel.Warn:
+        return "warning"
+    elif level == RustTracingLevel.Error:
+        return "error"
+    else:
+        # Better this than crashing
+        return "info"
+
+
+def extract_contexts(event: Dict[str, Any]) -> Dict[str, Any]:
+    metadata = event.get("metadata", {})
+    contexts = {}
+
+    location = {}
+    for field in ["module_path", "file", "line"]:
+        if field in metadata:
+            location[field] = metadata[field]
+    if len(location) > 0:
+        contexts["rust_tracing_location"] = location
+
+    fields = {}
+    for field in metadata.get("fields", []):
+        fields[field] = event.get(field)
+    if len(fields) > 0:
+        contexts["rust_tracing_fields"] = fields
+
+    return contexts
+
+
+def process_event(event: Dict[str, Any]) -> None:
+    metadata = event.get("metadata", {})
+
+    logger = metadata.get("target")
+    level = tracing_level_to_sentry_level(metadata.get("level"))
+    message = event.get("message")  # type: sentry_sdk._types.Any
+    contexts = extract_contexts(event)
+
+    sentry_event = {
+        "logger": logger,
+        "level": level,
+        "message": message,
+        "contexts": contexts,
+    }  # type: sentry_sdk._types.Event
+
+    sentry_sdk.capture_event(sentry_event)
+
+
+def process_exception(event: Dict[str, Any]) -> None:
+    process_event(event)
+
+
+def process_breadcrumb(event: Dict[str, Any]) -> None:
+    level = tracing_level_to_sentry_level(event.get("metadata", {}).get("level"))
+    message = event.get("message")
+
+    sentry_sdk.add_breadcrumb(level=level, message=message)
+
+
+def default_span_filter(metadata: Dict[str, Any]) -> bool:
+    return RustTracingLevel(metadata.get("level")) in (
+        RustTracingLevel.Error,
+        RustTracingLevel.Warn,
+        RustTracingLevel.Info,
+    )
+
+
+def default_event_type_mapping(metadata: Dict[str, Any]) -> EventTypeMapping:
+    level = RustTracingLevel(metadata.get("level"))
+    if level == RustTracingLevel.Error:
+        return EventTypeMapping.Exc
+    elif level in (RustTracingLevel.Warn, RustTracingLevel.Info):
+        return EventTypeMapping.Breadcrumb
+    elif level in (RustTracingLevel.Debug, RustTracingLevel.Trace):
+        return EventTypeMapping.Ignore
+    else:
+        return EventTypeMapping.Ignore
+
+
+class RustTracingLayer:
+    def __init__(
+        self,
+        origin: str,
+        event_type_mapping: Callable[
+            [Dict[str, Any]], EventTypeMapping
+        ] = default_event_type_mapping,
+        span_filter: Callable[[Dict[str, Any]], bool] = default_span_filter,
+        include_tracing_fields: Optional[bool] = None,
+    ):
+        self.origin = origin
+        self.event_type_mapping = event_type_mapping
+        self.span_filter = span_filter
+        self.include_tracing_fields = include_tracing_fields
+
+    def _include_tracing_fields(self) -> bool:
+        """
+        By default, the values of tracing fields are not included in case they
+        contain PII. A user may override that by passing `True` for the
+        `include_tracing_fields` keyword argument of this integration or by
+        setting `send_default_pii` to `True` in their Sentry client options.
+        """
+        return (
+            should_send_default_pii()
+            if self.include_tracing_fields is None
+            else self.include_tracing_fields
+        )
+
+    def on_event(self, event: str, _span_state: TraceState) -> None:
+        deserialized_event = json.loads(event)
+        metadata = deserialized_event.get("metadata", {})
+
+        event_type = self.event_type_mapping(metadata)
+        if event_type == EventTypeMapping.Ignore:
+            return
+        elif event_type == EventTypeMapping.Exc:
+            process_exception(deserialized_event)
+        elif event_type == EventTypeMapping.Breadcrumb:
+            process_breadcrumb(deserialized_event)
+        elif event_type == EventTypeMapping.Event:
+            process_event(deserialized_event)
+
+    def on_new_span(self, attrs: str, span_id: str) -> TraceState:
+        attrs = json.loads(attrs)
+        metadata = attrs.get("metadata", {})
+
+        if not self.span_filter(metadata):
+            return None
+
+        module_path = metadata.get("module_path")
+        name = metadata.get("name")
+        message = attrs.get("message")
+
+        if message is not None:
+            sentry_span_name = message
+        elif module_path is not None and name is not None:
+            sentry_span_name = f"{module_path}::{name}"  # noqa: E231
+        elif name is not None:
+            sentry_span_name = name
+        else:
+            sentry_span_name = "<unknown>"
+
+        kwargs = {
+            "op": "function",
+            "name": sentry_span_name,
+            "origin": self.origin,
+        }
+
+        scope = sentry_sdk.get_current_scope()
+        parent_sentry_span = scope.span
+        if parent_sentry_span:
+            sentry_span = parent_sentry_span.start_child(**kwargs)
+        else:
+            sentry_span = scope.start_span(**kwargs)
+
+        fields = metadata.get("fields", [])
+        for field in fields:
+            if self._include_tracing_fields():
+                sentry_span.set_data(field, attrs.get(field))
+            else:
+                sentry_span.set_data(field, SENSITIVE_DATA_SUBSTITUTE)
+
+        scope.span = sentry_span
+        return (parent_sentry_span, sentry_span)
+
+    def on_close(self, span_id: str, span_state: TraceState) -> None:
+        if span_state is None:
+            return
+
+        parent_sentry_span, sentry_span = span_state
+        sentry_span.finish()
+        sentry_sdk.get_current_scope().span = parent_sentry_span
+
+    def on_record(self, span_id: str, values: str, span_state: TraceState) -> None:
+        if span_state is None:
+            return
+        _parent_sentry_span, sentry_span = span_state
+
+        deserialized_values = json.loads(values)
+        for key, value in deserialized_values.items():
+            if self._include_tracing_fields():
+                sentry_span.set_data(key, value)
+            else:
+                sentry_span.set_data(key, SENSITIVE_DATA_SUBSTITUTE)
+
+
+class RustTracingIntegration(Integration):
+    """
+    Ingests tracing data from a Rust native extension's `tracing` instrumentation.
+
+    If a project uses more than one Rust native extension, each one will need
+    its own instance of `RustTracingIntegration` with an initializer function
+    specific to that extension.
+
+    Since all of the setup for this integration requires instance-specific state
+    which is not available in `setup_once()`, setup instead happens in `__init__()`.
+    """
+
+    def __init__(
+        self,
+        identifier: str,
+        initializer: Callable[[RustTracingLayer], None],
+        event_type_mapping: Callable[
+            [Dict[str, Any]], EventTypeMapping
+        ] = default_event_type_mapping,
+        span_filter: Callable[[Dict[str, Any]], bool] = default_span_filter,
+        include_tracing_fields: Optional[bool] = None,
+    ):
+        self.identifier = identifier
+        origin = f"auto.function.rust_tracing.{identifier}"
+        self.tracing_layer = RustTracingLayer(
+            origin, event_type_mapping, span_filter, include_tracing_fields
+        )
+
+        initializer(self.tracing_layer)
+
+    @staticmethod
+    def setup_once() -> None:
+        pass
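The module docstring above references an `event_type_mapping` without defining it. A plausible custom mapping, mirroring the shape of `default_event_type_mapping`; the level thresholds here are illustrative, not a recommendation:

```python
from typing import Any, Dict

from sentry_sdk.integrations.rust_tracing import (
    EventTypeMapping,
    RustTracingLevel,
)


def event_type_mapping(metadata: Dict[str, Any]) -> EventTypeMapping:
    level = RustTracingLevel(metadata.get("level"))
    if level == RustTracingLevel.Error:
        return EventTypeMapping.Exc         # capture as a Sentry error event
    elif level in (RustTracingLevel.Warn, RustTracingLevel.Info):
        return EventTypeMapping.Breadcrumb  # keep as context only
    else:
        return EventTypeMapping.Ignore      # drop Debug/Trace entirely
```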
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/sanic.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/sanic.py
new file mode 100644
index 00000000..bd8f1f32
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/sanic.py
@@ -0,0 +1,368 @@
+import sys
+import weakref
+from inspect import isawaitable
+from urllib.parse import urlsplit
+
+import sentry_sdk
+from sentry_sdk import continue_trace
+from sentry_sdk.consts import OP
+from sentry_sdk.integrations import _check_minimum_version, Integration, DidNotEnable
+from sentry_sdk.integrations._wsgi_common import RequestExtractor, _filter_headers
+from sentry_sdk.integrations.logging import ignore_logger
+from sentry_sdk.tracing import TransactionSource
+from sentry_sdk.utils import (
+    capture_internal_exceptions,
+    ensure_integration_enabled,
+    event_from_exception,
+    HAS_REAL_CONTEXTVARS,
+    CONTEXTVARS_ERROR_MESSAGE,
+    parse_version,
+    reraise,
+)
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from collections.abc import Container
+    from typing import Any
+    from typing import Callable
+    from typing import Optional
+    from typing import Union
+    from typing import Dict
+
+    from sanic.request import Request, RequestParameters
+    from sanic.response import BaseHTTPResponse
+
+    from sentry_sdk._types import Event, EventProcessor, ExcInfo, Hint
+    from sanic.router import Route
+
+try:
+    from sanic import Sanic, __version__ as SANIC_VERSION
+    from sanic.exceptions import SanicException
+    from sanic.router import Router
+    from sanic.handlers import ErrorHandler
+except ImportError:
+    raise DidNotEnable("Sanic not installed")
+
+old_error_handler_lookup = ErrorHandler.lookup
+old_handle_request = Sanic.handle_request
+old_router_get = Router.get
+
+try:
+    # This method was introduced in Sanic v21.9
+    old_startup = Sanic._startup
+except AttributeError:
+    pass
+
+
+class SanicIntegration(Integration):
+    identifier = "sanic"
+    origin = f"auto.http.{identifier}"
+    version = None
+
+    def __init__(self, unsampled_statuses=frozenset({404})):
+        # type: (Optional[Container[int]]) -> None
+        """
+        The unsampled_statuses parameter specifies the HTTP statuses for which
+        transactions should not be sent to Sentry. By default, transactions are
+        sent for all HTTP statuses except 404. Set unsampled_statuses to None to
+        send transactions for all HTTP statuses, including 404.
+        """
+        self._unsampled_statuses = unsampled_statuses or set()
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        SanicIntegration.version = parse_version(SANIC_VERSION)
+        _check_minimum_version(SanicIntegration, SanicIntegration.version)
+
+        if not HAS_REAL_CONTEXTVARS:
+            # We better have contextvars or we're going to leak state between
+            # requests.
+            raise DidNotEnable(
+                "The sanic integration for Sentry requires Python 3.7+ "
+                " or the aiocontextvars package." + CONTEXTVARS_ERROR_MESSAGE
+            )
+
+        if SANIC_VERSION.startswith("0.8."):
+            # Sanic 0.8 and older creates a logger named "root" and puts a
+            # stringified version of every exception in there (without exc_info),
+            # which our error deduplication can't detect.
+            #
+            # We explicitly check the version here because it is a very
+            # invasive step to ignore this logger and not necessary in newer
+            # versions at all.
+            #
+            # https://github.com/huge-success/sanic/issues/1332
+            ignore_logger("root")
+
+        if SanicIntegration.version is not None and SanicIntegration.version < (21, 9):
+            _setup_legacy_sanic()
+            return
+
+        _setup_sanic()
+
+
+class SanicRequestExtractor(RequestExtractor):
+    def content_length(self):
+        # type: () -> int
+        if self.request.body is None:
+            return 0
+        return len(self.request.body)
+
+    def cookies(self):
+        # type: () -> Dict[str, str]
+        return dict(self.request.cookies)
+
+    def raw_data(self):
+        # type: () -> bytes
+        return self.request.body
+
+    def form(self):
+        # type: () -> RequestParameters
+        return self.request.form
+
+    def is_json(self):
+        # type: () -> bool
+        raise NotImplementedError()
+
+    def json(self):
+        # type: () -> Optional[Any]
+        return self.request.json
+
+    def files(self):
+        # type: () -> RequestParameters
+        return self.request.files
+
+    def size_of_file(self, file):
+        # type: (Any) -> int
+        return len(file.body or ())
+
+
+def _setup_sanic():
+    # type: () -> None
+    Sanic._startup = _startup
+    ErrorHandler.lookup = _sentry_error_handler_lookup
+
+
+def _setup_legacy_sanic():
+    # type: () -> None
+    Sanic.handle_request = _legacy_handle_request
+    Router.get = _legacy_router_get
+    ErrorHandler.lookup = _sentry_error_handler_lookup
+
+
+async def _startup(self):
+    # type: (Sanic) -> None
+    # This happens about as early in the lifecycle as possible, just after the
+    # Request object is created. The body has not yet been consumed.
+    self.signal("http.lifecycle.request")(_context_enter)
+
+    # This happens after the handler is complete. In v21.9 this signal is not
+    # dispatched when there is an exception. Therefore we need to close out
+    # and call _context_exit from the custom exception handler as well.
+    # See https://github.com/sanic-org/sanic/issues/2297
+    self.signal("http.lifecycle.response")(_context_exit)
+
+    # This happens inside of request handling immediately after the route
+    # has been identified by the router.
+    self.signal("http.routing.after")(_set_transaction)
+
+    # The above signals need to be declared before this can be called.
+    await old_startup(self)
+
+
+async def _context_enter(request):
+    # type: (Request) -> None
+    request.ctx._sentry_do_integration = (
+        sentry_sdk.get_client().get_integration(SanicIntegration) is not None
+    )
+
+    if not request.ctx._sentry_do_integration:
+        return
+
+    weak_request = weakref.ref(request)
+    request.ctx._sentry_scope = sentry_sdk.isolation_scope()
+    scope = request.ctx._sentry_scope.__enter__()
+    scope.clear_breadcrumbs()
+    scope.add_event_processor(_make_request_processor(weak_request))
+
+    transaction = continue_trace(
+        dict(request.headers),
+        op=OP.HTTP_SERVER,
+        # Unless the request results in a 404 error, the name and source will get overwritten in _set_transaction
+        name=request.path,
+        source=TransactionSource.URL,
+        origin=SanicIntegration.origin,
+    )
+    request.ctx._sentry_transaction = sentry_sdk.start_transaction(
+        transaction
+    ).__enter__()
+
+
+async def _context_exit(request, response=None):
+    # type: (Request, Optional[BaseHTTPResponse]) -> None
+    with capture_internal_exceptions():
+        if not request.ctx._sentry_do_integration:
+            return
+
+        integration = sentry_sdk.get_client().get_integration(SanicIntegration)
+
+        response_status = None if response is None else response.status
+
+        # This capture_internal_exceptions block has been intentionally nested here, so that in case an exception
+        # happens while trying to end the transaction, we still attempt to exit the scope.
+        with capture_internal_exceptions():
+            request.ctx._sentry_transaction.set_http_status(response_status)
+            request.ctx._sentry_transaction.sampled &= (
+                isinstance(integration, SanicIntegration)
+                and response_status not in integration._unsampled_statuses
+            )
+            request.ctx._sentry_transaction.__exit__(None, None, None)
+
+        request.ctx._sentry_scope.__exit__(None, None, None)
+
+
+async def _set_transaction(request, route, **_):
+    # type: (Request, Route, **Any) -> None
+    if request.ctx._sentry_do_integration:
+        with capture_internal_exceptions():
+            scope = sentry_sdk.get_current_scope()
+            route_name = route.name.replace(request.app.name, "").strip(".")
+            scope.set_transaction_name(route_name, source=TransactionSource.COMPONENT)
+
+
+def _sentry_error_handler_lookup(self, exception, *args, **kwargs):
+    # type: (Any, Exception, *Any, **Any) -> Optional[object]
+    _capture_exception(exception)
+    old_error_handler = old_error_handler_lookup(self, exception, *args, **kwargs)
+
+    if old_error_handler is None:
+        return None
+
+    if sentry_sdk.get_client().get_integration(SanicIntegration) is None:
+        return old_error_handler
+
+    async def sentry_wrapped_error_handler(request, exception):
+        # type: (Request, Exception) -> Any
+        try:
+            response = old_error_handler(request, exception)
+            if isawaitable(response):
+                response = await response
+            return response
+        except Exception:
+            # Report errors that occur in Sanic error handler. These
+            # exceptions will not even show up in Sanic's
+            # `sanic.exceptions` logger.
+            exc_info = sys.exc_info()
+            _capture_exception(exc_info)
+            reraise(*exc_info)
+        finally:
+            # As mentioned in previous comment in _startup, this can be removed
+            # after https://github.com/sanic-org/sanic/issues/2297 is resolved
+            if SanicIntegration.version and SanicIntegration.version == (21, 9):
+                await _context_exit(request)
+
+    return sentry_wrapped_error_handler
+
+
+async def _legacy_handle_request(self, request, *args, **kwargs):
+    # type: (Any, Request, *Any, **Any) -> Any
+    if sentry_sdk.get_client().get_integration(SanicIntegration) is None:
+        return await old_handle_request(self, request, *args, **kwargs)
+
+    weak_request = weakref.ref(request)
+
+    with sentry_sdk.isolation_scope() as scope:
+        scope.clear_breadcrumbs()
+        scope.add_event_processor(_make_request_processor(weak_request))
+
+        response = old_handle_request(self, request, *args, **kwargs)
+        if isawaitable(response):
+            response = await response
+
+        return response
+
+
+def _legacy_router_get(self, *args):
+    # type: (Any, Union[Any, Request]) -> Any
+    rv = old_router_get(self, *args)
+    if sentry_sdk.get_client().get_integration(SanicIntegration) is not None:
+        with capture_internal_exceptions():
+            scope = sentry_sdk.get_isolation_scope()
+            if SanicIntegration.version and SanicIntegration.version >= (21, 3):
+                # Sanic versions above and including 21.3 append the app name to the
+                # route name, and so we need to remove it from Route name so the
+                # transaction name is consistent across all versions
+                sanic_app_name = self.ctx.app.name
+                sanic_route = rv[0].name
+
+                if sanic_route.startswith("%s." % sanic_app_name):
+                    # Add 1 to the app name's length to also strip the dot
+                    # that joins the app name and the route name.
+                    # Format: app_name.route_name
+                    sanic_route = sanic_route[len(sanic_app_name) + 1 :]
+
+                scope.set_transaction_name(
+                    sanic_route, source=TransactionSource.COMPONENT
+                )
+            else:
+                scope.set_transaction_name(
+                    rv[0].__name__, source=TransactionSource.COMPONENT
+                )
+
+    return rv
+
+
+@ensure_integration_enabled(SanicIntegration)
+def _capture_exception(exception):
+    # type: (Union[ExcInfo, BaseException]) -> None
+    with capture_internal_exceptions():
+        event, hint = event_from_exception(
+            exception,
+            client_options=sentry_sdk.get_client().options,
+            mechanism={"type": "sanic", "handled": False},
+        )
+
+        if hint and hasattr(hint["exc_info"][0], "quiet") and hint["exc_info"][0].quiet:
+            return
+
+        sentry_sdk.capture_event(event, hint=hint)
+
+
+def _make_request_processor(weak_request):
+    # type: (Callable[[], Request]) -> EventProcessor
+    def sanic_processor(event, hint):
+        # type: (Event, Optional[Hint]) -> Optional[Event]
+
+        try:
+            if hint and issubclass(hint["exc_info"][0], SanicException):
+                return None
+        except KeyError:
+            pass
+
+        request = weak_request()
+        if request is None:
+            return event
+
+        with capture_internal_exceptions():
+            extractor = SanicRequestExtractor(request)
+            extractor.extract_into_event(event)
+
+            request_info = event["request"]
+            urlparts = urlsplit(request.url)
+
+            request_info["url"] = "%s://%s%s" % (
+                urlparts.scheme,
+                urlparts.netloc,
+                urlparts.path,
+            )
+
+            request_info["query_string"] = urlparts.query
+            request_info["method"] = request.method
+            request_info["env"] = {"REMOTE_ADDR": request.remote_addr}
+            request_info["headers"] = _filter_headers(dict(request.headers))
+
+        return event
+
+    return sanic_processor
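A hedged configuration sketch for the `unsampled_statuses` option documented in `SanicIntegration.__init__` above; the DSN is a placeholder:

```python
import sentry_sdk
from sentry_sdk.integrations.sanic import SanicIntegration

sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder
    traces_sample_rate=1.0,
    # The default drops 404 transactions; pass None to keep them all,
    # or a custom set to drop additional statuses.
    integrations=[SanicIntegration(unsampled_statuses={404, 403})],
)
```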
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/serverless.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/serverless.py
new file mode 100644
index 00000000..760c07ff
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/serverless.py
@@ -0,0 +1,76 @@
+import sys
+from functools import wraps
+
+import sentry_sdk
+from sentry_sdk.utils import event_from_exception, reraise
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+    from typing import Callable
+    from typing import TypeVar
+    from typing import Union
+    from typing import Optional
+    from typing import overload
+
+    F = TypeVar("F", bound=Callable[..., Any])
+
+else:
+
+    def overload(x):
+        # type: (F) -> F
+        return x
+
+
+@overload
+def serverless_function(f, flush=True):
+    # type: (F, bool) -> F
+    pass
+
+
+@overload
+def serverless_function(f=None, flush=True):  # noqa: F811
+    # type: (None, bool) -> Callable[[F], F]
+    pass
+
+
+def serverless_function(f=None, flush=True):  # noqa
+    # type: (Optional[F], bool) -> Union[F, Callable[[F], F]]
+    def wrapper(f):
+        # type: (F) -> F
+        @wraps(f)
+        def inner(*args, **kwargs):
+            # type: (*Any, **Any) -> Any
+            with sentry_sdk.isolation_scope() as scope:
+                scope.clear_breadcrumbs()
+
+                try:
+                    return f(*args, **kwargs)
+                except Exception:
+                    _capture_and_reraise()
+                finally:
+                    if flush:
+                        sentry_sdk.flush()
+
+        return inner  # type: ignore
+
+    if f is None:
+        return wrapper
+    else:
+        return wrapper(f)
+
+
+def _capture_and_reraise():
+    # type: () -> None
+    exc_info = sys.exc_info()
+    client = sentry_sdk.get_client()
+    if client.is_active():
+        event, hint = event_from_exception(
+            exc_info,
+            client_options=client.options,
+            mechanism={"type": "serverless", "handled": False},
+        )
+        sentry_sdk.capture_event(event, hint=hint)
+
+    reraise(*exc_info)
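Both decorator forms supported by the overloads above are usable; a minimal usage sketch, with illustrative handler bodies:

```python
from sentry_sdk.integrations.serverless import serverless_function


@serverless_function
def handler(event, context):
    # Exceptions raised here are captured, events are flushed before the
    # runtime freezes, and the exception is re-raised.
    return {"status": "ok"}


@serverless_function(flush=False)  # opt out of the per-invocation flush
def fire_and_forget_handler(event, context):
    return None
```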
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/socket.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/socket.py
new file mode 100644
index 00000000..babf61aa
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/socket.py
@@ -0,0 +1,96 @@
+import socket
+
+import sentry_sdk
+from sentry_sdk._types import MYPY
+from sentry_sdk.consts import OP
+from sentry_sdk.integrations import Integration
+
+if MYPY:
+    from socket import AddressFamily, SocketKind
+    from typing import Tuple, Optional, Union, List
+
+__all__ = ["SocketIntegration"]
+
+
+class SocketIntegration(Integration):
+    identifier = "socket"
+    origin = f"auto.socket.{identifier}"
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        """
+        Patches two of the most commonly used socket functions: create_connection and getaddrinfo (the DNS resolver).
+        """
+        _patch_create_connection()
+        _patch_getaddrinfo()
+
+
+def _get_span_description(host, port):
+    # type: (Union[bytes, str, None], Union[bytes, str, int, None]) -> str
+
+    try:
+        host = host.decode()  # type: ignore
+    except (UnicodeDecodeError, AttributeError):
+        pass
+
+    try:
+        port = port.decode()  # type: ignore
+    except (UnicodeDecodeError, AttributeError):
+        pass
+
+    description = "%s:%s" % (host, port)  # type: ignore
+    return description
+
+
+def _patch_create_connection():
+    # type: () -> None
+    real_create_connection = socket.create_connection
+
+    def create_connection(
+        address,
+        timeout=socket._GLOBAL_DEFAULT_TIMEOUT,  # type: ignore
+        source_address=None,
+    ):
+        # type: (Tuple[Optional[str], int], Optional[float], Optional[Tuple[Union[bytearray, bytes, str], int]])-> socket.socket
+        integration = sentry_sdk.get_client().get_integration(SocketIntegration)
+        if integration is None:
+            return real_create_connection(address, timeout, source_address)
+
+        with sentry_sdk.start_span(
+            op=OP.SOCKET_CONNECTION,
+            name=_get_span_description(address[0], address[1]),
+            origin=SocketIntegration.origin,
+        ) as span:
+            span.set_data("address", address)
+            span.set_data("timeout", timeout)
+            span.set_data("source_address", source_address)
+
+            return real_create_connection(
+                address=address, timeout=timeout, source_address=source_address
+            )
+
+    socket.create_connection = create_connection  # type: ignore
+
+
+def _patch_getaddrinfo():
+    # type: () -> None
+    real_getaddrinfo = socket.getaddrinfo
+
+    def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0):
+        # type: (Union[bytes, str, None], Union[bytes, str, int, None], int, int, int, int) -> List[Tuple[AddressFamily, SocketKind, int, str, Union[Tuple[str, int], Tuple[str, int, int, int], Tuple[int, bytes]]]]
+        integration = sentry_sdk.get_client().get_integration(SocketIntegration)
+        if integration is None:
+            return real_getaddrinfo(host, port, family, type, proto, flags)
+
+        with sentry_sdk.start_span(
+            op=OP.SOCKET_DNS,
+            name=_get_span_description(host, port),
+            origin=SocketIntegration.origin,
+        ) as span:
+            span.set_data("host", host)
+            span.set_data("port", port)
+
+            return real_getaddrinfo(host, port, family, type, proto, flags)
+
+    socket.getaddrinfo = getaddrinfo
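+
+
+# A minimal enablement sketch, assuming a valid DSN; spans are recorded while
+# a transaction is active:
+#
+#     import socket
+#     import sentry_sdk
+#     from sentry_sdk.integrations.socket import SocketIntegration
+#
+#     sentry_sdk.init(dsn="...", traces_sample_rate=1.0,
+#                     integrations=[SocketIntegration()])
+#
+#     with sentry_sdk.start_transaction(name="lookup"):
+#         socket.getaddrinfo("example.com", 443)   # DNS resolution span
+#         socket.create_connection(("example.com", 443))  # connection span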
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/spark/__init__.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/spark/__init__.py
new file mode 100644
index 00000000..10d94163
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/spark/__init__.py
@@ -0,0 +1,4 @@
+from sentry_sdk.integrations.spark.spark_driver import SparkIntegration
+from sentry_sdk.integrations.spark.spark_worker import SparkWorkerIntegration
+
+__all__ = ["SparkIntegration", "SparkWorkerIntegration"]
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/spark/spark_driver.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/spark/spark_driver.py
new file mode 100644
index 00000000..fac98535
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/spark/spark_driver.py
@@ -0,0 +1,315 @@
+import sentry_sdk
+from sentry_sdk.integrations import Integration
+from sentry_sdk.utils import capture_internal_exceptions, ensure_integration_enabled
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+    from typing import Optional
+
+    from sentry_sdk._types import Event, Hint
+    from pyspark import SparkContext
+
+
+class SparkIntegration(Integration):
+    identifier = "spark"
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        _setup_sentry_tracing()
+
+
+def _set_app_properties():
+    # type: () -> None
+    """
+    Set properties on the driver that propagate to worker processes, so the
+    worker integration has access to app_name and application_id.
+    """
+    from pyspark import SparkContext
+
+    spark_context = SparkContext._active_spark_context
+    if spark_context:
+        spark_context.setLocalProperty(
+            "sentry_app_name",
+            spark_context.appName,
+        )
+        spark_context.setLocalProperty(
+            "sentry_application_id",
+            spark_context.applicationId,
+        )
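+
+# For illustration: the worker side reads these back from the task context,
+# e.g. TaskContext.get()._localProperties.get("sentry_app_name"), which is
+# what the worker integration does when tagging events.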
+
+
+def _start_sentry_listener(sc):
+    # type: (SparkContext) -> None
+    """
+    Start the Java gateway callback server and register a custom `SparkListener`.
+    """
+    from pyspark.java_gateway import ensure_callback_server_started
+
+    gw = sc._gateway
+    ensure_callback_server_started(gw)
+    listener = SentryListener()
+    sc._jsc.sc().addSparkListener(listener)
+
+
+def _add_event_processor(sc):
+    # type: (SparkContext) -> None
+    scope = sentry_sdk.get_isolation_scope()
+
+    @scope.add_event_processor
+    def process_event(event, hint):
+        # type: (Event, Hint) -> Optional[Event]
+        with capture_internal_exceptions():
+            if sentry_sdk.get_client().get_integration(SparkIntegration) is None:
+                return event
+
+            if sc._active_spark_context is None:
+                return event
+
+            event.setdefault("user", {}).setdefault("id", sc.sparkUser())
+
+            event.setdefault("tags", {}).setdefault(
+                "executor.id", sc._conf.get("spark.executor.id")
+            )
+            event["tags"].setdefault(
+                "spark-submit.deployMode",
+                sc._conf.get("spark.submit.deployMode"),
+            )
+            event["tags"].setdefault("driver.host", sc._conf.get("spark.driver.host"))
+            event["tags"].setdefault("driver.port", sc._conf.get("spark.driver.port"))
+            event["tags"].setdefault("spark_version", sc.version)
+            event["tags"].setdefault("app_name", sc.appName)
+            event["tags"].setdefault("application_id", sc.applicationId)
+            event["tags"].setdefault("master", sc.master)
+            event["tags"].setdefault("spark_home", sc.sparkHome)
+
+            event.setdefault("extra", {}).setdefault("web_url", sc.uiWebUrl)
+
+        return event
+
+
+def _activate_integration(sc):
+    # type: (SparkContext) -> None
+
+    _start_sentry_listener(sc)
+    _set_app_properties()
+    _add_event_processor(sc)
+
+
+def _patch_spark_context_init():
+    # type: () -> None
+    from pyspark import SparkContext
+
+    spark_context_init = SparkContext._do_init
+
+    @ensure_integration_enabled(SparkIntegration, spark_context_init)
+    def _sentry_patched_spark_context_init(self, *args, **kwargs):
+        # type: (SparkContext, *Any, **Any) -> Optional[Any]
+        rv = spark_context_init(self, *args, **kwargs)
+        _activate_integration(self)
+        return rv
+
+    SparkContext._do_init = _sentry_patched_spark_context_init
+
+
+def _setup_sentry_tracing():
+    # type: () -> None
+    from pyspark import SparkContext
+
+    if SparkContext._active_spark_context is not None:
+        _activate_integration(SparkContext._active_spark_context)
+        return
+    _patch_spark_context_init()
+
+
+class SparkListener:
+    def onApplicationEnd(self, applicationEnd):  # noqa: N802,N803
+        # type: (Any) -> None
+        pass
+
+    def onApplicationStart(self, applicationStart):  # noqa: N802,N803
+        # type: (Any) -> None
+        pass
+
+    def onBlockManagerAdded(self, blockManagerAdded):  # noqa: N802,N803
+        # type: (Any) -> None
+        pass
+
+    def onBlockManagerRemoved(self, blockManagerRemoved):  # noqa: N802,N803
+        # type: (Any) -> None
+        pass
+
+    def onBlockUpdated(self, blockUpdated):  # noqa: N802,N803
+        # type: (Any) -> None
+        pass
+
+    def onEnvironmentUpdate(self, environmentUpdate):  # noqa: N802,N803
+        # type: (Any) -> None
+        pass
+
+    def onExecutorAdded(self, executorAdded):  # noqa: N802,N803
+        # type: (Any) -> None
+        pass
+
+    def onExecutorBlacklisted(self, executorBlacklisted):  # noqa: N802,N803
+        # type: (Any) -> None
+        pass
+
+    def onExecutorBlacklistedForStage(  # noqa: N802
+        self, executorBlacklistedForStage  # noqa: N803
+    ):
+        # type: (Any) -> None
+        pass
+
+    def onExecutorMetricsUpdate(self, executorMetricsUpdate):  # noqa: N802,N803
+        # type: (Any) -> None
+        pass
+
+    def onExecutorRemoved(self, executorRemoved):  # noqa: N802,N803
+        # type: (Any) -> None
+        pass
+
+    def onJobEnd(self, jobEnd):  # noqa: N802,N803
+        # type: (Any) -> None
+        pass
+
+    def onJobStart(self, jobStart):  # noqa: N802,N803
+        # type: (Any) -> None
+        pass
+
+    def onNodeBlacklisted(self, nodeBlacklisted):  # noqa: N802,N803
+        # type: (Any) -> None
+        pass
+
+    def onNodeBlacklistedForStage(self, nodeBlacklistedForStage):  # noqa: N802,N803
+        # type: (Any) -> None
+        pass
+
+    def onNodeUnblacklisted(self, nodeUnblacklisted):  # noqa: N802,N803
+        # type: (Any) -> None
+        pass
+
+    def onOtherEvent(self, event):  # noqa: N802,N803
+        # type: (Any) -> None
+        pass
+
+    def onSpeculativeTaskSubmitted(self, speculativeTask):  # noqa: N802,N803
+        # type: (Any) -> None
+        pass
+
+    def onStageCompleted(self, stageCompleted):  # noqa: N802,N803
+        # type: (Any) -> None
+        pass
+
+    def onStageSubmitted(self, stageSubmitted):  # noqa: N802,N803
+        # type: (Any) -> None
+        pass
+
+    def onTaskEnd(self, taskEnd):  # noqa: N802,N803
+        # type: (Any) -> None
+        pass
+
+    def onTaskGettingResult(self, taskGettingResult):  # noqa: N802,N803
+        # type: (Any) -> None
+        pass
+
+    def onTaskStart(self, taskStart):  # noqa: N802,N803
+        # type: (Any) -> None
+        pass
+
+    def onUnpersistRDD(self, unpersistRDD):  # noqa: N802,N803
+        # type: (Any) -> None
+        pass
+
+    class Java:
+        implements = ["org.apache.spark.scheduler.SparkListenerInterface"]
+
+
+class SentryListener(SparkListener):
+    def _add_breadcrumb(
+        self,
+        level,  # type: str
+        message,  # type: str
+        data=None,  # type: Optional[dict[str, Any]]
+    ):
+        # type: (...) -> None
+        sentry_sdk.get_isolation_scope().add_breadcrumb(
+            level=level, message=message, data=data
+        )
+
+    def onJobStart(self, jobStart):  # noqa: N802,N803
+        # type: (Any) -> None
+        sentry_sdk.get_isolation_scope().clear_breadcrumbs()
+
+        message = "Job {} Started".format(jobStart.jobId())
+        self._add_breadcrumb(level="info", message=message)
+        _set_app_properties()
+
+    def onJobEnd(self, jobEnd):  # noqa: N802,N803
+        # type: (Any) -> None
+        level = ""
+        message = ""
+        data = {"result": jobEnd.jobResult().toString()}
+
+        if jobEnd.jobResult().toString() == "JobSucceeded":
+            level = "info"
+            message = "Job {} Ended".format(jobEnd.jobId())
+        else:
+            level = "warning"
+            message = "Job {} Failed".format(jobEnd.jobId())
+
+        self._add_breadcrumb(level=level, message=message, data=data)
+
+    def onStageSubmitted(self, stageSubmitted):  # noqa: N802,N803
+        # type: (Any) -> None
+        stage_info = stageSubmitted.stageInfo()
+        message = "Stage {} Submitted".format(stage_info.stageId())
+
+        data = {"name": stage_info.name()}
+        attempt_id = _get_attempt_id(stage_info)
+        if attempt_id is not None:
+            data["attemptId"] = attempt_id
+
+        self._add_breadcrumb(level="info", message=message, data=data)
+        _set_app_properties()
+
+    def onStageCompleted(self, stageCompleted):  # noqa: N802,N803
+        # type: (Any) -> None
+        from py4j.protocol import Py4JJavaError  # type: ignore
+
+        stage_info = stageCompleted.stageInfo()
+        message = ""
+        level = ""
+
+        data = {"name": stage_info.name()}
+        attempt_id = _get_attempt_id(stage_info)
+        if attempt_id is not None:
+            data["attemptId"] = attempt_id
+
+        # Have to try/except because stageInfo.failureReason() returns a Scala
+        # Option: calling .get() on an empty Option (stage succeeded) raises.
+        try:
+            data["reason"] = stage_info.failureReason().get()
+            message = "Stage {} Failed".format(stage_info.stageId())
+            level = "warning"
+        except Py4JJavaError:
+            message = "Stage {} Completed".format(stage_info.stageId())
+            level = "info"
+
+        self._add_breadcrumb(level=level, message=message, data=data)
+
+
+def _get_attempt_id(stage_info):
+    # type: (Any) -> Optional[int]
+    try:
+        return stage_info.attemptId()
+    except Exception:
+        pass
+
+    try:
+        return stage_info.attemptNumber()
+    except Exception:
+        pass
+
+    return None
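+
+
+# A minimal driver-side enablement sketch, assuming a valid DSN. Initializing
+# before the SparkContext exists lets the patched _do_init attach the listener;
+# an already-active context is picked up by _setup_sentry_tracing as well:
+#
+#     import sentry_sdk
+#     from sentry_sdk.integrations.spark import SparkIntegration
+#
+#     sentry_sdk.init(dsn="...", integrations=[SparkIntegration()])
+#
+#     from pyspark import SparkContext
+#     sc = SparkContext(appName="my-app")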
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/spark/spark_worker.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/spark/spark_worker.py
new file mode 100644
index 00000000..5340a0b3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/spark/spark_worker.py
@@ -0,0 +1,116 @@
+import sys
+
+import sentry_sdk
+from sentry_sdk.integrations import Integration
+from sentry_sdk.utils import (
+    capture_internal_exceptions,
+    exc_info_from_error,
+    single_exception_from_error_tuple,
+    walk_exception_chain,
+    event_hint_with_exc_info,
+)
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+    from typing import Optional
+
+    from sentry_sdk._types import ExcInfo, Event, Hint
+
+
+class SparkWorkerIntegration(Integration):
+    identifier = "spark_worker"
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        import pyspark.daemon as original_daemon
+
+        original_daemon.worker_main = _sentry_worker_main
+
+
+def _capture_exception(exc_info):
+    # type: (ExcInfo) -> None
+    client = sentry_sdk.get_client()
+
+    mechanism = {"type": "spark", "handled": False}
+
+    exc_info = exc_info_from_error(exc_info)
+
+    exc_type, exc_value, tb = exc_info
+    rv = []
+
+    # On exception, the worker calls sys.exit(-1), so we can ignore SystemExit and similar errors
+    for exc_type, exc_value, tb in walk_exception_chain(exc_info):
+        if exc_type not in (SystemExit, EOFError, ConnectionResetError):
+            rv.append(
+                single_exception_from_error_tuple(
+                    exc_type, exc_value, tb, client.options, mechanism
+                )
+            )
+
+    if rv:
+        rv.reverse()
+        hint = event_hint_with_exc_info(exc_info)
+        event = {"level": "error", "exception": {"values": rv}}  # type: Event
+
+        _tag_task_context()
+
+        sentry_sdk.capture_event(event, hint=hint)
+
+
+def _tag_task_context():
+    # type: () -> None
+    from pyspark.taskcontext import TaskContext
+
+    scope = sentry_sdk.get_isolation_scope()
+
+    @scope.add_event_processor
+    def process_event(event, hint):
+        # type: (Event, Hint) -> Optional[Event]
+        with capture_internal_exceptions():
+            integration = sentry_sdk.get_client().get_integration(
+                SparkWorkerIntegration
+            )
+            task_context = TaskContext.get()
+
+            if integration is None or task_context is None:
+                return event
+
+            event.setdefault("tags", {}).setdefault(
+                "stageId", str(task_context.stageId())
+            )
+            event["tags"].setdefault("partitionId", str(task_context.partitionId()))
+            event["tags"].setdefault("attemptNumber", str(task_context.attemptNumber()))
+            event["tags"].setdefault("taskAttemptId", str(task_context.taskAttemptId()))
+
+            if task_context._localProperties:
+                if "sentry_app_name" in task_context._localProperties:
+                    event["tags"].setdefault(
+                        "app_name", task_context._localProperties["sentry_app_name"]
+                    )
+                    event["tags"].setdefault(
+                        "application_id",
+                        task_context._localProperties["sentry_application_id"],
+                    )
+
+                if "callSite.short" in task_context._localProperties:
+                    event.setdefault("extra", {}).setdefault(
+                        "callSite", task_context._localProperties["callSite.short"]
+                    )
+
+        return event
+
+
+def _sentry_worker_main(*args, **kwargs):
+    # type: (*Optional[Any], **Optional[Any]) -> None
+    import pyspark.worker as original_worker
+
+    try:
+        original_worker.main(*args, **kwargs)
+    except SystemExit:
+        if sentry_sdk.get_client().get_integration(SparkWorkerIntegration) is not None:
+            exc_info = sys.exc_info()
+            with capture_internal_exceptions():
+                _capture_exception(exc_info)
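+
+
+# A minimal worker-side enablement sketch, following the documented pattern of
+# pointing Spark at a wrapper daemon module (module and file names here are
+# illustrative):
+#
+#     # sentry_daemon.py
+#     import sentry_sdk
+#     import pyspark.daemon as original_daemon
+#     from sentry_sdk.integrations.spark import SparkWorkerIntegration
+#
+#     sentry_sdk.init(dsn="...", integrations=[SparkWorkerIntegration()])
+#
+#     if __name__ == "__main__":
+#         original_daemon.manager()
+#
+# submitted with: --conf spark.python.use.daemon=true
+#                 --conf spark.python.daemon.module=sentry_daemon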
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/sqlalchemy.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/sqlalchemy.py
new file mode 100644
index 00000000..068d3730
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/sqlalchemy.py
@@ -0,0 +1,146 @@
+from sentry_sdk.consts import SPANSTATUS, SPANDATA
+from sentry_sdk.integrations import _check_minimum_version, Integration, DidNotEnable
+from sentry_sdk.tracing_utils import add_query_source, record_sql_queries
+from sentry_sdk.utils import (
+    capture_internal_exceptions,
+    ensure_integration_enabled,
+    parse_version,
+)
+
+try:
+    from sqlalchemy.engine import Engine  # type: ignore
+    from sqlalchemy.event import listen  # type: ignore
+    from sqlalchemy import __version__ as SQLALCHEMY_VERSION  # type: ignore
+except ImportError:
+    raise DidNotEnable("SQLAlchemy not installed.")
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+    from typing import ContextManager
+    from typing import Optional
+
+    from sentry_sdk.tracing import Span
+
+
+class SqlalchemyIntegration(Integration):
+    identifier = "sqlalchemy"
+    origin = f"auto.db.{identifier}"
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        version = parse_version(SQLALCHEMY_VERSION)
+        _check_minimum_version(SqlalchemyIntegration, version)
+
+        listen(Engine, "before_cursor_execute", _before_cursor_execute)
+        listen(Engine, "after_cursor_execute", _after_cursor_execute)
+        listen(Engine, "handle_error", _handle_error)
+
+
+@ensure_integration_enabled(SqlalchemyIntegration)
+def _before_cursor_execute(
+    conn, cursor, statement, parameters, context, executemany, *args
+):
+    # type: (Any, Any, Any, Any, Any, bool, *Any) -> None
+    ctx_mgr = record_sql_queries(
+        cursor,
+        statement,
+        parameters,
+        paramstyle=context and context.dialect and context.dialect.paramstyle or None,
+        executemany=executemany,
+        span_origin=SqlalchemyIntegration.origin,
+    )
+    context._sentry_sql_span_manager = ctx_mgr
+
+    span = ctx_mgr.__enter__()
+
+    if span is not None:
+        _set_db_data(span, conn)
+        context._sentry_sql_span = span
+
+
+@ensure_integration_enabled(SqlalchemyIntegration)
+def _after_cursor_execute(conn, cursor, statement, parameters, context, *args):
+    # type: (Any, Any, Any, Any, Any, *Any) -> None
+    ctx_mgr = getattr(
+        context, "_sentry_sql_span_manager", None
+    )  # type: Optional[ContextManager[Any]]
+
+    if ctx_mgr is not None:
+        context._sentry_sql_span_manager = None
+        ctx_mgr.__exit__(None, None, None)
+
+    span = getattr(context, "_sentry_sql_span", None)  # type: Optional[Span]
+    if span is not None:
+        with capture_internal_exceptions():
+            add_query_source(span)
+
+
+def _handle_error(context, *args):
+    # type: (Any, *Any) -> None
+    execution_context = context.execution_context
+    if execution_context is None:
+        return
+
+    span = getattr(execution_context, "_sentry_sql_span", None)  # type: Optional[Span]
+
+    if span is not None:
+        span.set_status(SPANSTATUS.INTERNAL_ERROR)
+
+    # _after_cursor_execute does not get called for crashing SQL statements.
+    # Judging from the SQLAlchemy codebase, any error coming into this handler
+    # is going to be fatal.
+    ctx_mgr = getattr(
+        execution_context, "_sentry_sql_span_manager", None
+    )  # type: Optional[ContextManager[Any]]
+
+    if ctx_mgr is not None:
+        execution_context._sentry_sql_span_manager = None
+        ctx_mgr.__exit__(None, None, None)
+
+
+# See: https://docs.sqlalchemy.org/en/20/dialects/index.html
+def _get_db_system(name):
+    # type: (str) -> Optional[str]
+    name = str(name)
+
+    if "sqlite" in name:
+        return "sqlite"
+
+    if "postgres" in name:
+        return "postgresql"
+
+    if "mariadb" in name:
+        return "mariadb"
+
+    if "mysql" in name:
+        return "mysql"
+
+    if "oracle" in name:
+        return "oracle"
+
+    return None
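+
+# For illustration: the substring checks normalize dialect names, e.g.
+# "postgresql" (contains "postgres") -> "postgresql", while an unknown
+# dialect such as "mssql" falls through and returns None.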
+
+
+def _set_db_data(span, conn):
+    # type: (Span, Any) -> None
+    db_system = _get_db_system(conn.engine.name)
+    if db_system is not None:
+        span.set_data(SPANDATA.DB_SYSTEM, db_system)
+
+    if conn.engine.url is None:
+        return
+
+    db_name = conn.engine.url.database
+    if db_name is not None:
+        span.set_data(SPANDATA.DB_NAME, db_name)
+
+    server_address = conn.engine.url.host
+    if server_address is not None:
+        span.set_data(SPANDATA.SERVER_ADDRESS, server_address)
+
+    server_port = conn.engine.url.port
+    if server_port is not None:
+        span.set_data(SPANDATA.SERVER_PORT, server_port)
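+
+
+# A minimal enablement sketch, assuming a valid DSN and an available driver;
+# query spans are recorded while a transaction is active:
+#
+#     import sentry_sdk
+#     from sentry_sdk.integrations.sqlalchemy import SqlalchemyIntegration
+#     from sqlalchemy import create_engine, text
+#
+#     sentry_sdk.init(dsn="...", traces_sample_rate=1.0,
+#                     integrations=[SqlalchemyIntegration()])
+#
+#     engine = create_engine("sqlite://")
+#     with sentry_sdk.start_transaction(name="query"):
+#         with engine.connect() as conn:
+#             conn.execute(text("SELECT 1"))  # recorded by the cursor hooks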
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/starlette.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/starlette.py
new file mode 100644
index 00000000..dbb47dff
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/starlette.py
@@ -0,0 +1,740 @@
+import asyncio
+import functools
+import warnings
+from collections.abc import Set
+from copy import deepcopy
+
+import sentry_sdk
+from sentry_sdk.consts import OP
+from sentry_sdk.integrations import (
+    DidNotEnable,
+    Integration,
+    _DEFAULT_FAILED_REQUEST_STATUS_CODES,
+)
+from sentry_sdk.integrations._wsgi_common import (
+    DEFAULT_HTTP_METHODS_TO_CAPTURE,
+    HttpCodeRangeContainer,
+    _is_json_content_type,
+    request_body_within_bounds,
+)
+from sentry_sdk.integrations.asgi import SentryAsgiMiddleware
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.tracing import (
+    SOURCE_FOR_STYLE,
+    TransactionSource,
+)
+from sentry_sdk.utils import (
+    AnnotatedValue,
+    capture_internal_exceptions,
+    ensure_integration_enabled,
+    event_from_exception,
+    logger,
+    parse_version,
+    transaction_from_function,
+)
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any, Awaitable, Callable, Container, Dict, Optional, Tuple, Union
+
+    from sentry_sdk._types import Event, HttpStatusCodeRange
+
+try:
+    import starlette  # type: ignore
+    from starlette import __version__ as STARLETTE_VERSION
+    from starlette.applications import Starlette  # type: ignore
+    from starlette.datastructures import UploadFile  # type: ignore
+    from starlette.middleware import Middleware  # type: ignore
+    from starlette.middleware.authentication import (  # type: ignore
+        AuthenticationMiddleware,
+    )
+    from starlette.requests import Request  # type: ignore
+    from starlette.routing import Match  # type: ignore
+    from starlette.types import ASGIApp, Receive, Scope as StarletteScope, Send  # type: ignore
+except ImportError:
+    raise DidNotEnable("Starlette is not installed")
+
+try:
+    # Starlette 0.20
+    from starlette.middleware.exceptions import ExceptionMiddleware  # type: ignore
+except ImportError:
+    # Starlette 0.19.1
+    from starlette.exceptions import ExceptionMiddleware  # type: ignore
+
+try:
+    # Optional dependency of Starlette to parse form data.
+    try:
+        # python-multipart 0.0.13 and later
+        import python_multipart as multipart  # type: ignore
+    except ImportError:
+        # python-multipart 0.0.12 and earlier
+        import multipart  # type: ignore
+except ImportError:
+    multipart = None
+
+
+_DEFAULT_TRANSACTION_NAME = "generic Starlette request"
+
+TRANSACTION_STYLE_VALUES = ("endpoint", "url")
+
+
+class StarletteIntegration(Integration):
+    identifier = "starlette"
+    origin = f"auto.http.{identifier}"
+
+    transaction_style = ""
+
+    def __init__(
+        self,
+        transaction_style="url",  # type: str
+        failed_request_status_codes=_DEFAULT_FAILED_REQUEST_STATUS_CODES,  # type: Union[Set[int], list[HttpStatusCodeRange], None]
+        middleware_spans=True,  # type: bool
+        http_methods_to_capture=DEFAULT_HTTP_METHODS_TO_CAPTURE,  # type: tuple[str, ...]
+    ):
+        # type: (...) -> None
+        if transaction_style not in TRANSACTION_STYLE_VALUES:
+            raise ValueError(
+                "Invalid value for transaction_style: %s (must be in %s)"
+                % (transaction_style, TRANSACTION_STYLE_VALUES)
+            )
+        self.transaction_style = transaction_style
+        self.middleware_spans = middleware_spans
+        self.http_methods_to_capture = tuple(map(str.upper, http_methods_to_capture))
+
+        if isinstance(failed_request_status_codes, Set):
+            self.failed_request_status_codes = (
+                failed_request_status_codes
+            )  # type: Container[int]
+        else:
+            warnings.warn(
+                "Passing a list or None for failed_request_status_codes is deprecated. "
+                "Please pass a set of int instead.",
+                DeprecationWarning,
+                stacklevel=2,
+            )
+
+            if failed_request_status_codes is None:
+                self.failed_request_status_codes = _DEFAULT_FAILED_REQUEST_STATUS_CODES
+            else:
+                self.failed_request_status_codes = HttpCodeRangeContainer(
+                    failed_request_status_codes
+                )
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        version = parse_version(STARLETTE_VERSION)
+
+        if version is None:
+            raise DidNotEnable(
+                "Unparsable Starlette version: {}".format(STARLETTE_VERSION)
+            )
+
+        patch_middlewares()
+        patch_asgi_app()
+        patch_request_response()
+
+        if version >= (0, 24):
+            patch_templates()
+
+
+def _enable_span_for_middleware(middleware_class):
+    # type: (Any) -> type
+    old_call = middleware_class.__call__
+
+    async def _create_span_call(app, scope, receive, send, **kwargs):
+        # type: (Any, Dict[str, Any], Callable[[], Awaitable[Dict[str, Any]]], Callable[[Dict[str, Any]], Awaitable[None]], Any) -> None
+        integration = sentry_sdk.get_client().get_integration(StarletteIntegration)
+        if integration is None or not integration.middleware_spans:
+            return await old_call(app, scope, receive, send, **kwargs)
+
+        middleware_name = app.__class__.__name__
+
+        # Update transaction name with middleware name
+        name, source = _get_transaction_from_middleware(app, scope, integration)
+        if name is not None:
+            sentry_sdk.get_current_scope().set_transaction_name(
+                name,
+                source=source,
+            )
+
+        with sentry_sdk.start_span(
+            op=OP.MIDDLEWARE_STARLETTE,
+            name=middleware_name,
+            origin=StarletteIntegration.origin,
+        ) as middleware_span:
+            middleware_span.set_tag("starlette.middleware_name", middleware_name)
+
+            # Creating spans for the "receive" callback
+            async def _sentry_receive(*args, **kwargs):
+                # type: (*Any, **Any) -> Any
+                with sentry_sdk.start_span(
+                    op=OP.MIDDLEWARE_STARLETTE_RECEIVE,
+                    name=getattr(receive, "__qualname__", str(receive)),
+                    origin=StarletteIntegration.origin,
+                ) as span:
+                    span.set_tag("starlette.middleware_name", middleware_name)
+                    return await receive(*args, **kwargs)
+
+            receive_name = getattr(receive, "__name__", str(receive))
+            receive_patched = receive_name == "_sentry_receive"
+            new_receive = _sentry_receive if not receive_patched else receive
+
+            # Creating spans for the "send" callback
+            async def _sentry_send(*args, **kwargs):
+                # type: (*Any, **Any) -> Any
+                with sentry_sdk.start_span(
+                    op=OP.MIDDLEWARE_STARLETTE_SEND,
+                    name=getattr(send, "__qualname__", str(send)),
+                    origin=StarletteIntegration.origin,
+                ) as span:
+                    span.set_tag("starlette.middleware_name", middleware_name)
+                    return await send(*args, **kwargs)
+
+            send_name = getattr(send, "__name__", str(send))
+            send_patched = send_name == "_sentry_send"
+            new_send = _sentry_send if not send_patched else send
+
+            return await old_call(app, scope, new_receive, new_send, **kwargs)
+
+    not_yet_patched = old_call.__name__ not in [
+        "_create_span_call",
+        "_sentry_authenticationmiddleware_call",
+        "_sentry_exceptionmiddleware_call",
+    ]
+
+    if not_yet_patched:
+        middleware_class.__call__ = _create_span_call
+
+    return middleware_class
+
+
+@ensure_integration_enabled(StarletteIntegration)
+def _capture_exception(exception, handled=False):
+    # type: (BaseException, bool) -> None
+    event, hint = event_from_exception(
+        exception,
+        client_options=sentry_sdk.get_client().options,
+        mechanism={"type": StarletteIntegration.identifier, "handled": handled},
+    )
+
+    sentry_sdk.capture_event(event, hint=hint)
+
+
+def patch_exception_middleware(middleware_class):
+    # type: (Any) -> None
+    """
+    Capture all exceptions in the Starlette app and
+    also extract user information.
+    """
+    old_middleware_init = middleware_class.__init__
+
+    not_yet_patched = "_sentry_middleware_init" not in str(old_middleware_init)
+
+    if not_yet_patched:
+
+        def _sentry_middleware_init(self, *args, **kwargs):
+            # type: (Any, *Any, **Any) -> None
+            old_middleware_init(self, *args, **kwargs)
+
+            # Patch existing exception handlers
+            old_handlers = self._exception_handlers.copy()
+
+            async def _sentry_patched_exception_handler(self, *args, **kwargs):
+                # type: (Any, *Any, **Any) -> None
+                integration = sentry_sdk.get_client().get_integration(
+                    StarletteIntegration
+                )
+
+                exp = args[0]
+
+                if integration is not None:
+                    is_http_server_error = (
+                        hasattr(exp, "status_code")
+                        and isinstance(exp.status_code, int)
+                        and exp.status_code in integration.failed_request_status_codes
+                    )
+                    if is_http_server_error:
+                        _capture_exception(exp, handled=True)
+
+                # Find a matching handler
+                old_handler = None
+                for cls in type(exp).__mro__:
+                    if cls in old_handlers:
+                        old_handler = old_handlers[cls]
+                        break
+
+                if old_handler is None:
+                    return
+
+                if _is_async_callable(old_handler):
+                    return await old_handler(self, *args, **kwargs)
+                else:
+                    return old_handler(self, *args, **kwargs)
+
+            for key in self._exception_handlers.keys():
+                self._exception_handlers[key] = _sentry_patched_exception_handler
+
+        middleware_class.__init__ = _sentry_middleware_init
+
+        old_call = middleware_class.__call__
+
+        async def _sentry_exceptionmiddleware_call(self, scope, receive, send):
+            # type: (Any, Dict[str, Any], Callable[[], Awaitable[Dict[str, Any]]], Callable[[Dict[str, Any]], Awaitable[None]]) -> None
+            # Also add the user that was set by the authentication middleware
+            # that ran before this one. The authentication middleware sets the
+            # user in the ASGI scope and then (in the same function) calls this
+            # exception middleware. If there is no exception (or no handler for
+            # the exception type that occurs), the exception bubbles up, the
+            # auth middleware writes the user into the Sentry scope, and the
+            # ASGI middleware sends everything to Sentry; that case is fine.
+            # But if this exception middleware has a handler for the exception,
+            # it sends the exception directly to Sentry, so we need the user
+            # information right now. This is why we set it here.
+            _add_user_to_sentry_scope(scope)
+            await old_call(self, scope, receive, send)
+
+        middleware_class.__call__ = _sentry_exceptionmiddleware_call
+
+
+@ensure_integration_enabled(StarletteIntegration)
+def _add_user_to_sentry_scope(scope):
+    # type: (Dict[str, Any]) -> None
+    """
+    Extracts user information from the ASGI scope and
+    adds it to Sentry's scope.
+    """
+    if "user" not in scope:
+        return
+
+    if not should_send_default_pii():
+        return
+
+    user_info = {}  # type: Dict[str, Any]
+    starlette_user = scope["user"]
+
+    username = getattr(starlette_user, "username", None)
+    if username:
+        user_info.setdefault("username", starlette_user.username)
+
+    user_id = getattr(starlette_user, "id", None)
+    if user_id:
+        user_info.setdefault("id", starlette_user.id)
+
+    email = getattr(starlette_user, "email", None)
+    if email:
+        user_info.setdefault("email", starlette_user.email)
+
+    sentry_scope = sentry_sdk.get_isolation_scope()
+    sentry_scope.user = user_info
+
+
+def patch_authentication_middleware(middleware_class):
+    # type: (Any) -> None
+    """
+    Add user information to Sentry scope.
+    """
+    old_call = middleware_class.__call__
+
+    not_yet_patched = "_sentry_authenticationmiddleware_call" not in str(old_call)
+
+    if not_yet_patched:
+
+        async def _sentry_authenticationmiddleware_call(self, scope, receive, send):
+            # type: (Any, Dict[str, Any], Callable[[], Awaitable[Dict[str, Any]]], Callable[[Dict[str, Any]], Awaitable[None]]) -> None
+            await old_call(self, scope, receive, send)
+            _add_user_to_sentry_scope(scope)
+
+        middleware_class.__call__ = _sentry_authenticationmiddleware_call
+
+
+def patch_middlewares():
+    # type: () -> None
+    """
+    Patches Starlette's `Middleware` class to record
+    spans for every middleware invoked.
+    """
+    old_middleware_init = Middleware.__init__
+
+    not_yet_patched = "_sentry_middleware_init" not in str(old_middleware_init)
+
+    if not_yet_patched:
+
+        def _sentry_middleware_init(self, cls, *args, **kwargs):
+            # type: (Any, Any, *Any, **Any) -> None
+            if cls == SentryAsgiMiddleware:
+                return old_middleware_init(self, cls, *args, **kwargs)
+
+            span_enabled_cls = _enable_span_for_middleware(cls)
+            old_middleware_init(self, span_enabled_cls, *args, **kwargs)
+
+            if cls == AuthenticationMiddleware:
+                patch_authentication_middleware(cls)
+
+            if cls == ExceptionMiddleware:
+                patch_exception_middleware(cls)
+
+        Middleware.__init__ = _sentry_middleware_init
+
+
+def patch_asgi_app():
+    # type: () -> None
+    """
+    Instrument Starlette ASGI app using the SentryAsgiMiddleware.
+    """
+    old_app = Starlette.__call__
+
+    async def _sentry_patched_asgi_app(self, scope, receive, send):
+        # type: (Starlette, StarletteScope, Receive, Send) -> None
+        integration = sentry_sdk.get_client().get_integration(StarletteIntegration)
+        if integration is None:
+            return await old_app(self, scope, receive, send)
+
+        middleware = SentryAsgiMiddleware(
+            lambda *a, **kw: old_app(self, *a, **kw),
+            mechanism_type=StarletteIntegration.identifier,
+            transaction_style=integration.transaction_style,
+            span_origin=StarletteIntegration.origin,
+            http_methods_to_capture=(
+                integration.http_methods_to_capture
+                if integration
+                else DEFAULT_HTTP_METHODS_TO_CAPTURE
+            ),
+        )
+
+        middleware.__call__ = middleware._run_asgi3
+        return await middleware(scope, receive, send)
+
+    Starlette.__call__ = _sentry_patched_asgi_app
+
+
+# This was vendored in from Starlette to support Starlette 0.19.1 because
+# this function was only introduced in 0.20.x
+def _is_async_callable(obj):
+    # type: (Any) -> bool
+    while isinstance(obj, functools.partial):
+        obj = obj.func
+
+    return asyncio.iscoroutinefunction(obj) or (
+        callable(obj) and asyncio.iscoroutinefunction(obj.__call__)
+    )
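+
+# For illustration: functools.partial wrappers are unwrapped first, so a
+# partial over an async def (or an object with an async __call__) is also
+# detected as async-callable.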
+
+
+def patch_request_response():
+    # type: () -> None
+    old_request_response = starlette.routing.request_response
+
+    def _sentry_request_response(func):
+        # type: (Callable[[Any], Any]) -> ASGIApp
+        old_func = func
+
+        is_coroutine = _is_async_callable(old_func)
+        if is_coroutine:
+
+            async def _sentry_async_func(*args, **kwargs):
+                # type: (*Any, **Any) -> Any
+                integration = sentry_sdk.get_client().get_integration(
+                    StarletteIntegration
+                )
+                if integration is None:
+                    return await old_func(*args, **kwargs)
+
+                request = args[0]
+
+                _set_transaction_name_and_source(
+                    sentry_sdk.get_current_scope(),
+                    integration.transaction_style,
+                    request,
+                )
+
+                sentry_scope = sentry_sdk.get_isolation_scope()
+                extractor = StarletteRequestExtractor(request)
+                info = await extractor.extract_request_info()
+
+                def _make_request_event_processor(req, integration):
+                    # type: (Any, Any) -> Callable[[Event, dict[str, Any]], Event]
+                    def event_processor(event, hint):
+                        # type: (Event, Dict[str, Any]) -> Event
+
+                        # Add info from request to event
+                        request_info = event.get("request", {})
+                        if info:
+                            if "cookies" in info:
+                                request_info["cookies"] = info["cookies"]
+                            if "data" in info:
+                                request_info["data"] = info["data"]
+                        event["request"] = deepcopy(request_info)
+
+                        return event
+
+                    return event_processor
+
+                sentry_scope._name = StarletteIntegration.identifier
+                sentry_scope.add_event_processor(
+                    _make_request_event_processor(request, integration)
+                )
+
+                return await old_func(*args, **kwargs)
+
+            func = _sentry_async_func
+
+        else:
+
+            @functools.wraps(old_func)
+            def _sentry_sync_func(*args, **kwargs):
+                # type: (*Any, **Any) -> Any
+                integration = sentry_sdk.get_client().get_integration(
+                    StarletteIntegration
+                )
+                if integration is None:
+                    return old_func(*args, **kwargs)
+
+                current_scope = sentry_sdk.get_current_scope()
+                if current_scope.transaction is not None:
+                    current_scope.transaction.update_active_thread()
+
+                sentry_scope = sentry_sdk.get_isolation_scope()
+                if sentry_scope.profile is not None:
+                    sentry_scope.profile.update_active_thread_id()
+
+                request = args[0]
+
+                _set_transaction_name_and_source(
+                    sentry_scope, integration.transaction_style, request
+                )
+
+                extractor = StarletteRequestExtractor(request)
+                cookies = extractor.extract_cookies_from_request()
+
+                def _make_request_event_processor(req, integration):
+                    # type: (Any, Any) -> Callable[[Event, dict[str, Any]], Event]
+                    def event_processor(event, hint):
+                        # type: (Event, dict[str, Any]) -> Event
+
+                        # Extract information from request
+                        request_info = event.get("request", {})
+                        if cookies:
+                            request_info["cookies"] = cookies
+
+                        event["request"] = deepcopy(request_info)
+
+                        return event
+
+                    return event_processor
+
+                sentry_scope._name = StarletteIntegration.identifier
+                sentry_scope.add_event_processor(
+                    _make_request_event_processor(request, integration)
+                )
+
+                return old_func(*args, **kwargs)
+
+            func = _sentry_sync_func
+
+        return old_request_response(func)
+
+    starlette.routing.request_response = _sentry_request_response
+
+
+def patch_templates():
+    # type: () -> None
+
+    # If markupsafe is not installed, then Jinja2 is not installed
+    # (markupsafe is a dependency of Jinja2). In this case we do not
+    # need to patch the Jinja2Templates class.
+    try:
+        from markupsafe import Markup
+    except ImportError:
+        return  # Nothing to do
+
+    from starlette.templating import Jinja2Templates  # type: ignore
+
+    old_jinja2templates_init = Jinja2Templates.__init__
+
+    not_yet_patched = "_sentry_jinja2templates_init" not in str(
+        old_jinja2templates_init
+    )
+
+    if not_yet_patched:
+
+        def _sentry_jinja2templates_init(self, *args, **kwargs):
+            # type: (Jinja2Templates, *Any, **Any) -> None
+            def add_sentry_trace_meta(request):
+                # type: (Request) -> Dict[str, Any]
+                trace_meta = Markup(
+                    sentry_sdk.get_current_scope().trace_propagation_meta()
+                )
+                return {
+                    "sentry_trace_meta": trace_meta,
+                }
+
+            kwargs.setdefault("context_processors", [])
+
+            if add_sentry_trace_meta not in kwargs["context_processors"]:
+                kwargs["context_processors"].append(add_sentry_trace_meta)
+
+            return old_jinja2templates_init(self, *args, **kwargs)
+
+        Jinja2Templates.__init__ = _sentry_jinja2templates_init
+
+
+class StarletteRequestExtractor:
+    """
+    Extracts useful information from the Starlette request
+    (like form data or cookies) and adds it to the Sentry event.
+    """
+
+    request = None  # type: Request
+
+    def __init__(self, request):
+        # type: (StarletteRequestExtractor, Request) -> None
+        self.request = request
+
+    def extract_cookies_from_request(self):
+        # type: (StarletteRequestExtractor) -> Optional[Dict[str, Any]]
+        cookies = None  # type: Optional[Dict[str, Any]]
+        if should_send_default_pii():
+            cookies = self.cookies()
+
+        return cookies
+
+    async def extract_request_info(self):
+        # type: (StarletteRequestExtractor) -> Optional[Dict[str, Any]]
+        client = sentry_sdk.get_client()
+
+        request_info = {}  # type: Dict[str, Any]
+
+        with capture_internal_exceptions():
+            # Add cookies
+            if should_send_default_pii():
+                request_info["cookies"] = self.cookies()
+
+            # If there is no body, just return the cookies
+            content_length = await self.content_length()
+            if not content_length:
+                return request_info
+
+            # Add annotation if body is too big
+            if content_length and not request_body_within_bounds(
+                client, content_length
+            ):
+                request_info["data"] = AnnotatedValue.removed_because_over_size_limit()
+                return request_info
+
+            # Add JSON body, if it is a JSON request
+            json = await self.json()
+            if json:
+                request_info["data"] = json
+                return request_info
+
+            # Add form as key/value pairs, if request has form data
+            form = await self.form()
+            if form:
+                form_data = {}
+                for key, val in form.items():
+                    is_file = isinstance(val, UploadFile)
+                    form_data[key] = (
+                        val
+                        if not is_file
+                        else AnnotatedValue.removed_because_raw_data()
+                    )
+
+                request_info["data"] = form_data
+                return request_info
+
+            # Raw data, do not add body just an annotation
+            request_info["data"] = AnnotatedValue.removed_because_raw_data()
+            return request_info
+
+    async def content_length(self):
+        # type: (StarletteRequestExtractor) -> Optional[int]
+        if "content-length" in self.request.headers:
+            return int(self.request.headers["content-length"])
+
+        return None
+
+    def cookies(self):
+        # type: (StarletteRequestExtractor) -> Dict[str, Any]
+        return self.request.cookies
+
+    async def form(self):
+        # type: (StarletteRequestExtractor) -> Any
+        if multipart is None:
+            return None
+
+        # Parse the body first to get it cached, as Starlette does not cache
+        # form() the way it does body() and json():
+        # https://github.com/encode/starlette/discussions/1933
+        # Calling `.form()` without calling `.body()` first can
+        # break the user's project.
+        await self.request.body()
+
+        return await self.request.form()
+
+    def is_json(self):
+        # type: (StarletteRequestExtractor) -> bool
+        return _is_json_content_type(self.request.headers.get("content-type"))
+
+    async def json(self):
+        # type: (StarletteRequestExtractor) -> Optional[Dict[str, Any]]
+        if not self.is_json():
+            return None
+
+        return await self.request.json()
+
+
+def _transaction_name_from_router(scope):
+    # type: (StarletteScope) -> Optional[str]
+    router = scope.get("router")
+    if not router:
+        return None
+
+    for route in router.routes:
+        match = route.matches(scope)
+        if match[0] == Match.FULL:
+            try:
+                return route.path
+            except AttributeError:
+                # routes added via app.host() won't have a path attribute
+                return scope.get("path")
+
+    return None
+
+
+def _set_transaction_name_and_source(scope, transaction_style, request):
+    # type: (sentry_sdk.Scope, str, Any) -> None
+    name = None
+    source = SOURCE_FOR_STYLE[transaction_style]
+
+    if transaction_style == "endpoint":
+        endpoint = request.scope.get("endpoint")
+        if endpoint:
+            name = transaction_from_function(endpoint) or None
+
+    elif transaction_style == "url":
+        name = _transaction_name_from_router(request.scope)
+
+    if name is None:
+        name = _DEFAULT_TRANSACTION_NAME
+        source = TransactionSource.ROUTE
+
+    scope.set_transaction_name(name, source=source)
+    logger.debug(
+        "[Starlette] Set transaction name and source on scope: %s / %s", name, source
+    )
+
+
+def _get_transaction_from_middleware(app, asgi_scope, integration):
+    # type: (Any, Dict[str, Any], StarletteIntegration) -> Tuple[Optional[str], Optional[str]]
+    name = None
+    source = None
+
+    if integration.transaction_style == "endpoint":
+        name = transaction_from_function(app.__class__)
+        source = TransactionSource.COMPONENT
+    elif integration.transaction_style == "url":
+        name = _transaction_name_from_router(asgi_scope)
+        source = TransactionSource.ROUTE
+
+    return name, source
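+
+
+# A minimal enablement sketch, assuming a valid DSN (the integration is also
+# enabled automatically when Starlette is importable):
+#
+#     import sentry_sdk
+#     from sentry_sdk.integrations.starlette import StarletteIntegration
+#     from starlette.applications import Starlette
+#     from starlette.responses import PlainTextResponse
+#     from starlette.routing import Route
+#
+#     sentry_sdk.init(
+#         dsn="...",
+#         traces_sample_rate=1.0,
+#         integrations=[StarletteIntegration(transaction_style="endpoint")],
+#     )
+#
+#     async def homepage(request):
+#         return PlainTextResponse("ok")
+#
+#     app = Starlette(routes=[Route("/", homepage)])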
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/starlite.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/starlite.py
new file mode 100644
index 00000000..24707a18
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/starlite.py
@@ -0,0 +1,292 @@
+import sentry_sdk
+from sentry_sdk.consts import OP
+from sentry_sdk.integrations import DidNotEnable, Integration
+from sentry_sdk.integrations.asgi import SentryAsgiMiddleware
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.tracing import SOURCE_FOR_STYLE, TransactionSource
+from sentry_sdk.utils import (
+    ensure_integration_enabled,
+    event_from_exception,
+    transaction_from_function,
+)
+
+try:
+    from starlite import Request, Starlite, State  # type: ignore
+    from starlite.handlers.base import BaseRouteHandler  # type: ignore
+    from starlite.middleware import DefineMiddleware  # type: ignore
+    from starlite.plugins.base import get_plugin_for_value  # type: ignore
+    from starlite.routes.http import HTTPRoute  # type: ignore
+    from starlite.utils import ConnectionDataExtractor, is_async_callable, Ref  # type: ignore
+    from pydantic import BaseModel  # type: ignore
+except ImportError:
+    raise DidNotEnable("Starlite is not installed")
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any, Optional, Union
+    from starlite.types import (  # type: ignore
+        ASGIApp,
+        Hint,
+        HTTPReceiveMessage,
+        HTTPScope,
+        Message,
+        Middleware,
+        Receive,
+        Scope as StarliteScope,
+        Send,
+        WebSocketReceiveMessage,
+    )
+    from starlite import MiddlewareProtocol
+    from sentry_sdk._types import Event
+
+
+_DEFAULT_TRANSACTION_NAME = "generic Starlite request"
+
+
+class StarliteIntegration(Integration):
+    identifier = "starlite"
+    origin = f"auto.http.{identifier}"
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        patch_app_init()
+        patch_middlewares()
+        patch_http_route_handle()
+
+
+class SentryStarliteASGIMiddleware(SentryAsgiMiddleware):
+    def __init__(self, app, span_origin=StarliteIntegration.origin):
+        # type: (ASGIApp, str) -> None
+        super().__init__(
+            app=app,
+            unsafe_context_data=False,
+            transaction_style="endpoint",
+            mechanism_type="asgi",
+            span_origin=span_origin,
+        )
+
+
+def patch_app_init():
+    # type: () -> None
+    """
+    Replaces the Starlite class's `__init__` function in order to inject `after_exception` handlers and set the
+    `SentryStarliteASGIMiddleware` as the outermost middleware in the stack.
+    See:
+    - https://starlite-api.github.io/starlite/usage/0-the-starlite-app/5-application-hooks/#after-exception
+    - https://starlite-api.github.io/starlite/usage/7-middleware/0-middleware-intro/
+    """
+    old__init__ = Starlite.__init__
+
+    @ensure_integration_enabled(StarliteIntegration, old__init__)
+    def injection_wrapper(self, *args, **kwargs):
+        # type: (Starlite, *Any, **Any) -> None
+        after_exception = kwargs.pop("after_exception", [])
+        kwargs.update(
+            after_exception=[
+                exception_handler,
+                *(
+                    after_exception
+                    if isinstance(after_exception, list)
+                    else [after_exception]
+                ),
+            ]
+        )
+
+        SentryStarliteASGIMiddleware.__call__ = SentryStarliteASGIMiddleware._run_asgi3  # type: ignore
+        middleware = kwargs.get("middleware") or []
+        kwargs["middleware"] = [SentryStarliteASGIMiddleware, *middleware]
+        old__init__(self, *args, **kwargs)
+
+    Starlite.__init__ = injection_wrapper
+
+
+def patch_middlewares():
+    # type: () -> None
+    old_resolve_middleware_stack = BaseRouteHandler.resolve_middleware
+
+    @ensure_integration_enabled(StarliteIntegration, old_resolve_middleware_stack)
+    def resolve_middleware_wrapper(self):
+        # type: (BaseRouteHandler) -> list[Middleware]
+        return [
+            enable_span_for_middleware(middleware)
+            for middleware in old_resolve_middleware_stack(self)
+        ]
+
+    BaseRouteHandler.resolve_middleware = resolve_middleware_wrapper
+
+
+def enable_span_for_middleware(middleware):
+    # type: (Middleware) -> Middleware
+    if (
+        not hasattr(middleware, "__call__")  # noqa: B004
+        or middleware is SentryStarliteASGIMiddleware
+    ):
+        return middleware
+
+    if isinstance(middleware, DefineMiddleware):
+        old_call = middleware.middleware.__call__  # type: ASGIApp
+    else:
+        old_call = middleware.__call__
+
+    async def _create_span_call(self, scope, receive, send):
+        # type: (MiddlewareProtocol, StarliteScope, Receive, Send) -> None
+        if sentry_sdk.get_client().get_integration(StarliteIntegration) is None:
+            return await old_call(self, scope, receive, send)
+
+        middleware_name = self.__class__.__name__
+        with sentry_sdk.start_span(
+            op=OP.MIDDLEWARE_STARLITE,
+            name=middleware_name,
+            origin=StarliteIntegration.origin,
+        ) as middleware_span:
+            middleware_span.set_tag("starlite.middleware_name", middleware_name)
+
+            # Creating spans for the "receive" callback
+            async def _sentry_receive(*args, **kwargs):
+                # type: (*Any, **Any) -> Union[HTTPReceiveMessage, WebSocketReceiveMessage]
+                if sentry_sdk.get_client().get_integration(StarliteIntegration) is None:
+                    return await receive(*args, **kwargs)
+                with sentry_sdk.start_span(
+                    op=OP.MIDDLEWARE_STARLITE_RECEIVE,
+                    name=getattr(receive, "__qualname__", str(receive)),
+                    origin=StarliteIntegration.origin,
+                ) as span:
+                    span.set_tag("starlite.middleware_name", middleware_name)
+                    return await receive(*args, **kwargs)
+
+            receive_name = getattr(receive, "__name__", str(receive))
+            receive_patched = receive_name == "_sentry_receive"
+            new_receive = _sentry_receive if not receive_patched else receive
+
+            # Creating spans for the "send" callback
+            async def _sentry_send(message):
+                # type: (Message) -> None
+                if sentry_sdk.get_client().get_integration(StarliteIntegration) is None:
+                    return await send(message)
+                with sentry_sdk.start_span(
+                    op=OP.MIDDLEWARE_STARLITE_SEND,
+                    name=getattr(send, "__qualname__", str(send)),
+                    origin=StarliteIntegration.origin,
+                ) as span:
+                    span.set_tag("starlite.middleware_name", middleware_name)
+                    return await send(message)
+
+            send_name = getattr(send, "__name__", str(send))
+            send_patched = send_name == "_sentry_send"
+            new_send = _sentry_send if not send_patched else send
+
+            return await old_call(self, scope, new_receive, new_send)
+
+    not_yet_patched = old_call.__name__ not in ["_create_span_call"]
+
+    if not_yet_patched:
+        if isinstance(middleware, DefineMiddleware):
+            middleware.middleware.__call__ = _create_span_call
+        else:
+            middleware.__call__ = _create_span_call
+
+    return middleware
+
+
+def patch_http_route_handle():
+    # type: () -> None
+    old_handle = HTTPRoute.handle
+
+    async def handle_wrapper(self, scope, receive, send):
+        # type: (HTTPRoute, HTTPScope, Receive, Send) -> None
+        if sentry_sdk.get_client().get_integration(StarliteIntegration) is None:
+            return await old_handle(self, scope, receive, send)
+
+        sentry_scope = sentry_sdk.get_isolation_scope()
+        request = scope["app"].request_class(
+            scope=scope, receive=receive, send=send
+        )  # type: Request[Any, Any]
+        extracted_request_data = ConnectionDataExtractor(
+            parse_body=True, parse_query=True
+        )(request)
+        body = extracted_request_data.pop("body")
+
+        request_data = await body
+
+        def event_processor(event, _):
+            # type: (Event, Hint) -> Event
+            route_handler = scope.get("route_handler")
+
+            request_info = event.get("request", {})
+            request_info["content_length"] = len(scope.get("_body", b""))
+            if should_send_default_pii():
+                request_info["cookies"] = extracted_request_data["cookies"]
+            if request_data is not None:
+                request_info["data"] = request_data
+
+            func = None
+            if route_handler.name is not None:
+                tx_name = route_handler.name
+            elif isinstance(route_handler.fn, Ref):
+                func = route_handler.fn.value
+            else:
+                func = route_handler.fn
+            if func is not None:
+                tx_name = transaction_from_function(func)
+
+            tx_info = {"source": SOURCE_FOR_STYLE["endpoint"]}
+
+            if not tx_name:
+                tx_name = _DEFAULT_TRANSACTION_NAME
+                tx_info = {"source": TransactionSource.ROUTE}
+
+            event.update(
+                {
+                    "request": request_info,
+                    "transaction": tx_name,
+                    "transaction_info": tx_info,
+                }
+            )
+            return event
+
+        sentry_scope._name = StarliteIntegration.identifier
+        sentry_scope.add_event_processor(event_processor)
+
+        return await old_handle(self, scope, receive, send)
+
+    HTTPRoute.handle = handle_wrapper
+
+
+def retrieve_user_from_scope(scope):
+    # type: (StarliteScope) -> Optional[dict[str, Any]]
+    scope_user = scope.get("user")
+    if not scope_user:
+        return None
+    if isinstance(scope_user, dict):
+        return scope_user
+    if isinstance(scope_user, BaseModel):
+        return scope_user.dict()
+    if hasattr(scope_user, "asdict"):  # dataclasses
+        return scope_user.asdict()
+
+    plugin = get_plugin_for_value(scope_user)
+    if plugin and not is_async_callable(plugin.to_dict):
+        return plugin.to_dict(scope_user)
+
+    return None
+
+
+@ensure_integration_enabled(StarliteIntegration)
+def exception_handler(exc, scope, _):
+    # type: (Exception, StarliteScope, State) -> None
+    user_info = None  # type: Optional[dict[str, Any]]
+    if should_send_default_pii():
+        user_info = retrieve_user_from_scope(scope)
+    if user_info and isinstance(user_info, dict):
+        sentry_scope = sentry_sdk.get_isolation_scope()
+        sentry_scope.set_user(user_info)
+
+    event, hint = event_from_exception(
+        exc,
+        client_options=sentry_sdk.get_client().options,
+        mechanism={"type": StarliteIntegration.identifier, "handled": False},
+    )
+
+    sentry_sdk.capture_event(event, hint=hint)
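+
+
+# A minimal, hypothetical sketch (not part of the integration itself) of how
+# `retrieve_user_from_scope` resolves the user representations it supports;
+# the plain-dict case needs no running Starlite app at all:
+if __name__ == "__main__":
+    fake_scope = {"user": {"id": 42, "email": "jane@example.com"}}
+    # Plain dicts are returned as-is; BaseModel and dataclass-like users are
+    # converted via .dict() / .asdict() in the branches above.
+    assert retrieve_user_from_scope(fake_scope) == fake_scope["user"]  # type: ignore[arg-type]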
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/statsig.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/statsig.py
new file mode 100644
index 00000000..1d84eb8a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/statsig.py
@@ -0,0 +1,37 @@
+from functools import wraps
+from typing import Any, TYPE_CHECKING
+
+from sentry_sdk.feature_flags import add_feature_flag
+from sentry_sdk.integrations import Integration, DidNotEnable, _check_minimum_version
+from sentry_sdk.utils import parse_version
+
+try:
+    from statsig import statsig as statsig_module
+    from statsig.version import __version__ as STATSIG_VERSION
+except ImportError:
+    raise DidNotEnable("statsig is not installed")
+
+if TYPE_CHECKING:
+    from statsig.statsig_user import StatsigUser
+
+
+class StatsigIntegration(Integration):
+    identifier = "statsig"
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        version = parse_version(STATSIG_VERSION)
+        _check_minimum_version(StatsigIntegration, version, "statsig")
+
+        # Wrap and patch evaluation method(s) in the statsig module
+        old_check_gate = statsig_module.check_gate
+
+        @wraps(old_check_gate)
+        def sentry_check_gate(user, gate, *args, **kwargs):
+            # type: (StatsigUser, str, *Any, **Any) -> Any
+            enabled = old_check_gate(user, gate, *args, **kwargs)
+            add_feature_flag(gate, enabled)
+            return enabled
+
+        statsig_module.check_gate = sentry_check_gate
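+
+
+# A hypothetical usage sketch (DSN, secret key, and gate name are
+# placeholders): once the integration is set up, every check_gate call is
+# recorded as a feature flag on the current scope and attached to later
+# error events.
+if __name__ == "__main__":
+    import sentry_sdk
+
+    from statsig.statsig_user import StatsigUser
+
+    sentry_sdk.init(
+        dsn="https://examplePublicKey@o0.ingest.sentry.io/0",
+        integrations=[StatsigIntegration()],
+    )
+    statsig_module.initialize("server-secret-key")
+
+    user = StatsigUser(user_id="user-123")
+    # Runs through the wrapped sentry_check_gate defined above, recording the
+    # result as a feature flag on the current scope.
+    enabled = statsig_module.check_gate(user, "my-gate")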
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/stdlib.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/stdlib.py
new file mode 100644
index 00000000..d388c5bc
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/stdlib.py
@@ -0,0 +1,265 @@
+import os
+import subprocess
+import sys
+import platform
+from http.client import HTTPConnection
+
+import sentry_sdk
+from sentry_sdk.consts import OP, SPANDATA
+from sentry_sdk.integrations import Integration
+from sentry_sdk.scope import add_global_event_processor
+from sentry_sdk.tracing_utils import EnvironHeaders, should_propagate_trace
+from sentry_sdk.utils import (
+    SENSITIVE_DATA_SUBSTITUTE,
+    capture_internal_exceptions,
+    ensure_integration_enabled,
+    is_sentry_url,
+    logger,
+    safe_repr,
+    parse_url,
+)
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+    from typing import Callable
+    from typing import Dict
+    from typing import Optional
+    from typing import List
+
+    from sentry_sdk._types import Event, Hint
+
+
+_RUNTIME_CONTEXT = {
+    "name": platform.python_implementation(),
+    "version": "%s.%s.%s" % (sys.version_info[:3]),
+    "build": sys.version,
+}  # type: dict[str, object]
+
+
+class StdlibIntegration(Integration):
+    identifier = "stdlib"
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        _install_httplib()
+        _install_subprocess()
+
+        @add_global_event_processor
+        def add_python_runtime_context(event, hint):
+            # type: (Event, Hint) -> Optional[Event]
+            if sentry_sdk.get_client().get_integration(StdlibIntegration) is not None:
+                contexts = event.setdefault("contexts", {})
+                if isinstance(contexts, dict) and "runtime" not in contexts:
+                    contexts["runtime"] = _RUNTIME_CONTEXT
+
+            return event
+
+
+def _install_httplib():
+    # type: () -> None
+    real_putrequest = HTTPConnection.putrequest
+    real_getresponse = HTTPConnection.getresponse
+
+    def putrequest(self, method, url, *args, **kwargs):
+        # type: (HTTPConnection, str, str, *Any, **Any) -> Any
+        host = self.host
+        port = self.port
+        default_port = self.default_port
+
+        client = sentry_sdk.get_client()
+        if client.get_integration(StdlibIntegration) is None or is_sentry_url(
+            client, host
+        ):
+            return real_putrequest(self, method, url, *args, **kwargs)
+
+        real_url = url
+        if real_url is None or not real_url.startswith(("http://", "https://")):
+            real_url = "%s://%s%s%s" % (
+                "https" if default_port == 443 else "http",
+                host,
+                ":%s" % port if port != default_port else "",
+                url,
+            )
+
+        parsed_url = None
+        with capture_internal_exceptions():
+            parsed_url = parse_url(real_url, sanitize=False)
+
+        span = sentry_sdk.start_span(
+            op=OP.HTTP_CLIENT,
+            name="%s %s"
+            % (method, parsed_url.url if parsed_url else SENSITIVE_DATA_SUBSTITUTE),
+            origin="auto.http.stdlib.httplib",
+        )
+        span.set_data(SPANDATA.HTTP_METHOD, method)
+        if parsed_url is not None:
+            span.set_data("url", parsed_url.url)
+            span.set_data(SPANDATA.HTTP_QUERY, parsed_url.query)
+            span.set_data(SPANDATA.HTTP_FRAGMENT, parsed_url.fragment)
+
+        rv = real_putrequest(self, method, url, *args, **kwargs)
+
+        if should_propagate_trace(client, real_url):
+            for (
+                key,
+                value,
+            ) in sentry_sdk.get_current_scope().iter_trace_propagation_headers(
+                span=span
+            ):
+                logger.debug(
+                    "[Tracing] Adding `{key}` header {value} to outgoing request to {real_url}.".format(
+                        key=key, value=value, real_url=real_url
+                    )
+                )
+                self.putheader(key, value)
+
+        self._sentrysdk_span = span  # type: ignore[attr-defined]
+
+        return rv
+
+    def getresponse(self, *args, **kwargs):
+        # type: (HTTPConnection, *Any, **Any) -> Any
+        span = getattr(self, "_sentrysdk_span", None)
+
+        if span is None:
+            return real_getresponse(self, *args, **kwargs)
+
+        try:
+            rv = real_getresponse(self, *args, **kwargs)
+
+            span.set_http_status(int(rv.status))
+            span.set_data("reason", rv.reason)
+        finally:
+            span.finish()
+
+        return rv
+
+    HTTPConnection.putrequest = putrequest  # type: ignore[method-assign]
+    HTTPConnection.getresponse = getresponse  # type: ignore[method-assign]
+
+
+def _init_argument(args, kwargs, name, position, setdefault_callback=None):
+    # type: (List[Any], Dict[Any, Any], str, int, Optional[Callable[[Any], Any]]) -> Any
+    """
+    Given (*args, **kwargs) of a function call, retrieve (and optionally set a
+    default for) an argument by either name or position.
+
+    This is useful for wrapping functions with complex type signatures and
+    extracting a few arguments without needing to redefine that function's
+    entire type signature.
+    """
+
+    if name in kwargs:
+        rv = kwargs[name]
+        if setdefault_callback is not None:
+            rv = setdefault_callback(rv)
+        if rv is not None:
+            kwargs[name] = rv
+    elif position < len(args):
+        rv = args[position]
+        if setdefault_callback is not None:
+            rv = setdefault_callback(rv)
+        if rv is not None:
+            args[position] = rv
+    else:
+        rv = setdefault_callback and setdefault_callback(None)
+        if rv is not None:
+            kwargs[name] = rv
+
+    return rv
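+
+
+# Illustration (hypothetical values) of the branches above, using the keyword
+# name "env" and its position 10 in subprocess.Popen's signature:
+#
+#     a, kw = [["echo", "hi"]], {}
+#     env = _init_argument(
+#         a, kw, "env", 10, lambda x: dict(x if x is not None else os.environ)
+#     )
+#     # "env" is neither in kw nor at position 10, so the callback receives
+#     # None; a copy of os.environ is returned and stored in kw["env"].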
+
+
+def _install_subprocess():
+    # type: () -> None
+    old_popen_init = subprocess.Popen.__init__
+
+    @ensure_integration_enabled(StdlibIntegration, old_popen_init)
+    def sentry_patched_popen_init(self, *a, **kw):
+        # type: (subprocess.Popen[Any], *Any, **Any) -> None
+        # Convert from tuple to list to be able to set values.
+        a = list(a)
+
+        args = _init_argument(a, kw, "args", 0) or []
+        cwd = _init_argument(a, kw, "cwd", 9)
+
+        # If args is not a list or tuple (e.g. an iterator instead),
+        # let's not use it at all. There are too many things that can go wrong
+        # when trying to collect an iterator into a list and setting that list
+        # into `a` again.
+        #
+        # Also invocations where `args` is not a sequence are not actually
+        # legal. They just happen to work under CPython.
+        description = None
+
+        if isinstance(args, (list, tuple)) and len(args) < 100:
+            with capture_internal_exceptions():
+                description = " ".join(map(str, args))
+
+        if description is None:
+            description = safe_repr(args)
+
+        env = None
+
+        with sentry_sdk.start_span(
+            op=OP.SUBPROCESS,
+            name=description,
+            origin="auto.subprocess.stdlib.subprocess",
+        ) as span:
+            for k, v in sentry_sdk.get_current_scope().iter_trace_propagation_headers(
+                span=span
+            ):
+                if env is None:
+                    env = _init_argument(
+                        a,
+                        kw,
+                        "env",
+                        10,
+                        lambda x: dict(x if x is not None else os.environ),
+                    )
+                env["SUBPROCESS_" + k.upper().replace("-", "_")] = v
+
+            if cwd:
+                span.set_data("subprocess.cwd", cwd)
+
+            rv = old_popen_init(self, *a, **kw)
+
+            span.set_tag("subprocess.pid", self.pid)
+            return rv
+
+    subprocess.Popen.__init__ = sentry_patched_popen_init  # type: ignore
+
+    old_popen_wait = subprocess.Popen.wait
+
+    @ensure_integration_enabled(StdlibIntegration, old_popen_wait)
+    def sentry_patched_popen_wait(self, *a, **kw):
+        # type: (subprocess.Popen[Any], *Any, **Any) -> Any
+        with sentry_sdk.start_span(
+            op=OP.SUBPROCESS_WAIT,
+            origin="auto.subprocess.stdlib.subprocess",
+        ) as span:
+            span.set_tag("subprocess.pid", self.pid)
+            return old_popen_wait(self, *a, **kw)
+
+    subprocess.Popen.wait = sentry_patched_popen_wait  # type: ignore
+
+    old_popen_communicate = subprocess.Popen.communicate
+
+    @ensure_integration_enabled(StdlibIntegration, old_popen_communicate)
+    def sentry_patched_popen_communicate(self, *a, **kw):
+        # type: (subprocess.Popen[Any], *Any, **Any) -> Any
+        with sentry_sdk.start_span(
+            op=OP.SUBPROCESS_COMMUNICATE,
+            origin="auto.subprocess.stdlib.subprocess",
+        ) as span:
+            span.set_tag("subprocess.pid", self.pid)
+            return old_popen_communicate(self, *a, **kw)
+
+    subprocess.Popen.communicate = sentry_patched_popen_communicate  # type: ignore
+
+
+def get_subprocess_traceparent_headers():
+    # type: () -> EnvironHeaders
+    return EnvironHeaders(os.environ, prefix="SUBPROCESS_")
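+
+
+# A minimal, hypothetical sketch (DSN is a placeholder) of the subprocess
+# propagation round-trip implemented above:
+if __name__ == "__main__":
+    sentry_sdk.init(
+        dsn="https://examplePublicKey@o0.ingest.sentry.io/0",
+        traces_sample_rate=1.0,
+    )
+    with sentry_sdk.start_transaction(name="parent"):
+        # The patched Popen.__init__ records a SUBPROCESS span and injects the
+        # trace headers as SUBPROCESS_* variables into the child environment.
+        subprocess.Popen([sys.executable, "-c", "pass"]).wait()
+
+    # A child process started as above would recover the propagated headers
+    # with the helper defined above:
+    headers = dict(get_subprocess_traceparent_headers())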
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/strawberry.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/strawberry.py
new file mode 100644
index 00000000..ae7d2730
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/strawberry.py
@@ -0,0 +1,393 @@
+import functools
+import hashlib
+from inspect import isawaitable
+
+import sentry_sdk
+from sentry_sdk.consts import OP
+from sentry_sdk.integrations import _check_minimum_version, Integration, DidNotEnable
+from sentry_sdk.integrations.logging import ignore_logger
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.tracing import TransactionSource
+from sentry_sdk.utils import (
+    capture_internal_exceptions,
+    ensure_integration_enabled,
+    event_from_exception,
+    logger,
+    package_version,
+    _get_installed_modules,
+)
+
+try:
+    from functools import cached_property
+except ImportError:
+    # The strawberry integration requires Python 3.8+. functools.cached_property
+    # was added in 3.8, so this check is technically not needed, but since this
+    # is an auto-enabling integration, this import may still be executed on
+    # older Python versions, so we need to handle the failure gracefully.
+    raise DidNotEnable("strawberry-graphql integration requires Python 3.8 or newer")
+
+try:
+    from strawberry import Schema
+    from strawberry.extensions import SchemaExtension
+    from strawberry.extensions.tracing.utils import (
+        should_skip_tracing as strawberry_should_skip_tracing,
+    )
+    from strawberry.http import async_base_view, sync_base_view
+except ImportError:
+    raise DidNotEnable("strawberry-graphql is not installed")
+
+try:
+    from strawberry.extensions.tracing import (
+        SentryTracingExtension as StrawberrySentryAsyncExtension,
+        SentryTracingExtensionSync as StrawberrySentrySyncExtension,
+    )
+except ImportError:
+    StrawberrySentryAsyncExtension = None
+    StrawberrySentrySyncExtension = None
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any, Callable, Generator, List, Optional
+    from graphql import GraphQLError, GraphQLResolveInfo
+    from strawberry.http import GraphQLHTTPResponse
+    from strawberry.types import ExecutionContext
+    from sentry_sdk._types import Event, EventProcessor
+
+
+ignore_logger("strawberry.execution")
+
+
+class StrawberryIntegration(Integration):
+    identifier = "strawberry"
+    origin = f"auto.graphql.{identifier}"
+
+    def __init__(self, async_execution=None):
+        # type: (Optional[bool]) -> None
+        if async_execution not in (None, False, True):
+            raise ValueError(
+                'Invalid value for async_execution: "{}" (must be bool)'.format(
+                    async_execution
+                )
+            )
+        self.async_execution = async_execution
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        version = package_version("strawberry-graphql")
+        _check_minimum_version(StrawberryIntegration, version, "strawberry-graphql")
+
+        _patch_schema_init()
+        _patch_views()
+
+
+def _patch_schema_init():
+    # type: () -> None
+    old_schema_init = Schema.__init__
+
+    @functools.wraps(old_schema_init)
+    def _sentry_patched_schema_init(self, *args, **kwargs):
+        # type: (Schema, Any, Any) -> None
+        integration = sentry_sdk.get_client().get_integration(StrawberryIntegration)
+        if integration is None:
+            return old_schema_init(self, *args, **kwargs)
+
+        extensions = kwargs.get("extensions") or []
+
+        if integration.async_execution is not None:
+            should_use_async_extension = integration.async_execution
+        else:
+            # try to figure it out ourselves
+            should_use_async_extension = _guess_if_using_async(extensions)
+
+            logger.info(
+                "Assuming strawberry is running %s. If not, initialize it as StrawberryIntegration(async_execution=%s).",
+                "async" if should_use_async_extension else "sync",
+                "False" if should_use_async_extension else "True",
+            )
+
+        # remove the built-in strawberry sentry extension, if present
+        extensions = [
+            extension
+            for extension in extensions
+            if extension
+            not in (StrawberrySentryAsyncExtension, StrawberrySentrySyncExtension)
+        ]
+
+        # add our extension
+        extensions.append(
+            SentryAsyncExtension if should_use_async_extension else SentrySyncExtension
+        )
+
+        kwargs["extensions"] = extensions
+
+        return old_schema_init(self, *args, **kwargs)
+
+    Schema.__init__ = _sentry_patched_schema_init  # type: ignore[method-assign]
+
+
+class SentryAsyncExtension(SchemaExtension):
+    def __init__(
+        self,
+        *,
+        execution_context=None,
+    ):
+        # type: (Any, Optional[ExecutionContext]) -> None
+        if execution_context:
+            self.execution_context = execution_context
+
+    @cached_property
+    def _resource_name(self):
+        # type: () -> str
+        query_hash = self.hash_query(self.execution_context.query)  # type: ignore
+
+        if self.execution_context.operation_name:
+            return "{}:{}".format(self.execution_context.operation_name, query_hash)
+
+        return query_hash
+
+    def hash_query(self, query):
+        # type: (str) -> str
+        return hashlib.md5(query.encode("utf-8")).hexdigest()
+
+    def on_operation(self):
+        # type: () -> Generator[None, None, None]
+        self._operation_name = self.execution_context.operation_name
+
+        operation_type = "query"
+        op = OP.GRAPHQL_QUERY
+
+        if self.execution_context.query is None:
+            self.execution_context.query = ""
+
+        if self.execution_context.query.strip().startswith("mutation"):
+            operation_type = "mutation"
+            op = OP.GRAPHQL_MUTATION
+        elif self.execution_context.query.strip().startswith("subscription"):
+            operation_type = "subscription"
+            op = OP.GRAPHQL_SUBSCRIPTION
+
+        description = operation_type
+        if self._operation_name:
+            description += " {}".format(self._operation_name)
+
+        sentry_sdk.add_breadcrumb(
+            category="graphql.operation",
+            data={
+                "operation_name": self._operation_name,
+                "operation_type": operation_type,
+            },
+        )
+
+        scope = sentry_sdk.get_isolation_scope()
+        event_processor = _make_request_event_processor(self.execution_context)
+        scope.add_event_processor(event_processor)
+
+        span = sentry_sdk.get_current_span()
+        if span:
+            self.graphql_span = span.start_child(
+                op=op,
+                name=description,
+                origin=StrawberryIntegration.origin,
+            )
+        else:
+            self.graphql_span = sentry_sdk.start_span(
+                op=op,
+                name=description,
+                origin=StrawberryIntegration.origin,
+            )
+
+        self.graphql_span.set_data("graphql.operation.type", operation_type)
+        self.graphql_span.set_data("graphql.operation.name", self._operation_name)
+        self.graphql_span.set_data("graphql.document", self.execution_context.query)
+        self.graphql_span.set_data("graphql.resource_name", self._resource_name)
+
+        yield
+
+        transaction = self.graphql_span.containing_transaction
+        if transaction and self.execution_context.operation_name:
+            transaction.name = self.execution_context.operation_name
+            transaction.source = TransactionSource.COMPONENT
+            transaction.op = op
+
+        self.graphql_span.finish()
+
+    def on_validate(self):
+        # type: () -> Generator[None, None, None]
+        self.validation_span = self.graphql_span.start_child(
+            op=OP.GRAPHQL_VALIDATE,
+            name="validation",
+            origin=StrawberryIntegration.origin,
+        )
+
+        yield
+
+        self.validation_span.finish()
+
+    def on_parse(self):
+        # type: () -> Generator[None, None, None]
+        self.parsing_span = self.graphql_span.start_child(
+            op=OP.GRAPHQL_PARSE,
+            name="parsing",
+            origin=StrawberryIntegration.origin,
+        )
+
+        yield
+
+        self.parsing_span.finish()
+
+    def should_skip_tracing(self, _next, info):
+        # type: (Callable[[Any, GraphQLResolveInfo, Any, Any], Any], GraphQLResolveInfo) -> bool
+        return strawberry_should_skip_tracing(_next, info)
+
+    async def _resolve(self, _next, root, info, *args, **kwargs):
+        # type: (Callable[[Any, GraphQLResolveInfo, Any, Any], Any], Any, GraphQLResolveInfo, str, Any) -> Any
+        result = _next(root, info, *args, **kwargs)
+
+        if isawaitable(result):
+            result = await result
+
+        return result
+
+    async def resolve(self, _next, root, info, *args, **kwargs):
+        # type: (Callable[[Any, GraphQLResolveInfo, Any, Any], Any], Any, GraphQLResolveInfo, str, Any) -> Any
+        if self.should_skip_tracing(_next, info):
+            return await self._resolve(_next, root, info, *args, **kwargs)
+
+        field_path = "{}.{}".format(info.parent_type, info.field_name)
+
+        with self.graphql_span.start_child(
+            op=OP.GRAPHQL_RESOLVE,
+            name="resolving {}".format(field_path),
+            origin=StrawberryIntegration.origin,
+        ) as span:
+            span.set_data("graphql.field_name", info.field_name)
+            span.set_data("graphql.parent_type", info.parent_type.name)
+            span.set_data("graphql.field_path", field_path)
+            span.set_data("graphql.path", ".".join(map(str, info.path.as_list())))
+
+            return await self._resolve(_next, root, info, *args, **kwargs)
+
+
+class SentrySyncExtension(SentryAsyncExtension):
+    def resolve(self, _next, root, info, *args, **kwargs):
+        # type: (Callable[[Any, Any, Any, Any], Any], Any, GraphQLResolveInfo, str, Any) -> Any
+        if self.should_skip_tracing(_next, info):
+            return _next(root, info, *args, **kwargs)
+
+        field_path = "{}.{}".format(info.parent_type, info.field_name)
+
+        with self.graphql_span.start_child(
+            op=OP.GRAPHQL_RESOLVE,
+            name="resolving {}".format(field_path),
+            origin=StrawberryIntegration.origin,
+        ) as span:
+            span.set_data("graphql.field_name", info.field_name)
+            span.set_data("graphql.parent_type", info.parent_type.name)
+            span.set_data("graphql.field_path", field_path)
+            span.set_data("graphql.path", ".".join(map(str, info.path.as_list())))
+
+            return _next(root, info, *args, **kwargs)
+
+
+def _patch_views():
+    # type: () -> None
+    old_async_view_handle_errors = async_base_view.AsyncBaseHTTPView._handle_errors
+    old_sync_view_handle_errors = sync_base_view.SyncBaseHTTPView._handle_errors
+
+    def _sentry_patched_async_view_handle_errors(self, errors, response_data):
+        # type: (Any, List[GraphQLError], GraphQLHTTPResponse) -> None
+        old_async_view_handle_errors(self, errors, response_data)
+        _sentry_patched_handle_errors(self, errors, response_data)
+
+    def _sentry_patched_sync_view_handle_errors(self, errors, response_data):
+        # type: (Any, List[GraphQLError], GraphQLHTTPResponse) -> None
+        old_sync_view_handle_errors(self, errors, response_data)
+        _sentry_patched_handle_errors(self, errors, response_data)
+
+    @ensure_integration_enabled(StrawberryIntegration)
+    def _sentry_patched_handle_errors(self, errors, response_data):
+        # type: (Any, List[GraphQLError], GraphQLHTTPResponse) -> None
+        if not errors:
+            return
+
+        scope = sentry_sdk.get_isolation_scope()
+        event_processor = _make_response_event_processor(response_data)
+        scope.add_event_processor(event_processor)
+
+        with capture_internal_exceptions():
+            for error in errors:
+                event, hint = event_from_exception(
+                    error,
+                    client_options=sentry_sdk.get_client().options,
+                    mechanism={
+                        "type": StrawberryIntegration.identifier,
+                        "handled": False,
+                    },
+                )
+                sentry_sdk.capture_event(event, hint=hint)
+
+    async_base_view.AsyncBaseHTTPView._handle_errors = (  # type: ignore[method-assign]
+        _sentry_patched_async_view_handle_errors
+    )
+    sync_base_view.SyncBaseHTTPView._handle_errors = (  # type: ignore[method-assign]
+        _sentry_patched_sync_view_handle_errors
+    )
+
+
+def _make_request_event_processor(execution_context):
+    # type: (ExecutionContext) -> EventProcessor
+
+    def inner(event, hint):
+        # type: (Event, dict[str, Any]) -> Event
+        with capture_internal_exceptions():
+            if should_send_default_pii():
+                request_data = event.setdefault("request", {})
+                request_data["api_target"] = "graphql"
+
+                if not request_data.get("data"):
+                    data = {"query": execution_context.query}  # type: dict[str, Any]
+                    if execution_context.variables:
+                        data["variables"] = execution_context.variables
+                    if execution_context.operation_name:
+                        data["operationName"] = execution_context.operation_name
+
+                    request_data["data"] = data
+
+            else:
+                try:
+                    del event["request"]["data"]
+                except (KeyError, TypeError):
+                    pass
+
+        return event
+
+    return inner
+
+
+def _make_response_event_processor(response_data):
+    # type: (GraphQLHTTPResponse) -> EventProcessor
+
+    def inner(event, hint):
+        # type: (Event, dict[str, Any]) -> Event
+        with capture_internal_exceptions():
+            if should_send_default_pii():
+                contexts = event.setdefault("contexts", {})
+                contexts["response"] = {"data": response_data}
+
+        return event
+
+    return inner
+
+
+def _guess_if_using_async(extensions):
+    # type: (List[SchemaExtension]) -> bool
+    if StrawberrySentryAsyncExtension in extensions:
+        return True
+    elif StrawberrySentrySyncExtension in extensions:
+        return False
+
+    return bool(
+        {"starlette", "starlite", "litestar", "fastapi"} & set(_get_installed_modules())
+    )
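+
+
+# A minimal, hypothetical sketch (DSN is a placeholder): with the integration
+# enabled, constructing a Schema routes through _sentry_patched_schema_init,
+# which swaps in the Sentry extension chosen by `async_execution` or
+# `_guess_if_using_async`.
+if __name__ == "__main__":
+    import strawberry
+
+    sentry_sdk.init(
+        dsn="https://examplePublicKey@o0.ingest.sentry.io/0",
+        integrations=[StrawberryIntegration(async_execution=False)],
+    )
+
+    @strawberry.type
+    class Query:
+        @strawberry.field
+        def hello(self) -> str:
+            return "world"
+
+    # SentrySyncExtension is appended to the schema's extensions here.
+    schema = Schema(query=Query)
+    print(schema.execute_sync("{ hello }").data)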
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/sys_exit.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/sys_exit.py
new file mode 100644
index 00000000..2341e113
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/sys_exit.py
@@ -0,0 +1,70 @@
+import functools
+import sys
+
+import sentry_sdk
+from sentry_sdk.utils import capture_internal_exceptions, event_from_exception
+from sentry_sdk.integrations import Integration
+from sentry_sdk._types import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from collections.abc import Callable
+    from typing import NoReturn, Union
+
+
+class SysExitIntegration(Integration):
+    """Captures sys.exit calls and sends them as events to Sentry.
+
+    By default, SystemExit exceptions are not captured by the SDK. Enabling this integration will capture SystemExit
+    exceptions generated by sys.exit calls and send them to Sentry.
+
+    This integration, in its default configuration, only captures the sys.exit call if the exit code is a non-zero and
+    non-None value (unsuccessful exits). Pass `capture_successful_exits=True` to capture successful exits as well.
+    Note that the integration does not capture SystemExit exceptions raised outside a call to sys.exit.
+    """
+
+    identifier = "sys_exit"
+
+    def __init__(self, *, capture_successful_exits=False):
+        # type: (bool) -> None
+        self._capture_successful_exits = capture_successful_exits
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        SysExitIntegration._patch_sys_exit()
+
+    @staticmethod
+    def _patch_sys_exit():
+        # type: () -> None
+        old_exit = sys.exit  # type: Callable[[Union[str, int, None]], NoReturn]
+
+        @functools.wraps(old_exit)
+        def sentry_patched_exit(__status=0):
+            # type: (Union[str, int, None]) -> NoReturn
+            # If the integration is disabled, exit without capturing anything.
+            # old_exit raises SystemExit, so the call below never returns.
+            integration = sentry_sdk.get_client().get_integration(SysExitIntegration)
+            if integration is None:
+                old_exit(__status)
+
+            try:
+                old_exit(__status)
+            except SystemExit as e:
+                with capture_internal_exceptions():
+                    if integration._capture_successful_exits or __status not in (
+                        0,
+                        None,
+                    ):
+                        _capture_exception(e)
+                raise e
+
+        sys.exit = sentry_patched_exit
+
+
+def _capture_exception(exc):
+    # type: (SystemExit) -> None
+    event, hint = event_from_exception(
+        exc,
+        client_options=sentry_sdk.get_client().options,
+        mechanism={"type": SysExitIntegration.identifier, "handled": False},
+    )
+    sentry_sdk.capture_event(event, hint=hint)
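+
+
+# A hypothetical usage sketch (DSN is a placeholder): with the integration
+# enabled, a non-zero sys.exit produces a Sentry event before the SystemExit
+# propagates.
+if __name__ == "__main__":
+    sentry_sdk.init(
+        dsn="https://examplePublicKey@o0.ingest.sentry.io/0",
+        integrations=[SysExitIntegration()],
+    )
+    sys.exit(1)  # captured by sentry_patched_exit, then re-raised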
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/threading.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/threading.py
new file mode 100644
index 00000000..5de736e2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/threading.py
@@ -0,0 +1,121 @@
+import sys
+from functools import wraps
+from threading import Thread, current_thread
+
+import sentry_sdk
+from sentry_sdk.integrations import Integration
+from sentry_sdk.scope import use_isolation_scope, use_scope
+from sentry_sdk.utils import (
+    event_from_exception,
+    capture_internal_exceptions,
+    logger,
+    reraise,
+)
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+    from typing import TypeVar
+    from typing import Callable
+    from typing import Optional
+
+    from sentry_sdk._types import ExcInfo
+
+    F = TypeVar("F", bound=Callable[..., Any])
+
+
+class ThreadingIntegration(Integration):
+    identifier = "threading"
+
+    def __init__(self, propagate_hub=None, propagate_scope=True):
+        # type: (Optional[bool], bool) -> None
+        if propagate_hub is not None:
+            logger.warning(
+                "Deprecated: propagate_hub is deprecated. This will be removed in the future."
+            )
+
+        # Note: propagate_hub never had any effect on the propagation of scope
+        # data; scope data was always propagated no matter what value
+        # propagate_hub had. This is why propagate_scope defaults to True.
+
+        self.propagate_scope = propagate_scope
+
+        if propagate_hub is not None:
+            self.propagate_scope = propagate_hub
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        old_start = Thread.start
+
+        @wraps(old_start)
+        def sentry_start(self, *a, **kw):
+            # type: (Thread, *Any, **Any) -> Any
+            integration = sentry_sdk.get_client().get_integration(ThreadingIntegration)
+            if integration is None:
+                return old_start(self, *a, **kw)
+
+            if integration.propagate_scope:
+                isolation_scope = sentry_sdk.get_isolation_scope()
+                current_scope = sentry_sdk.get_current_scope()
+            else:
+                isolation_scope = None
+                current_scope = None
+
+            # Patching instance methods in `start()` creates a reference cycle if
+            # done in a naive way. See
+            # https://github.com/getsentry/sentry-python/pull/434
+            #
+            # Using the threading module's current_thread() API accesses the
+            # current thread instance without holding a reference to it, which
+            # avoids the reference cycle in a simpler way.
+            with capture_internal_exceptions():
+                new_run = _wrap_run(
+                    isolation_scope,
+                    current_scope,
+                    getattr(self.run, "__func__", self.run),
+                )
+                self.run = new_run  # type: ignore
+
+            return old_start(self, *a, **kw)
+
+        Thread.start = sentry_start  # type: ignore
+
+
+def _wrap_run(isolation_scope_to_use, current_scope_to_use, old_run_func):
+    # type: (Optional[sentry_sdk.Scope], Optional[sentry_sdk.Scope], F) -> F
+    @wraps(old_run_func)
+    def run(*a, **kw):
+        # type: (*Any, **Any) -> Any
+        def _run_old_run_func():
+            # type: () -> Any
+            try:
+                self = current_thread()
+                return old_run_func(self, *a, **kw)
+            except Exception:
+                reraise(*_capture_exception())
+
+        if isolation_scope_to_use is not None and current_scope_to_use is not None:
+            with use_isolation_scope(isolation_scope_to_use):
+                with use_scope(current_scope_to_use):
+                    return _run_old_run_func()
+        else:
+            return _run_old_run_func()
+
+    return run  # type: ignore
+
+
+def _capture_exception():
+    # type: () -> ExcInfo
+    exc_info = sys.exc_info()
+
+    client = sentry_sdk.get_client()
+    if client.get_integration(ThreadingIntegration) is not None:
+        event, hint = event_from_exception(
+            exc_info,
+            client_options=client.options,
+            mechanism={"type": "threading", "handled": False},
+        )
+        sentry_sdk.capture_event(event, hint=hint)
+
+    return exc_info
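+
+
+# A minimal, hypothetical sketch (DSN is a placeholder) of scope propagation:
+# tags set on the parent's isolation scope are visible on events captured
+# inside the thread, because sentry_start snapshots both scopes before
+# Thread.start().
+if __name__ == "__main__":
+    sentry_sdk.init(
+        dsn="https://examplePublicKey@o0.ingest.sentry.io/0",
+        integrations=[ThreadingIntegration(propagate_scope=True)],
+    )
+    sentry_sdk.set_tag("started_from", "main-thread")
+
+    def work():
+        # type: () -> None
+        raise ValueError("boom")  # captured with the propagated scope data
+
+    t = Thread(target=work)
+    t.start()
+    t.join()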
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/tornado.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/tornado.py
new file mode 100644
index 00000000..3cd08752
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/tornado.py
@@ -0,0 +1,220 @@
+import weakref
+import contextlib
+from inspect import iscoroutinefunction
+
+import sentry_sdk
+from sentry_sdk.api import continue_trace
+from sentry_sdk.consts import OP
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.tracing import TransactionSource
+from sentry_sdk.utils import (
+    HAS_REAL_CONTEXTVARS,
+    CONTEXTVARS_ERROR_MESSAGE,
+    ensure_integration_enabled,
+    event_from_exception,
+    capture_internal_exceptions,
+    transaction_from_function,
+)
+from sentry_sdk.integrations import _check_minimum_version, Integration, DidNotEnable
+from sentry_sdk.integrations._wsgi_common import (
+    RequestExtractor,
+    _filter_headers,
+    _is_json_content_type,
+)
+from sentry_sdk.integrations.logging import ignore_logger
+
+try:
+    from tornado import version_info as TORNADO_VERSION
+    from tornado.web import RequestHandler, HTTPError
+    from tornado.gen import coroutine
+except ImportError:
+    raise DidNotEnable("Tornado not installed")
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+    from typing import Optional
+    from typing import Dict
+    from typing import Callable
+    from typing import Generator
+
+    from sentry_sdk._types import Event, EventProcessor
+
+
+class TornadoIntegration(Integration):
+    identifier = "tornado"
+    origin = f"auto.http.{identifier}"
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        _check_minimum_version(TornadoIntegration, TORNADO_VERSION)
+
+        if not HAS_REAL_CONTEXTVARS:
+            # Tornado is async. We better have contextvars or we're going to leak
+            # state between requests.
+            raise DidNotEnable(
+                "The tornado integration for Sentry requires Python 3.7+ or the aiocontextvars package"
+                + CONTEXTVARS_ERROR_MESSAGE
+            )
+
+        ignore_logger("tornado.access")
+
+        old_execute = RequestHandler._execute
+
+        awaitable = iscoroutinefunction(old_execute)
+
+        if awaitable:
+            # Starting with Tornado 6, the RequestHandler._execute method is a
+            # standard Python coroutine (async/await).
+            # In that case our wrapper should be a coroutine function too.
+            async def sentry_execute_request_handler(self, *args, **kwargs):
+                # type: (RequestHandler, *Any, **Any) -> Any
+                with _handle_request_impl(self):
+                    return await old_execute(self, *args, **kwargs)
+
+        else:
+
+            @coroutine  # type: ignore
+            def sentry_execute_request_handler(self, *args, **kwargs):  # type: ignore
+                # type: (RequestHandler, *Any, **Any) -> Any
+                with _handle_request_impl(self):
+                    result = yield from old_execute(self, *args, **kwargs)
+                    return result
+
+        RequestHandler._execute = sentry_execute_request_handler
+
+        old_log_exception = RequestHandler.log_exception
+
+        def sentry_log_exception(self, ty, value, tb, *args, **kwargs):
+            # type: (Any, type, BaseException, Any, *Any, **Any) -> Optional[Any]
+            _capture_exception(ty, value, tb)
+            return old_log_exception(self, ty, value, tb, *args, **kwargs)
+
+        RequestHandler.log_exception = sentry_log_exception
+
+
+@contextlib.contextmanager
+def _handle_request_impl(self):
+    # type: (RequestHandler) -> Generator[None, None, None]
+    integration = sentry_sdk.get_client().get_integration(TornadoIntegration)
+
+    if integration is None:
+        # Nothing to do; return so the generator stops after this single
+        # yield (falling through would hit the second yield below, making
+        # @contextlib.contextmanager raise RuntimeError on exit).
+        yield
+        return
+
+    weak_handler = weakref.ref(self)
+
+    with sentry_sdk.isolation_scope() as scope:
+        headers = self.request.headers
+
+        scope.clear_breadcrumbs()
+        processor = _make_event_processor(weak_handler)
+        scope.add_event_processor(processor)
+
+        transaction = continue_trace(
+            headers,
+            op=OP.HTTP_SERVER,
+            # Like with all other integrations, this is our
+            # fallback transaction in case there is no route.
+            # sentry_urldispatcher_resolve is responsible for
+            # setting a transaction name later.
+            name="generic Tornado request",
+            source=TransactionSource.ROUTE,
+            origin=TornadoIntegration.origin,
+        )
+
+        with sentry_sdk.start_transaction(
+            transaction, custom_sampling_context={"tornado_request": self.request}
+        ):
+            yield
+
+
+@ensure_integration_enabled(TornadoIntegration)
+def _capture_exception(ty, value, tb):
+    # type: (type, BaseException, Any) -> None
+    if isinstance(value, HTTPError):
+        return
+
+    event, hint = event_from_exception(
+        (ty, value, tb),
+        client_options=sentry_sdk.get_client().options,
+        mechanism={"type": "tornado", "handled": False},
+    )
+
+    sentry_sdk.capture_event(event, hint=hint)
+
+
+def _make_event_processor(weak_handler):
+    # type: (Callable[[], RequestHandler]) -> EventProcessor
+    def tornado_processor(event, hint):
+        # type: (Event, dict[str, Any]) -> Event
+        handler = weak_handler()
+        if handler is None:
+            return event
+
+        request = handler.request
+
+        with capture_internal_exceptions():
+            method = getattr(handler, handler.request.method.lower())
+            event["transaction"] = transaction_from_function(method) or ""
+            event["transaction_info"] = {"source": TransactionSource.COMPONENT}
+
+        with capture_internal_exceptions():
+            extractor = TornadoRequestExtractor(request)
+            extractor.extract_into_event(event)
+
+            request_info = event["request"]
+
+            request_info["url"] = "%s://%s%s" % (
+                request.protocol,
+                request.host,
+                request.path,
+            )
+
+            request_info["query_string"] = request.query
+            request_info["method"] = request.method
+            request_info["env"] = {"REMOTE_ADDR": request.remote_ip}
+            request_info["headers"] = _filter_headers(dict(request.headers))
+
+        with capture_internal_exceptions():
+            if handler.current_user and should_send_default_pii():
+                event.setdefault("user", {}).setdefault("is_authenticated", True)
+
+        return event
+
+    return tornado_processor
+
+
+class TornadoRequestExtractor(RequestExtractor):
+    def content_length(self):
+        # type: () -> int
+        if self.request.body is None:
+            return 0
+        return len(self.request.body)
+
+    def cookies(self):
+        # type: () -> Dict[str, str]
+        return {k: v.value for k, v in self.request.cookies.items()}
+
+    def raw_data(self):
+        # type: () -> bytes
+        return self.request.body
+
+    def form(self):
+        # type: () -> Dict[str, Any]
+        return {
+            k: [v.decode("latin1", "replace") for v in vs]
+            for k, vs in self.request.body_arguments.items()
+        }
+
+    def is_json(self):
+        # type: () -> bool
+        return _is_json_content_type(self.request.headers.get("content-type"))
+
+    def files(self):
+        # type: () -> Dict[str, Any]
+        return {k: v[0] for k, v in self.request.files.items() if v}
+
+    def size_of_file(self, file):
+        # type: (Any) -> int
+        return len(file.body or ())
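+
+
+# A hypothetical minimal app (DSN, handler, and port are placeholders) showing
+# the integration in use; each request runs inside _handle_request_impl above.
+if __name__ == "__main__":
+    import tornado.ioloop
+    import tornado.web
+
+    sentry_sdk.init(
+        dsn="https://examplePublicKey@o0.ingest.sentry.io/0",
+        integrations=[TornadoIntegration()],
+        traces_sample_rate=1.0,
+    )
+
+    class MainHandler(tornado.web.RequestHandler):
+        def get(self):
+            # type: () -> None
+            1 / 0  # captured via sentry_log_exception above
+
+    tornado.web.Application([(r"/", MainHandler)]).listen(8888)
+    tornado.ioloop.IOLoop.current().start()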
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/trytond.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/trytond.py
new file mode 100644
index 00000000..2c44c593
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/trytond.py
@@ -0,0 +1,50 @@
+import sentry_sdk
+from sentry_sdk.integrations import Integration
+from sentry_sdk.integrations.wsgi import SentryWsgiMiddleware
+from sentry_sdk.utils import ensure_integration_enabled, event_from_exception
+
+from trytond.exceptions import TrytonException  # type: ignore
+from trytond.wsgi import app  # type: ignore
+
+
+# TODO: trytond-worker, trytond-cron and trytond-admin integrations
+
+
+class TrytondWSGIIntegration(Integration):
+    identifier = "trytond_wsgi"
+    origin = f"auto.http.{identifier}"
+
+    def __init__(self):  # type: () -> None
+        pass
+
+    @staticmethod
+    def setup_once():  # type: () -> None
+        app.wsgi_app = SentryWsgiMiddleware(
+            app.wsgi_app,
+            span_origin=TrytondWSGIIntegration.origin,
+        )
+
+        @ensure_integration_enabled(TrytondWSGIIntegration)
+        def error_handler(e):  # type: (Exception) -> None
+            if isinstance(e, TrytonException):
+                return
+            else:
+                client = sentry_sdk.get_client()
+                event, hint = event_from_exception(
+                    e,
+                    client_options=client.options,
+                    mechanism={"type": "trytond", "handled": False},
+                )
+                sentry_sdk.capture_event(event, hint=hint)
+
+        # The expected error handler signature changed when the
+        # error_handler decorator was introduced in Tryton 5.4.
+        if hasattr(app, "error_handler"):
+
+            @app.error_handler
+            def _(app, request, e):  # type: ignore
+                error_handler(e)
+
+        else:
+            app.error_handlers.append(error_handler)
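+
+
+# A hypothetical setup sketch (DSN is a placeholder): Tryton serves through
+# trytond.wsgi.app, so enabling the integration at init time is all that is
+# needed; the middleware and error handler above are installed by setup_once().
+if __name__ == "__main__":
+    sentry_sdk.init(
+        dsn="https://examplePublicKey@o0.ingest.sentry.io/0",
+        integrations=[TrytondWSGIIntegration()],
+    )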
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/typer.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/typer.py
new file mode 100644
index 00000000..8879d6d0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/typer.py
@@ -0,0 +1,60 @@
+import sentry_sdk
+from sentry_sdk.utils import (
+    capture_internal_exceptions,
+    event_from_exception,
+)
+from sentry_sdk.integrations import Integration, DidNotEnable
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Callable
+    from typing import Any
+    from typing import Type
+    from typing import Optional
+
+    from types import TracebackType
+
+    Excepthook = Callable[
+        [Type[BaseException], BaseException, Optional[TracebackType]],
+        Any,
+    ]
+
+try:
+    import typer
+except ImportError:
+    raise DidNotEnable("Typer not installed")
+
+
+class TyperIntegration(Integration):
+    identifier = "typer"
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        typer.main.except_hook = _make_excepthook(typer.main.except_hook)  # type: ignore
+
+
+def _make_excepthook(old_excepthook):
+    # type: (Excepthook) -> Excepthook
+    def sentry_sdk_excepthook(type_, value, traceback):
+        # type: (Type[BaseException], BaseException, Optional[TracebackType]) -> None
+        integration = sentry_sdk.get_client().get_integration(TyperIntegration)
+
+        # Note: If we replace this with ensure_integration_enabled then
+        # we break the exceptiongroup backport;
+        # See: https://github.com/getsentry/sentry-python/issues/3097
+        if integration is None:
+            return old_excepthook(type_, value, traceback)
+
+        with capture_internal_exceptions():
+            event, hint = event_from_exception(
+                (type_, value, traceback),
+                client_options=sentry_sdk.get_client().options,
+                mechanism={"type": "typer", "handled": False},
+            )
+            sentry_sdk.capture_event(event, hint=hint)
+
+        return old_excepthook(type_, value, traceback)
+
+    return sentry_sdk_excepthook
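+
+
+# A hypothetical usage sketch (DSN is a placeholder): uncaught exceptions in
+# Typer commands flow through typer.main.except_hook, which is now wrapped by
+# sentry_sdk_excepthook.
+if __name__ == "__main__":
+    sentry_sdk.init(
+        dsn="https://examplePublicKey@o0.ingest.sentry.io/0",
+        integrations=[TyperIntegration()],
+    )
+
+    def crash():
+        # type: () -> None
+        raise RuntimeError("boom")  # captured, then pretty-printed by Typer
+
+    typer.run(crash)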
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/unleash.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/unleash.py
new file mode 100644
index 00000000..873f36c6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/unleash.py
@@ -0,0 +1,34 @@
+from functools import wraps
+from typing import Any
+
+import sentry_sdk
+from sentry_sdk.integrations import Integration, DidNotEnable
+
+try:
+    from UnleashClient import UnleashClient
+except ImportError:
+    raise DidNotEnable("UnleashClient is not installed")
+
+
+class UnleashIntegration(Integration):
+    identifier = "unleash"
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        # Wrap and patch the evaluation method on the class
+        old_is_enabled = UnleashClient.is_enabled
+
+        @wraps(old_is_enabled)
+        def sentry_is_enabled(self, feature, *args, **kwargs):
+            # type: (UnleashClient, str, *Any, **Any) -> Any
+            enabled = old_is_enabled(self, feature, *args, **kwargs)
+
+            # We have no way of knowing what type of unleash feature this is, so we have to treat
+            # it as a boolean / toggle feature.
+            flags = sentry_sdk.get_current_scope().flags
+            flags.set(feature, enabled)
+
+            return enabled
+
+        UnleashClient.is_enabled = sentry_is_enabled  # type: ignore
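+
+
+# A hypothetical usage sketch (DSN, URL, and flag name are placeholders):
+# every is_enabled call is mirrored into the scope's feature-flag buffer and
+# attached to later error events.
+if __name__ == "__main__":
+    sentry_sdk.init(
+        dsn="https://examplePublicKey@o0.ingest.sentry.io/0",
+        integrations=[UnleashIntegration()],
+    )
+
+    client = UnleashClient(url="https://unleash.example.com/api", app_name="my-app")
+    client.initialize_client()
+    flag_on = client.is_enabled("my-feature")  # recorded via sentry_is_enabled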
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/wsgi.py b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/wsgi.py
new file mode 100644
index 00000000..e628e50e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/integrations/wsgi.py
@@ -0,0 +1,310 @@
+import sys
+from functools import partial
+
+import sentry_sdk
+from sentry_sdk._werkzeug import get_host, _get_headers
+from sentry_sdk.api import continue_trace
+from sentry_sdk.consts import OP
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.integrations._wsgi_common import (
+    DEFAULT_HTTP_METHODS_TO_CAPTURE,
+    _filter_headers,
+    nullcontext,
+)
+from sentry_sdk.sessions import track_session
+from sentry_sdk.scope import use_isolation_scope
+from sentry_sdk.tracing import Transaction, TransactionSource
+from sentry_sdk.utils import (
+    ContextVar,
+    capture_internal_exceptions,
+    event_from_exception,
+    reraise,
+)
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Callable
+    from typing import Dict
+    from typing import Iterator
+    from typing import Any
+    from typing import Tuple
+    from typing import Optional
+    from typing import TypeVar
+    from typing import Protocol
+
+    from sentry_sdk.utils import ExcInfo
+    from sentry_sdk._types import Event, EventProcessor
+
+    WsgiResponseIter = TypeVar("WsgiResponseIter")
+    WsgiResponseHeaders = TypeVar("WsgiResponseHeaders")
+    WsgiExcInfo = TypeVar("WsgiExcInfo")
+
+    class StartResponse(Protocol):
+        def __call__(self, status, response_headers, exc_info=None):  # type: ignore
+            # type: (str, WsgiResponseHeaders, Optional[WsgiExcInfo]) -> WsgiResponseIter
+            pass
+
+
+_wsgi_middleware_applied = ContextVar("sentry_wsgi_middleware_applied")
+
+
+def wsgi_decoding_dance(s, charset="utf-8", errors="replace"):
+    # type: (str, str, str) -> str
+    return s.encode("latin1").decode(charset, errors)
+
+
+def get_request_url(environ, use_x_forwarded_for=False):
+    # type: (Dict[str, str], bool) -> str
+    """Return the absolute URL without query string for the given WSGI
+    environment."""
+    script_name = environ.get("SCRIPT_NAME", "").rstrip("/")
+    path_info = environ.get("PATH_INFO", "").lstrip("/")
+    path = f"{script_name}/{path_info}"
+
+    return "%s://%s/%s" % (
+        environ.get("wsgi.url_scheme"),
+        get_host(environ, use_x_forwarded_for),
+        wsgi_decoding_dance(path).lstrip("/"),
+    )
+
+
+class SentryWsgiMiddleware:
+    __slots__ = (
+        "app",
+        "use_x_forwarded_for",
+        "span_origin",
+        "http_methods_to_capture",
+    )
+
+    def __init__(
+        self,
+        app,  # type: Callable[[Dict[str, str], Callable[..., Any]], Any]
+        use_x_forwarded_for=False,  # type: bool
+        span_origin="manual",  # type: str
+        http_methods_to_capture=DEFAULT_HTTP_METHODS_TO_CAPTURE,  # type: Tuple[str, ...]
+    ):
+        # type: (...) -> None
+        self.app = app
+        self.use_x_forwarded_for = use_x_forwarded_for
+        self.span_origin = span_origin
+        self.http_methods_to_capture = http_methods_to_capture
+
+    def __call__(self, environ, start_response):
+        # type: (Dict[str, str], Callable[..., Any]) -> _ScopedResponse
+        if _wsgi_middleware_applied.get(False):
+            return self.app(environ, start_response)
+
+        _wsgi_middleware_applied.set(True)
+        try:
+            with sentry_sdk.isolation_scope() as scope:
+                with track_session(scope, session_mode="request"):
+                    with capture_internal_exceptions():
+                        scope.clear_breadcrumbs()
+                        scope._name = "wsgi"
+                        scope.add_event_processor(
+                            _make_wsgi_event_processor(
+                                environ, self.use_x_forwarded_for
+                            )
+                        )
+
+                    method = environ.get("REQUEST_METHOD", "").upper()
+                    transaction = None
+                    if method in self.http_methods_to_capture:
+                        transaction = continue_trace(
+                            environ,
+                            op=OP.HTTP_SERVER,
+                            name="generic WSGI request",
+                            source=TransactionSource.ROUTE,
+                            origin=self.span_origin,
+                        )
+
+                    with (
+                        sentry_sdk.start_transaction(
+                            transaction,
+                            custom_sampling_context={"wsgi_environ": environ},
+                        )
+                        if transaction is not None
+                        else nullcontext()
+                    ):
+                        try:
+                            response = self.app(
+                                environ,
+                                partial(
+                                    _sentry_start_response, start_response, transaction
+                                ),
+                            )
+                        except BaseException:
+                            reraise(*_capture_exception())
+        finally:
+            _wsgi_middleware_applied.set(False)
+
+        return _ScopedResponse(scope, response)
+
+
+def _sentry_start_response(  # type: ignore
+    old_start_response,  # type: StartResponse
+    transaction,  # type: Optional[Transaction]
+    status,  # type: str
+    response_headers,  # type: WsgiResponseHeaders
+    exc_info=None,  # type: Optional[WsgiExcInfo]
+):
+    # type: (...) -> WsgiResponseIter
+    with capture_internal_exceptions():
+        status_int = int(status.split(" ", 1)[0])
+        if transaction is not None:
+            transaction.set_http_status(status_int)
+
+    if exc_info is None:
+        # The Django Rest Framework WSGI test client, and likely other
+        # (incorrect) implementations, cannot deal with the exc_info argument
+        # if one is present. Avoid providing a third argument if not necessary.
+        return old_start_response(status, response_headers)
+    else:
+        return old_start_response(status, response_headers, exc_info)
+
+
+def _get_environ(environ):
+    # type: (Dict[str, str]) -> Iterator[Tuple[str, str]]
+    """
+    Returns the explicitly allowlisted environment variables we want to
+    capture (server name, server port, and the remote addr if PII is enabled).
+    """
+    keys = ["SERVER_NAME", "SERVER_PORT"]
+    if should_send_default_pii():
+        # make debugging of proxy setup easier. Proxy headers are
+        # in headers.
+        keys += ["REMOTE_ADDR"]
+
+    for key in keys:
+        if key in environ:
+            yield key, environ[key]
+
+
+def get_client_ip(environ):
+    # type: (Dict[str, str]) -> Optional[Any]
+    """
+    Infer the user IP address from various headers. This cannot be used in
+    security-sensitive situations since the value may be forged by a client,
+    but it's good enough for the event payload.
+    """
+    try:
+        return environ["HTTP_X_FORWARDED_FOR"].split(",")[0].strip()
+    except (KeyError, IndexError):
+        pass
+
+    try:
+        return environ["HTTP_X_REAL_IP"]
+    except KeyError:
+        pass
+
+    return environ.get("REMOTE_ADDR")
+
+
+def _capture_exception():
+    # type: () -> ExcInfo
+    """
+    Captures the current exception and sends it to Sentry.
+    Returns the ExcInfo tuple so it can be reraised afterwards.
+    """
+    exc_info = sys.exc_info()
+    e = exc_info[1]
+
+    # SystemExit(0) is the only uncaught exception that is expected behavior
+    should_skip_capture = isinstance(e, SystemExit) and e.code in (0, None)
+    if not should_skip_capture:
+        event, hint = event_from_exception(
+            exc_info,
+            client_options=sentry_sdk.get_client().options,
+            mechanism={"type": "wsgi", "handled": False},
+        )
+        sentry_sdk.capture_event(event, hint=hint)
+
+    return exc_info
+
+
+class _ScopedResponse:
+    """
+    Uses a separate scope for each response chunk.
+
+    This will make WSGI apps more tolerant against:
+    - WSGI servers streaming responses from a different thread/from
+      different threads than the one that called start_response
+    - close() not being called
+    - WSGI servers streaming responses interleaved from the same thread
+    """
+
+    __slots__ = ("_response", "_scope")
+
+    def __init__(self, scope, response):
+        # type: (sentry_sdk.scope.Scope, Iterator[bytes]) -> None
+        self._scope = scope
+        self._response = response
+
+    def __iter__(self):
+        # type: () -> Iterator[bytes]
+        iterator = iter(self._response)
+
+        while True:
+            with use_isolation_scope(self._scope):
+                try:
+                    chunk = next(iterator)
+                except StopIteration:
+                    break
+                except BaseException:
+                    reraise(*_capture_exception())
+
+            yield chunk
+
+    def close(self):
+        # type: () -> None
+        with use_isolation_scope(self._scope):
+            try:
+                self._response.close()  # type: ignore
+            except AttributeError:
+                pass
+            except BaseException:
+                reraise(*_capture_exception())
+
+
+def _make_wsgi_event_processor(environ, use_x_forwarded_for):
+    # type: (Dict[str, str], bool) -> EventProcessor
+    # It's a bit unfortunate that we have to extract and parse the request data
+    # from the environ so eagerly, but there are a few good reasons for this.
+    #
+    # We might be in a situation where the scope never gets torn down
+    # properly. In that case we will have an unnecessary strong reference to
+    # all objects in the environ (some of which may take a lot of memory) when
+    # we're really just interested in a few of them.
+    #
+    # Keeping the environment around for longer than the request lifecycle is
+    # also not necessarily something uWSGI can deal with:
+    # https://github.com/unbit/uwsgi/issues/1950
+
+    client_ip = get_client_ip(environ)
+    request_url = get_request_url(environ, use_x_forwarded_for)
+    query_string = environ.get("QUERY_STRING")
+    method = environ.get("REQUEST_METHOD")
+    env = dict(_get_environ(environ))
+    headers = _filter_headers(dict(_get_headers(environ)))
+
+    def event_processor(event, hint):
+        # type: (Event, Dict[str, Any]) -> Event
+        with capture_internal_exceptions():
+            # if the code below fails halfway through we at least have some data
+            request_info = event.setdefault("request", {})
+
+            if should_send_default_pii():
+                user_info = event.setdefault("user", {})
+                if client_ip:
+                    user_info.setdefault("ip_address", client_ip)
+
+            request_info["url"] = request_url
+            request_info["query_string"] = query_string
+            request_info["method"] = method
+            request_info["env"] = env
+            request_info["headers"] = headers
+
+        return event
+
+    return event_processor
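+
+
+def _demo_event_processor_shape():
+    # type: () -> None
+    # Editor's illustrative sketch, not part of the vendored SDK: the processor
+    # returned above only fills in the "request" interface (plus the user IP
+    # when PII is allowed); a minimal, hypothetical environ exercises it.
+    environ = {
+        "REQUEST_METHOD": "GET",
+        "QUERY_STRING": "q=1",
+        "SERVER_NAME": "localhost",
+        "SERVER_PORT": "80",
+        "PATH_INFO": "/ping",
+        "wsgi.url_scheme": "http",
+    }
+    processor = _make_wsgi_event_processor(environ, use_x_forwarded_for=False)
+    event = processor({}, {})
+    assert event["request"]["method"] == "GET"
+    assert event["request"]["query_string"] == "q=1"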
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/metrics.py b/.venv/lib/python3.12/site-packages/sentry_sdk/metrics.py
new file mode 100644
index 00000000..4bdbc622
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/metrics.py
@@ -0,0 +1,965 @@
+import io
+import os
+import random
+import re
+import sys
+import threading
+import time
+import warnings
+import zlib
+from abc import ABC, abstractmethod
+from contextlib import contextmanager
+from datetime import datetime, timezone
+from functools import wraps, partial
+
+import sentry_sdk
+from sentry_sdk.utils import (
+    ContextVar,
+    now,
+    nanosecond_time,
+    to_timestamp,
+    serialize_frame,
+    json_dumps,
+)
+from sentry_sdk.envelope import Envelope, Item
+from sentry_sdk.tracing import TransactionSource
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+    from typing import Callable
+    from typing import Dict
+    from typing import Generator
+    from typing import Iterable
+    from typing import List
+    from typing import Optional
+    from typing import Set
+    from typing import Tuple
+    from typing import Union
+
+    from sentry_sdk._types import BucketKey
+    from sentry_sdk._types import DurationUnit
+    from sentry_sdk._types import FlushedMetricValue
+    from sentry_sdk._types import MeasurementUnit
+    from sentry_sdk._types import MetricMetaKey
+    from sentry_sdk._types import MetricTagValue
+    from sentry_sdk._types import MetricTags
+    from sentry_sdk._types import MetricTagsInternal
+    from sentry_sdk._types import MetricType
+    from sentry_sdk._types import MetricValue
+
+
+warnings.warn(
+    "The sentry_sdk.metrics module is deprecated and will be removed in the next major release. "
+    "Sentry will reject all metrics sent after October 7, 2024. "
+    "Learn more: https://sentry.zendesk.com/hc/en-us/articles/26369339769883-Upcoming-API-Changes-to-Metrics",
+    DeprecationWarning,
+    stacklevel=2,
+)
+
+_in_metrics = ContextVar("in_metrics", default=False)
+_set = set  # set is shadowed below
+
+GOOD_TRANSACTION_SOURCES = frozenset(
+    [
+        TransactionSource.ROUTE,
+        TransactionSource.VIEW,
+        TransactionSource.COMPONENT,
+        TransactionSource.TASK,
+    ]
+)
+
+_sanitize_unit = partial(re.compile(r"[^a-zA-Z0-9_]+").sub, "")
+_sanitize_metric_key = partial(re.compile(r"[^a-zA-Z0-9_\-.]+").sub, "_")
+_sanitize_tag_key = partial(re.compile(r"[^a-zA-Z0-9_\-.\/]+").sub, "")
+
+
+def _sanitize_tag_value(value):
+    # type: (str) -> str
+    table = str.maketrans(
+        {
+            "\n": "\\n",
+            "\r": "\\r",
+            "\t": "\\t",
+            "\\": "\\\\",
+            "|": "\\u{7c}",
+            ",": "\\u{2c}",
+        }
+    )
+    return value.translate(table)
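+
+
+def _demo_sanitize_tag_value():
+    # type: () -> None
+    # Editor's illustrative sketch, not part of the vendored SDK: control
+    # characters and the statsd delimiters "|" and "," are escaped, not dropped.
+    assert _sanitize_tag_value("a,b") == "a\\u{2c}b"
+    assert _sanitize_tag_value("a|b") == "a\\u{7c}b"
+    assert _sanitize_tag_value("line1\nline2") == "line1\\nline2"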
+
+
+def get_code_location(stacklevel):
+    # type: (int) -> Optional[Dict[str, Any]]
+    try:
+        frm = sys._getframe(stacklevel)
+    except Exception:
+        return None
+
+    return serialize_frame(
+        frm, include_local_variables=False, include_source_context=True
+    )
+
+
+@contextmanager
+def recursion_protection():
+    # type: () -> Generator[bool, None, None]
+    """Enters recursion protection and returns the old flag."""
+    old_in_metrics = _in_metrics.get()
+    _in_metrics.set(True)
+    try:
+        yield old_in_metrics
+    finally:
+        _in_metrics.set(old_in_metrics)
+
+
+def metrics_noop(func):
+    # type: (Any) -> Any
+    """Convenient decorator that uses `recursion_protection` to
+    make a function a noop.
+    """
+
+    @wraps(func)
+    def new_func(*args, **kwargs):
+        # type: (*Any, **Any) -> Any
+        with recursion_protection() as in_metrics:
+            if not in_metrics:
+                return func(*args, **kwargs)
+
+    return new_func
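+
+
+def _demo_metrics_noop():
+    # type: () -> None
+    # Editor's illustrative sketch, not part of the vendored SDK: a decorated
+    # function runs normally from the outside but becomes a noop when invoked
+    # while metrics code is already on the stack.
+    calls = []  # type: List[int]
+
+    @metrics_noop
+    def record():
+        # type: () -> None
+        calls.append(1)
+
+    record()  # outside metrics code: executes
+    with recursion_protection():
+        record()  # inside metrics code: suppressed
+    assert calls == [1]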
+
+
+class Metric(ABC):
+    __slots__ = ()
+
+    @abstractmethod
+    def __init__(self, first):
+        # type: (MetricValue) -> None
+        pass
+
+    @property
+    @abstractmethod
+    def weight(self):
+        # type: () -> int
+        pass
+
+    @abstractmethod
+    def add(self, value):
+        # type: (MetricValue) -> None
+        pass
+
+    @abstractmethod
+    def serialize_value(self):
+        # type: () -> Iterable[FlushedMetricValue]
+        pass
+
+
+class CounterMetric(Metric):
+    __slots__ = ("value",)
+
+    def __init__(
+        self, first  # type: MetricValue
+    ):
+        # type: (...) -> None
+        self.value = float(first)
+
+    @property
+    def weight(self):
+        # type: (...) -> int
+        return 1
+
+    def add(
+        self, value  # type: MetricValue
+    ):
+        # type: (...) -> None
+        self.value += float(value)
+
+    def serialize_value(self):
+        # type: (...) -> Iterable[FlushedMetricValue]
+        return (self.value,)
+
+
+class GaugeMetric(Metric):
+    __slots__ = (
+        "last",
+        "min",
+        "max",
+        "sum",
+        "count",
+    )
+
+    def __init__(
+        self, first  # type: MetricValue
+    ):
+        # type: (...) -> None
+        first = float(first)
+        self.last = first
+        self.min = first
+        self.max = first
+        self.sum = first
+        self.count = 1
+
+    @property
+    def weight(self):
+        # type: (...) -> int
+        # Number of elements.
+        return 5
+
+    def add(
+        self, value  # type: MetricValue
+    ):
+        # type: (...) -> None
+        value = float(value)
+        self.last = value
+        self.min = min(self.min, value)
+        self.max = max(self.max, value)
+        self.sum += value
+        self.count += 1
+
+    def serialize_value(self):
+        # type: (...) -> Iterable[FlushedMetricValue]
+        return (
+            self.last,
+            self.min,
+            self.max,
+            self.sum,
+            self.count,
+        )
+
+
+class DistributionMetric(Metric):
+    __slots__ = ("value",)
+
+    def __init__(
+        self, first  # type: MetricValue
+    ):
+        # type: (...) -> None
+        self.value = [float(first)]
+
+    @property
+    def weight(self):
+        # type: (...) -> int
+        return len(self.value)
+
+    def add(
+        self, value  # type: MetricValue
+    ):
+        # type: (...) -> None
+        self.value.append(float(value))
+
+    def serialize_value(self):
+        # type: (...) -> Iterable[FlushedMetricValue]
+        return self.value
+
+
+class SetMetric(Metric):
+    __slots__ = ("value",)
+
+    def __init__(
+        self, first  # type: MetricValue
+    ):
+        # type: (...) -> None
+        self.value = {first}
+
+    @property
+    def weight(self):
+        # type: (...) -> int
+        return len(self.value)
+
+    def add(
+        self, value  # type: MetricValue
+    ):
+        # type: (...) -> None
+        self.value.add(value)
+
+    def serialize_value(self):
+        # type: (...) -> Iterable[FlushedMetricValue]
+        def _hash(x):
+            # type: (MetricValue) -> int
+            if isinstance(x, str):
+                return zlib.crc32(x.encode("utf-8")) & 0xFFFFFFFF
+            return int(x)
+
+        return (_hash(value) for value in self.value)
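+
+
+def _demo_metric_types():
+    # type: () -> None
+    # Editor's illustrative sketch, not part of the vendored SDK: how each
+    # metric type folds added values and what it flushes.
+    c = CounterMetric(1)
+    c.add(2)
+    assert tuple(c.serialize_value()) == (3.0,) and c.weight == 1
+
+    g = GaugeMetric(4)
+    g.add(2)
+    # (last, min, max, sum, count); a gauge always weighs 5
+    assert tuple(g.serialize_value()) == (2.0, 2.0, 4.0, 6.0, 2)
+
+    d = DistributionMetric(1)
+    d.add(1)
+    assert d.weight == 2  # one weight unit per recorded value
+
+    s = SetMetric("a")
+    s.add("a")
+    assert s.weight == 1  # duplicates collapse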
+
+
+def _encode_metrics(flushable_buckets):
+    # type: (Iterable[Tuple[int, Dict[BucketKey, Metric]]]) -> bytes
+    out = io.BytesIO()
+    _write = out.write
+
+    # Note on sanitization: we intentionally sanitize in emission (serialization)
+    # and not during aggregation for performance reasons.  This means that the
+    # envelope can in fact have duplicate buckets stored.  This is acceptable for
+    # relay side emission and should not happen commonly.
+
+    for timestamp, buckets in flushable_buckets:
+        for bucket_key, metric in buckets.items():
+            metric_type, metric_name, metric_unit, metric_tags = bucket_key
+            metric_name = _sanitize_metric_key(metric_name)
+            metric_unit = _sanitize_unit(metric_unit)
+            _write(metric_name.encode("utf-8"))
+            _write(b"@")
+            _write(metric_unit.encode("utf-8"))
+
+            for serialized_value in metric.serialize_value():
+                _write(b":")
+                _write(str(serialized_value).encode("utf-8"))
+
+            _write(b"|")
+            _write(metric_type.encode("ascii"))
+
+            if metric_tags:
+                _write(b"|#")
+                first = True
+                for tag_key, tag_value in metric_tags:
+                    tag_key = _sanitize_tag_key(tag_key)
+                    if not tag_key:
+                        continue
+                    if first:
+                        first = False
+                    else:
+                        _write(b",")
+                    _write(tag_key.encode("utf-8"))
+                    _write(b":")
+                    _write(_sanitize_tag_value(tag_value).encode("utf-8"))
+
+            _write(b"|T")
+            _write(str(timestamp).encode("ascii"))
+            _write(b"\n")
+
+    return out.getvalue()
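+
+
+def _demo_statsd_encoding():
+    # type: () -> None
+    # Editor's illustrative sketch, not part of the vendored SDK: a flushed
+    # counter bucket serializes to one statsd-style line of the form
+    # <name>@<unit>:<values>|<type>|#<tags>|T<timestamp>.
+    buckets = [
+        (1716000000, {("c", "button.click", "none", (("env", "prod"),)): CounterMetric(3)})
+    ]
+    assert (
+        _encode_metrics(buckets)
+        == b"button.click@none:3.0|c|#env:prod|T1716000000\n"
+    )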
+
+
+def _encode_locations(timestamp, code_locations):
+    # type: (int, Iterable[Tuple[MetricMetaKey, Dict[str, Any]]]) -> bytes
+    mapping = {}  # type: Dict[str, List[Any]]
+
+    for key, loc in code_locations:
+        metric_type, name, unit = key
+        mri = "{}:{}@{}".format(
+            metric_type, _sanitize_metric_key(name), _sanitize_unit(unit)
+        )
+
+        loc["type"] = "location"
+        mapping.setdefault(mri, []).append(loc)
+
+    return json_dumps({"timestamp": timestamp, "mapping": mapping})
+
+
+METRIC_TYPES = {
+    "c": CounterMetric,
+    "g": GaugeMetric,
+    "d": DistributionMetric,
+    "s": SetMetric,
+}  # type: dict[MetricType, type[Metric]]
+
+# Maps each duration unit to a function returning the current time in that unit.
+TIMING_FUNCTIONS = {
+    "nanosecond": nanosecond_time,
+    "microsecond": lambda: nanosecond_time() / 1000.0,
+    "millisecond": lambda: nanosecond_time() / 1000000.0,
+    "second": now,
+    "minute": lambda: now() / 60.0,
+    "hour": lambda: now() / 3600.0,
+    "day": lambda: now() / 3600.0 / 24.0,
+    "week": lambda: now() / 3600.0 / 24.0 / 7.0,
+}
+
+
+class LocalAggregator:
+    __slots__ = ("_measurements",)
+
+    def __init__(self):
+        # type: (...) -> None
+        self._measurements = (
+            {}
+        )  # type: Dict[Tuple[str, MetricTagsInternal], Tuple[float, float, int, float]]
+
+    def add(
+        self,
+        ty,  # type: MetricType
+        key,  # type: str
+        value,  # type: float
+        unit,  # type: MeasurementUnit
+        tags,  # type: MetricTagsInternal
+    ):
+        # type: (...) -> None
+        export_key = "%s:%s@%s" % (ty, key, unit)
+        bucket_key = (export_key, tags)
+
+        old = self._measurements.get(bucket_key)
+        if old is not None:
+            v_min, v_max, v_count, v_sum = old
+            v_min = min(v_min, value)
+            v_max = max(v_max, value)
+            v_count += 1
+            v_sum += value
+        else:
+            v_min = v_max = v_sum = value
+            v_count = 1
+        self._measurements[bucket_key] = (v_min, v_max, v_count, v_sum)
+
+    def to_json(self):
+        # type: (...) -> Dict[str, Any]
+        rv = {}  # type: Any
+        for (export_key, tags), (
+            v_min,
+            v_max,
+            v_count,
+            v_sum,
+        ) in self._measurements.items():
+            rv.setdefault(export_key, []).append(
+                {
+                    "tags": _tags_to_dict(tags),
+                    "min": v_min,
+                    "max": v_max,
+                    "count": v_count,
+                    "sum": v_sum,
+                }
+            )
+        return rv
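+
+
+def _demo_local_aggregator():
+    # type: () -> None
+    # Editor's illustrative sketch, not part of the vendored SDK: the per-span
+    # aggregator keeps min/max/count/sum summaries keyed by exported MRI + tags.
+    agg = LocalAggregator()
+    agg.add("d", "db.query", 10.0, "millisecond", (("op", "select"),))
+    agg.add("d", "db.query", 30.0, "millisecond", (("op", "select"),))
+    assert agg.to_json() == {
+        "d:db.query@millisecond": [
+            {"tags": {"op": "select"}, "min": 10.0, "max": 30.0, "count": 2, "sum": 40.0}
+        ]
+    }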
+
+
+class MetricsAggregator:
+    ROLLUP_IN_SECONDS = 10.0
+    MAX_WEIGHT = 100000
+    FLUSHER_SLEEP_TIME = 5.0
+
+    def __init__(
+        self,
+        capture_func,  # type: Callable[[Envelope], None]
+        enable_code_locations=False,  # type: bool
+    ):
+        # type: (...) -> None
+        self.buckets = {}  # type: Dict[int, Any]
+        self._enable_code_locations = enable_code_locations
+        self._seen_locations = _set()  # type: Set[Tuple[int, MetricMetaKey]]
+        self._pending_locations = {}  # type: Dict[int, List[Tuple[MetricMetaKey, Any]]]
+        self._buckets_total_weight = 0
+        self._capture_func = capture_func
+        self._running = True
+        self._lock = threading.Lock()
+
+        self._flush_event = threading.Event()  # type: threading.Event
+        self._force_flush = False
+
+        # The aggregator shifts its flushing by up to an entire rollup window to
+        # avoid multiple clients trampling on the end of a 10 second window as all the
+        # buckets are anchored to multiples of ROLLUP seconds.  We randomize this
+        # number once per aggregator boot to achieve some level of offsetting
+        # across a fleet of deployed SDKs.  Relay itself will also apply independent
+        # jittering.
+        self._flush_shift = random.random() * self.ROLLUP_IN_SECONDS
+
+        self._flusher = None  # type: Optional[threading.Thread]
+        self._flusher_pid = None  # type: Optional[int]
+
+    def _ensure_thread(self):
+        # type: (...) -> bool
+        """For forking processes we might need to restart this thread.
+        This ensures that our process actually has that thread running.
+        """
+        if not self._running:
+            return False
+
+        pid = os.getpid()
+        if self._flusher_pid == pid:
+            return True
+
+        with self._lock:
+            # Recheck to make sure another thread didn't get here and start
+            # the flusher in the meantime
+            if self._flusher_pid == pid:
+                return True
+
+            self._flusher_pid = pid
+
+            self._flusher = threading.Thread(target=self._flush_loop)
+            self._flusher.daemon = True
+
+            try:
+                self._flusher.start()
+            except RuntimeError:
+                # Unfortunately at this point the interpreter is in a state that no
+                # longer allows us to spawn a thread and we have to bail.
+                self._running = False
+                return False
+
+        return True
+
+    def _flush_loop(self):
+        # type: (...) -> None
+        _in_metrics.set(True)
+        while self._running or self._force_flush:
+            if self._running:
+                self._flush_event.wait(self.FLUSHER_SLEEP_TIME)
+            self._flush()
+
+    def _flush(self):
+        # type: (...) -> None
+        self._emit(self._flushable_buckets(), self._flushable_locations())
+
+    def _flushable_buckets(self):
+        # type: (...) -> Iterable[Tuple[int, Dict[BucketKey, Metric]]]
+        with self._lock:
+            force_flush = self._force_flush
+            cutoff = time.time() - self.ROLLUP_IN_SECONDS - self._flush_shift
+            flushable_buckets = ()  # type: Iterable[Tuple[int, Dict[BucketKey, Metric]]]
+            weight_to_remove = 0
+
+            if force_flush:
+                flushable_buckets = self.buckets.items()
+                self.buckets = {}
+                self._buckets_total_weight = 0
+                self._force_flush = False
+            else:
+                flushable_buckets = []
+                for buckets_timestamp, buckets in self.buckets.items():
+                    # If the timestamp of the bucket is newer than the cutoff, we want to skip it.
+                    if buckets_timestamp <= cutoff:
+                        flushable_buckets.append((buckets_timestamp, buckets))
+
+                # We will clear the elements while holding the lock, in order to avoid requesting it downstream again.
+                for buckets_timestamp, buckets in flushable_buckets:
+                    for metric in buckets.values():
+                        weight_to_remove += metric.weight
+                    del self.buckets[buckets_timestamp]
+
+                self._buckets_total_weight -= weight_to_remove
+
+        return flushable_buckets
+
+    def _flushable_locations(self):
+        # type: (...) -> Dict[int, List[Tuple[MetricMetaKey, Dict[str, Any]]]]
+        with self._lock:
+            locations = self._pending_locations
+            self._pending_locations = {}
+        return locations
+
+    @metrics_noop
+    def add(
+        self,
+        ty,  # type: MetricType
+        key,  # type: str
+        value,  # type: MetricValue
+        unit,  # type: MeasurementUnit
+        tags,  # type: Optional[MetricTags]
+        timestamp=None,  # type: Optional[Union[float, datetime]]
+        local_aggregator=None,  # type: Optional[LocalAggregator]
+        stacklevel=0,  # type: Optional[int]
+    ):
+        # type: (...) -> None
+        if not self._ensure_thread() or self._flusher is None:
+            return None
+
+        if timestamp is None:
+            timestamp = time.time()
+        elif isinstance(timestamp, datetime):
+            timestamp = to_timestamp(timestamp)
+
+        bucket_timestamp = int(
+            (timestamp // self.ROLLUP_IN_SECONDS) * self.ROLLUP_IN_SECONDS
+        )
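+        # Editor's note (illustrative): buckets are anchored to multiples of
+        # ROLLUP_IN_SECONDS, e.g. with the 10s rollup a timestamp of 1716000007
+        # lands in bucket 1716000000, so values recorded within the same window
+        # and with the same bucket_key fold into a single Metric instance.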
+        serialized_tags = _serialize_tags(tags)
+        bucket_key = (
+            ty,
+            key,
+            unit,
+            serialized_tags,
+        )
+
+        with self._lock:
+            local_buckets = self.buckets.setdefault(bucket_timestamp, {})
+            metric = local_buckets.get(bucket_key)
+            if metric is not None:
+                previous_weight = metric.weight
+                metric.add(value)
+            else:
+                metric = local_buckets[bucket_key] = METRIC_TYPES[ty](value)
+                previous_weight = 0
+
+            added = metric.weight - previous_weight
+
+            if stacklevel is not None:
+                self.record_code_location(ty, key, unit, stacklevel + 2, timestamp)
+
+        # Given the new weight we consider whether we want to force flush.
+        self._consider_force_flush()
+
+        # For sets, we only record that a value has been added to the set but not which one.
+        # See develop docs: https://develop.sentry.dev/sdk/metrics/#sets
+        if local_aggregator is not None:
+            local_value = float(added if ty == "s" else value)
+            local_aggregator.add(ty, key, local_value, unit, serialized_tags)
+
+    def record_code_location(
+        self,
+        ty,  # type: MetricType
+        key,  # type: str
+        unit,  # type: MeasurementUnit
+        stacklevel,  # type: int
+        timestamp=None,  # type: Optional[float]
+    ):
+        # type: (...) -> None
+        if not self._enable_code_locations:
+            return
+        if timestamp is None:
+            timestamp = time.time()
+        meta_key = (ty, key, unit)
+        start_of_day = datetime.fromtimestamp(timestamp, timezone.utc).replace(
+            hour=0, minute=0, second=0, microsecond=0, tzinfo=None
+        )
+        start_of_day = int(to_timestamp(start_of_day))
+
+        if (start_of_day, meta_key) not in self._seen_locations:
+            self._seen_locations.add((start_of_day, meta_key))
+            loc = get_code_location(stacklevel + 3)
+            if loc is not None:
+                # Group metadata by day to make flushing more efficient.
+                # There needs to be one envelope item per timestamp.
+                self._pending_locations.setdefault(start_of_day, []).append(
+                    (meta_key, loc)
+                )
+
+    @metrics_noop
+    def need_code_location(
+        self,
+        ty,  # type: MetricType
+        key,  # type: str
+        unit,  # type: MeasurementUnit
+        timestamp,  # type: float
+    ):
+        # type: (...) -> bool
+        if self._enable_code_locations:
+            return False
+        meta_key = (ty, key, unit)
+        start_of_day = datetime.fromtimestamp(timestamp, timezone.utc).replace(
+            hour=0, minute=0, second=0, microsecond=0, tzinfo=None
+        )
+        start_of_day = int(to_timestamp(start_of_day))
+        return (start_of_day, meta_key) not in self._seen_locations
+
+    def kill(self):
+        # type: (...) -> None
+        if self._flusher is None:
+            return
+
+        self._running = False
+        self._flush_event.set()
+        self._flusher = None
+
+    @metrics_noop
+    def flush(self):
+        # type: (...) -> None
+        self._force_flush = True
+        self._flush()
+
+    def _consider_force_flush(self):
+        # type: (...) -> None
+        # It's important to acquire a lock around this method, since it will touch shared data structures.
+        total_weight = len(self.buckets) + self._buckets_total_weight
+        if total_weight >= self.MAX_WEIGHT:
+            self._force_flush = True
+            self._flush_event.set()
+
+    def _emit(
+        self,
+        flushable_buckets,  # type: Iterable[Tuple[int, Dict[BucketKey, Metric]]]
+        code_locations,  # type: Dict[int, List[Tuple[MetricMetaKey, Dict[str, Any]]]]
+    ):
+        # type: (...) -> Optional[Envelope]
+        envelope = Envelope()
+
+        if flushable_buckets:
+            encoded_metrics = _encode_metrics(flushable_buckets)
+            envelope.add_item(Item(payload=encoded_metrics, type="statsd"))
+
+        for timestamp, locations in code_locations.items():
+            encoded_locations = _encode_locations(timestamp, locations)
+            envelope.add_item(Item(payload=encoded_locations, type="metric_meta"))
+
+        if envelope.items:
+            self._capture_func(envelope)
+            return envelope
+        return None
+
+
+def _serialize_tags(
+    tags,  # type: Optional[MetricTags]
+):
+    # type: (...) -> MetricTagsInternal
+    if not tags:
+        return ()
+
+    rv = []
+    for key, value in tags.items():
+        # If the value is a collection, we want to flatten it.
+        if isinstance(value, (list, tuple)):
+            for inner_value in value:
+                if inner_value is not None:
+                    rv.append((key, str(inner_value)))
+        elif value is not None:
+            rv.append((key, str(value)))
+
+    # It's very important to sort the tags in order to obtain the
+    # same bucket key.
+    return tuple(sorted(rv))
+
+
+def _tags_to_dict(tags):
+    # type: (MetricTagsInternal) -> Dict[str, Any]
+    rv = {}  # type: Dict[str, Any]
+    for tag_name, tag_value in tags:
+        old_value = rv.get(tag_name)
+        if old_value is not None:
+            if isinstance(old_value, list):
+                old_value.append(tag_value)
+            else:
+                rv[tag_name] = [old_value, tag_value]
+        else:
+            rv[tag_name] = tag_value
+    return rv
+
+
+def _get_aggregator():
+    # type: () -> Optional[MetricsAggregator]
+    client = sentry_sdk.get_client()
+    return (
+        client.metrics_aggregator
+        if client.is_active() and client.metrics_aggregator is not None
+        else None
+    )
+
+
+def _get_aggregator_and_update_tags(key, value, unit, tags):
+    # type: (str, Optional[MetricValue], MeasurementUnit, Optional[MetricTags]) -> Tuple[Optional[MetricsAggregator], Optional[LocalAggregator], Optional[MetricTags]]
+    client = sentry_sdk.get_client()
+    if not client.is_active() or client.metrics_aggregator is None:
+        return None, None, tags
+
+    updated_tags = dict(tags or ())  # type: Dict[str, MetricTagValue]
+    updated_tags.setdefault("release", client.options["release"])
+    updated_tags.setdefault("environment", client.options["environment"])
+
+    scope = sentry_sdk.get_current_scope()
+    local_aggregator = None
+
+    # We go with the low-level API here to access transaction information as
+    # this one is the same between just errors and errors + performance
+    transaction_source = scope._transaction_info.get("source")
+    if transaction_source in GOOD_TRANSACTION_SOURCES:
+        transaction_name = scope._transaction
+        if transaction_name:
+            updated_tags.setdefault("transaction", transaction_name)
+        if scope._span is not None:
+            local_aggregator = scope._span._get_local_aggregator()
+
+    experiments = client.options.get("_experiments", {})
+    before_emit_callback = experiments.get("before_emit_metric")
+    if before_emit_callback is not None:
+        with recursion_protection() as in_metrics:
+            if not in_metrics:
+                if not before_emit_callback(key, value, unit, updated_tags):
+                    return None, None, updated_tags
+
+    return client.metrics_aggregator, local_aggregator, updated_tags
+
+
+def increment(
+    key,  # type: str
+    value=1.0,  # type: float
+    unit="none",  # type: MeasurementUnit
+    tags=None,  # type: Optional[MetricTags]
+    timestamp=None,  # type: Optional[Union[float, datetime]]
+    stacklevel=0,  # type: int
+):
+    # type: (...) -> None
+    """Increments a counter."""
+    aggregator, local_aggregator, tags = _get_aggregator_and_update_tags(
+        key, value, unit, tags
+    )
+    if aggregator is not None:
+        aggregator.add(
+            "c", key, value, unit, tags, timestamp, local_aggregator, stacklevel
+        )
+
+
+# alias as incr is relatively common in python
+incr = increment
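+
+
+def _demo_metrics_api_usage():
+    # type: () -> None
+    # Editor's illustrative sketch, not part of the vendored SDK. Assuming an
+    # initialized SDK (sentry_sdk.init(...) with a valid DSN), the deprecated
+    # metrics API is used like this; without an active client these calls are
+    # silently dropped by _get_aggregator_and_update_tags().
+    increment("button.click", 1, tags={"browser": "firefox"})
+    distribution("page.load", 15.0, unit="millisecond")
+    gauge("queue.depth", 42)
+    set("user.visit", "user-1")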
+
+
+class _Timing:
+    def __init__(
+        self,
+        key,  # type: str
+        tags,  # type: Optional[MetricTags]
+        timestamp,  # type: Optional[Union[float, datetime]]
+        value,  # type: Optional[float]
+        unit,  # type: DurationUnit
+        stacklevel,  # type: int
+    ):
+        # type: (...) -> None
+        self.key = key
+        self.tags = tags
+        self.timestamp = timestamp
+        self.value = value
+        self.unit = unit
+        self.entered = None  # type: Optional[float]
+        self._span = None  # type: Optional[sentry_sdk.tracing.Span]
+        self.stacklevel = stacklevel
+
+    def _validate_invocation(self, context):
+        # type: (str) -> None
+        if self.value is not None:
+            raise TypeError(
+                "cannot use timing as %s when a value is provided" % context
+            )
+
+    def __enter__(self):
+        # type: (...) -> _Timing
+        self.entered = TIMING_FUNCTIONS[self.unit]()
+        self._validate_invocation("context-manager")
+        self._span = sentry_sdk.start_span(op="metric.timing", name=self.key)
+        if self.tags:
+            for key, value in self.tags.items():
+                if isinstance(value, (tuple, list)):
+                    value = ",".join(sorted(map(str, value)))
+                self._span.set_tag(key, value)
+        self._span.__enter__()
+
+        # report code locations here for better accuracy
+        aggregator = _get_aggregator()
+        if aggregator is not None:
+            aggregator.record_code_location("d", self.key, self.unit, self.stacklevel)
+
+        return self
+
+    def __exit__(self, exc_type, exc_value, tb):
+        # type: (Any, Any, Any) -> None
+        assert self._span, "did not enter"
+        aggregator, local_aggregator, tags = _get_aggregator_and_update_tags(
+            self.key,
+            self.value,
+            self.unit,
+            self.tags,
+        )
+        if aggregator is not None:
+            elapsed = TIMING_FUNCTIONS[self.unit]() - self.entered  # type: ignore
+            aggregator.add(
+                "d",
+                self.key,
+                elapsed,
+                self.unit,
+                tags,
+                self.timestamp,
+                local_aggregator,
+                None,  # code locations are reported in __enter__
+            )
+
+        self._span.__exit__(exc_type, exc_value, tb)
+        self._span = None
+
+    def __call__(self, f):
+        # type: (Any) -> Any
+        self._validate_invocation("decorator")
+
+        @wraps(f)
+        def timed_func(*args, **kwargs):
+            # type: (*Any, **Any) -> Any
+            with timing(
+                key=self.key,
+                tags=self.tags,
+                timestamp=self.timestamp,
+                unit=self.unit,
+                stacklevel=self.stacklevel + 1,
+            ):
+                return f(*args, **kwargs)
+
+        return timed_func
+
+
+def timing(
+    key,  # type: str
+    value=None,  # type: Optional[float]
+    unit="second",  # type: DurationUnit
+    tags=None,  # type: Optional[MetricTags]
+    timestamp=None,  # type: Optional[Union[float, datetime]]
+    stacklevel=0,  # type: int
+):
+    # type: (...) -> _Timing
+    """Emits a distribution with the time it takes to run the given code block.
+
+    This method supports three forms of invocation:
+
+    - when a `value` is provided, it functions like `distribution`, but is
+      restricted to duration units
+    - it can be used as a context manager
+    - it can be used as a decorator
+    """
+    if value is not None:
+        aggregator, local_aggregator, tags = _get_aggregator_and_update_tags(
+            key, value, unit, tags
+        )
+        if aggregator is not None:
+            aggregator.add(
+                "d", key, value, unit, tags, timestamp, local_aggregator, stacklevel
+            )
+    return _Timing(key, tags, timestamp, value, unit, stacklevel)
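+
+
+def _demo_timing_forms():
+    # type: () -> None
+    # Editor's illustrative sketch, not part of the vendored SDK: the three
+    # invocation forms described in the docstring above. All of them are
+    # harmless noops when no client is active.
+    timing("task.duration", value=12.0, unit="millisecond")  # direct value
+
+    with timing("task.duration"):  # context manager
+        pass
+
+    @timing("task.duration")  # decorator
+    def task():
+        # type: () -> None
+        pass
+
+    task()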
+
+
+def distribution(
+    key,  # type: str
+    value,  # type: float
+    unit="none",  # type: MeasurementUnit
+    tags=None,  # type: Optional[MetricTags]
+    timestamp=None,  # type: Optional[Union[float, datetime]]
+    stacklevel=0,  # type: int
+):
+    # type: (...) -> None
+    """Emits a distribution."""
+    aggregator, local_aggregator, tags = _get_aggregator_and_update_tags(
+        key, value, unit, tags
+    )
+    if aggregator is not None:
+        aggregator.add(
+            "d", key, value, unit, tags, timestamp, local_aggregator, stacklevel
+        )
+
+
+def set(
+    key,  # type: str
+    value,  # type: Union[int, str]
+    unit="none",  # type: MeasurementUnit
+    tags=None,  # type: Optional[MetricTags]
+    timestamp=None,  # type: Optional[Union[float, datetime]]
+    stacklevel=0,  # type: int
+):
+    # type: (...) -> None
+    """Emits a set."""
+    aggregator, local_aggregator, tags = _get_aggregator_and_update_tags(
+        key, value, unit, tags
+    )
+    if aggregator is not None:
+        aggregator.add(
+            "s", key, value, unit, tags, timestamp, local_aggregator, stacklevel
+        )
+
+
+def gauge(
+    key,  # type: str
+    value,  # type: float
+    unit="none",  # type: MeasurementUnit
+    tags=None,  # type: Optional[MetricTags]
+    timestamp=None,  # type: Optional[Union[float, datetime]]
+    stacklevel=0,  # type: int
+):
+    # type: (...) -> None
+    """Emits a gauge."""
+    aggregator, local_aggregator, tags = _get_aggregator_and_update_tags(
+        key, value, unit, tags
+    )
+    if aggregator is not None:
+        aggregator.add(
+            "g", key, value, unit, tags, timestamp, local_aggregator, stacklevel
+        )
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/monitor.py b/.venv/lib/python3.12/site-packages/sentry_sdk/monitor.py
new file mode 100644
index 00000000..68d9017b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/monitor.py
@@ -0,0 +1,124 @@
+import os
+import time
+from threading import Thread, Lock
+
+import sentry_sdk
+from sentry_sdk.utils import logger
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Optional
+
+
+MAX_DOWNSAMPLE_FACTOR = 10
+
+
+class Monitor:
+    """
+    Performs health checks in a separate thread once every interval seconds
+    and updates the internal state. Other parts of the SDK only read this state
+    and act accordingly.
+    """
+
+    name = "sentry.monitor"
+
+    def __init__(self, transport, interval=10):
+        # type: (sentry_sdk.transport.Transport, float) -> None
+        self.transport = transport  # type: sentry_sdk.transport.Transport
+        self.interval = interval  # type: float
+
+        self._healthy = True
+        self._downsample_factor = 0  # type: int
+
+        self._thread = None  # type: Optional[Thread]
+        self._thread_lock = Lock()
+        self._thread_for_pid = None  # type: Optional[int]
+        self._running = True
+
+    def _ensure_running(self):
+        # type: () -> None
+        """
+        Check that the monitor has an active thread to run in, or create one if not.
+
+        Note that this might fail (e.g. in Python 3.12 it's not possible to
+        spawn new threads at interpreter shutdown). In that case self._running
+        will be False after running this function.
+        """
+        if self._thread_for_pid == os.getpid() and self._thread is not None:
+            return None
+
+        with self._thread_lock:
+            if self._thread_for_pid == os.getpid() and self._thread is not None:
+                return None
+
+            def _thread():
+                # type: (...) -> None
+                while self._running:
+                    time.sleep(self.interval)
+                    if self._running:
+                        self.run()
+
+            thread = Thread(name=self.name, target=_thread)
+            thread.daemon = True
+            try:
+                thread.start()
+            except RuntimeError:
+                # Unfortunately at this point the interpreter is in a state that no
+                # longer allows us to spawn a thread and we have to bail.
+                self._running = False
+                return None
+
+            self._thread = thread
+            self._thread_for_pid = os.getpid()
+
+        return None
+
+    def run(self):
+        # type: () -> None
+        self.check_health()
+        self.set_downsample_factor()
+
+    def set_downsample_factor(self):
+        # type: () -> None
+        if self._healthy:
+            if self._downsample_factor > 0:
+                logger.debug(
+                    "[Monitor] health check positive, reverting to normal sampling"
+                )
+            self._downsample_factor = 0
+        else:
+            if self._downsample_factor < MAX_DOWNSAMPLE_FACTOR:
+                self._downsample_factor += 1
+            logger.debug(
+                "[Monitor] health check negative, downsampling with a factor of %d",
+                self._downsample_factor,
+            )
+
+    def check_health(self):
+        # type: () -> None
+        """
+        Perform the actual health checks,
+        currently only checks if the transport is rate-limited.
+        TODO: augment in the future with more checks.
+        """
+        self._healthy = self.transport.is_healthy()
+
+    def is_healthy(self):
+        # type: () -> bool
+        self._ensure_running()
+        return self._healthy
+
+    @property
+    def downsample_factor(self):
+        # type: () -> int
+        self._ensure_running()
+        return self._downsample_factor
+
+    def kill(self):
+        # type: () -> None
+        self._running = False
+
+    def __del__(self):
+        # type: () -> None
+        self.kill()
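+
+
+class _StubTransport:
+    # Editor's illustrative stub, not part of the vendored SDK: just enough of
+    # the transport interface for Monitor.check_health().
+    healthy = False
+
+    def is_healthy(self):
+        # type: () -> bool
+        return self.healthy
+
+
+def _demo_monitor_backoff():
+    # type: () -> None
+    # Editor's illustrative sketch: driving the health-check state machine by
+    # hand. Each unhealthy check raises the downsample factor by one, capped
+    # at MAX_DOWNSAMPLE_FACTOR; the first healthy check resets it to zero.
+    monitor = Monitor(_StubTransport())  # type: ignore[arg-type]
+    for _ in range(3):
+        monitor.run()
+    assert monitor._downsample_factor == 3
+    monitor.transport.healthy = True  # type: ignore[attr-defined]
+    monitor.run()
+    assert monitor._downsample_factor == 0
+    monitor.kill()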
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/profiler/__init__.py b/.venv/lib/python3.12/site-packages/sentry_sdk/profiler/__init__.py
new file mode 100644
index 00000000..0bc63e3a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/profiler/__init__.py
@@ -0,0 +1,49 @@
+from sentry_sdk.profiler.continuous_profiler import (
+    start_profile_session,
+    start_profiler,
+    stop_profile_session,
+    stop_profiler,
+)
+from sentry_sdk.profiler.transaction_profiler import (
+    MAX_PROFILE_DURATION_NS,
+    PROFILE_MINIMUM_SAMPLES,
+    Profile,
+    Scheduler,
+    ThreadScheduler,
+    GeventScheduler,
+    has_profiling_enabled,
+    setup_profiler,
+    teardown_profiler,
+)
+from sentry_sdk.profiler.utils import (
+    DEFAULT_SAMPLING_FREQUENCY,
+    MAX_STACK_DEPTH,
+    get_frame_name,
+    extract_frame,
+    extract_stack,
+    frame_id,
+)
+
+__all__ = [
+    "start_profile_session",  # TODO: Deprecate this in favor of `start_profiler`
+    "start_profiler",
+    "stop_profile_session",  # TODO: Deprecate this in favor of `stop_profiler`
+    "stop_profiler",
+    # DEPRECATED: The following was re-exported for backwards compatibility. It
+    # will be removed from sentry_sdk.profiler in a future release.
+    "MAX_PROFILE_DURATION_NS",
+    "PROFILE_MINIMUM_SAMPLES",
+    "Profile",
+    "Scheduler",
+    "ThreadScheduler",
+    "GeventScheduler",
+    "has_profiling_enabled",
+    "setup_profiler",
+    "teardown_profiler",
+    "DEFAULT_SAMPLING_FREQUENCY",
+    "MAX_STACK_DEPTH",
+    "get_frame_name",
+    "extract_frame",
+    "extract_stack",
+    "frame_id",
+]
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/profiler/continuous_profiler.py b/.venv/lib/python3.12/site-packages/sentry_sdk/profiler/continuous_profiler.py
new file mode 100644
index 00000000..77ba60db
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/profiler/continuous_profiler.py
@@ -0,0 +1,704 @@
+import atexit
+import os
+import random
+import sys
+import threading
+import time
+import uuid
+import warnings
+from collections import deque
+from datetime import datetime, timezone
+
+from sentry_sdk.consts import VERSION
+from sentry_sdk.envelope import Envelope
+from sentry_sdk._lru_cache import LRUCache
+from sentry_sdk.profiler.utils import (
+    DEFAULT_SAMPLING_FREQUENCY,
+    extract_stack,
+)
+from sentry_sdk.utils import (
+    capture_internal_exception,
+    is_gevent,
+    logger,
+    now,
+    set_in_app_in_frames,
+)
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+    from typing import Callable
+    from typing import Deque
+    from typing import Dict
+    from typing import List
+    from typing import Optional
+    from typing import Set
+    from typing import Type
+    from typing import Union
+    from typing_extensions import TypedDict
+    from sentry_sdk._types import ContinuousProfilerMode, SDKInfo
+    from sentry_sdk.profiler.utils import (
+        ExtractedSample,
+        FrameId,
+        StackId,
+        ThreadId,
+        ProcessedFrame,
+        ProcessedStack,
+    )
+
+    ProcessedSample = TypedDict(
+        "ProcessedSample",
+        {
+            "timestamp": float,
+            "thread_id": ThreadId,
+            "stack_id": int,
+        },
+    )
+
+
+try:
+    from gevent.monkey import get_original
+    from gevent.threadpool import ThreadPool as _ThreadPool
+
+    ThreadPool = _ThreadPool  # type: Optional[Type[_ThreadPool]]
+    thread_sleep = get_original("time", "sleep")
+except ImportError:
+    thread_sleep = time.sleep
+    ThreadPool = None
+
+
+_scheduler = None  # type: Optional[ContinuousScheduler]
+
+
+def setup_continuous_profiler(options, sdk_info, capture_func):
+    # type: (Dict[str, Any], SDKInfo, Callable[[Envelope], None]) -> bool
+    global _scheduler
+
+    if _scheduler is not None:
+        logger.debug("[Profiling] Continuous Profiler is already setup")
+        return False
+
+    if is_gevent():
+        # If gevent has patched the threading modules then we cannot rely on
+        # them to spawn a native thread for sampling.
+        # Instead we default to the GeventContinuousScheduler which is capable of
+        # spawning native threads within gevent.
+        default_profiler_mode = GeventContinuousScheduler.mode
+    else:
+        default_profiler_mode = ThreadContinuousScheduler.mode
+
+    if options.get("profiler_mode") is not None:
+        profiler_mode = options["profiler_mode"]
+    else:
+        # TODO: deprecate this and just use the existing `profiler_mode`
+        experiments = options.get("_experiments", {})
+
+        profiler_mode = (
+            experiments.get("continuous_profiling_mode") or default_profiler_mode
+        )
+
+    frequency = DEFAULT_SAMPLING_FREQUENCY
+
+    if profiler_mode == ThreadContinuousScheduler.mode:
+        _scheduler = ThreadContinuousScheduler(
+            frequency, options, sdk_info, capture_func
+        )
+    elif profiler_mode == GeventContinuousScheduler.mode:
+        _scheduler = GeventContinuousScheduler(
+            frequency, options, sdk_info, capture_func
+        )
+    else:
+        raise ValueError("Unknown continuous profiler mode: {}".format(profiler_mode))
+
+    logger.debug(
+        "[Profiling] Setting up continuous profiler in {mode} mode".format(
+            mode=_scheduler.mode
+        )
+    )
+
+    atexit.register(teardown_continuous_profiler)
+
+    return True
+
+
+def try_autostart_continuous_profiler():
+    # type: () -> None
+
+    # TODO: deprecate this as it'll be replaced by the auto lifecycle option
+
+    if _scheduler is None:
+        return
+
+    if not _scheduler.is_auto_start_enabled():
+        return
+
+    _scheduler.manual_start()
+
+
+def try_profile_lifecycle_trace_start():
+    # type: () -> Union[ContinuousProfile, None]
+    if _scheduler is None:
+        return None
+
+    return _scheduler.auto_start()
+
+
+def start_profiler():
+    # type: () -> None
+    if _scheduler is None:
+        return
+
+    _scheduler.manual_start()
+
+
+def start_profile_session():
+    # type: () -> None
+
+    warnings.warn(
+        "The `start_profile_session` function is deprecated. Please use `start_profile` instead.",
+        DeprecationWarning,
+        stacklevel=2,
+    )
+    start_profiler()
+
+
+def stop_profiler():
+    # type: () -> None
+    if _scheduler is None:
+        return
+
+    _scheduler.manual_stop()
+
+
+def stop_profile_session():
+    # type: () -> None
+
+    warnings.warn(
+        "The `stop_profile_session` function is deprecated. Please use `stop_profile` instead.",
+        DeprecationWarning,
+        stacklevel=2,
+    )
+    stop_profiler()
+
+
+def teardown_continuous_profiler():
+    # type: () -> None
+    stop_profiler()
+
+    global _scheduler
+    _scheduler = None
+
+
+def get_profiler_id():
+    # type: () -> Union[str, None]
+    if _scheduler is None:
+        return None
+    return _scheduler.profiler_id
+
+
+def determine_profile_session_sampling_decision(sample_rate):
+    # type: (Union[float, None]) -> bool
+
+    # `None` is treated as `0.0`
+    if not sample_rate:
+        return False
+
+    return random.random() < float(sample_rate)
+
+
+class ContinuousProfile:
+    active: bool = True
+
+    def stop(self):
+        # type: () -> None
+        self.active = False
+
+
+class ContinuousScheduler:
+    mode = "unknown"  # type: ContinuousProfilerMode
+
+    def __init__(self, frequency, options, sdk_info, capture_func):
+        # type: (int, Dict[str, Any], SDKInfo, Callable[[Envelope], None]) -> None
+        self.interval = 1.0 / frequency
+        self.options = options
+        self.sdk_info = sdk_info
+        self.capture_func = capture_func
+
+        self.lifecycle = self.options.get("profile_lifecycle")
+        profile_session_sample_rate = self.options.get("profile_session_sample_rate")
+        self.sampled = determine_profile_session_sampling_decision(
+            profile_session_sample_rate
+        )
+
+        self.sampler = self.make_sampler()
+        self.buffer = None  # type: Optional[ProfileBuffer]
+        self.pid = None  # type: Optional[int]
+
+        self.running = False
+
+        self.new_profiles = deque(maxlen=128)  # type: Deque[ContinuousProfile]
+        self.active_profiles = set()  # type: Set[ContinuousProfile]
+
+    def is_auto_start_enabled(self):
+        # type: () -> bool
+
+        # Ensure that the scheduler only autostarts once per process.
+        # This is necessary because many web servers use forks to spawn
+        # additional processes. If the profiler were only spawned on the
+        # master process, it would often only profile the main process
+        # and not the ones where the requests are being handled.
+        if self.pid == os.getpid():
+            return False
+
+        experiments = self.options.get("_experiments")
+        if not experiments:
+            return False
+
+        return experiments.get("continuous_profiling_auto_start")
+
+    def auto_start(self):
+        # type: () -> Union[ContinuousProfile, None]
+        if not self.sampled:
+            return None
+
+        if self.lifecycle != "trace":
+            return None
+
+        logger.debug("[Profiling] Auto starting profiler")
+
+        profile = ContinuousProfile()
+
+        self.new_profiles.append(profile)
+        self.ensure_running()
+
+        return profile
+
+    def manual_start(self):
+        # type: () -> None
+        if not self.sampled:
+            return
+
+        if self.lifecycle != "manual":
+            return
+
+        self.ensure_running()
+
+    def manual_stop(self):
+        # type: () -> None
+        if self.lifecycle != "manual":
+            return
+
+        self.teardown()
+
+    def ensure_running(self):
+        # type: () -> None
+        raise NotImplementedError
+
+    def teardown(self):
+        # type: () -> None
+        raise NotImplementedError
+
+    def pause(self):
+        # type: () -> None
+        raise NotImplementedError
+
+    def reset_buffer(self):
+        # type: () -> None
+        self.buffer = ProfileBuffer(
+            self.options, self.sdk_info, PROFILE_BUFFER_SECONDS, self.capture_func
+        )
+
+    @property
+    def profiler_id(self):
+        # type: () -> Union[str, None]
+        if self.buffer is None:
+            return None
+        return self.buffer.profiler_id
+
+    def make_sampler(self):
+        # type: () -> Callable[..., None]
+        cwd = os.getcwd()
+
+        cache = LRUCache(max_size=256)
+
+        if self.lifecycle == "trace":
+
+            def _sample_stack(*args, **kwargs):
+                # type: (*Any, **Any) -> None
+                """
+                Take a sample of the stack on all the threads in the process.
+                This should be called at a regular interval to collect samples.
+                """
+
+                # no profiles taking place, so we can stop early
+                if not self.new_profiles and not self.active_profiles:
+                    self.running = False
+                    return
+
+                # This is the number of profiles we want to pop off.
+                # It's possible another thread adds a new profile to
+                # the list and we spend longer than we want inside
+                # the loop below.
+                #
+                # Also make sure to set this value before extracting
+                # frames so we do not write to any new profiles that
+                # were started after this point.
+                new_profiles = len(self.new_profiles)
+
+                ts = now()
+
+                try:
+                    sample = [
+                        (str(tid), extract_stack(frame, cache, cwd))
+                        for tid, frame in sys._current_frames().items()
+                    ]
+                except AttributeError:
+                    # For some reason, the frame we get doesn't have certain attributes.
+                    # When this happens, we abandon the current sample as it's bad.
+                    capture_internal_exception(sys.exc_info())
+                    return
+
+                # Move the new profiles into the active_profiles set.
+                #
+                # We cannot add directly to the active_profiles set
+                # in `start_profiling` because it is called from other
+                # threads, which can cause a RuntimeError when the
+                # set size changes during iteration without a lock.
+                #
+                # We also want to avoid using a lock here so threads
+                # that are starting profiles are not blocked until it
+                # can acquire the lock.
+                for _ in range(new_profiles):
+                    self.active_profiles.add(self.new_profiles.popleft())
+                inactive_profiles = []
+
+                for profile in self.active_profiles:
+                    if not profile.active:
+                        # If a profile is marked inactive, we buffer it
+                        # to `inactive_profiles` so it can be removed.
+                        # We cannot remove it here as it would result
+                        # in a RuntimeError.
+                        inactive_profiles.append(profile)
+
+                for profile in inactive_profiles:
+                    self.active_profiles.remove(profile)
+
+                if self.buffer is not None:
+                    self.buffer.write(ts, sample)
+
+        else:
+
+            def _sample_stack(*args, **kwargs):
+                # type: (*Any, **Any) -> None
+                """
+                Take a sample of the stack on all the threads in the process.
+                This should be called at a regular interval to collect samples.
+                """
+
+                ts = now()
+
+                try:
+                    sample = [
+                        (str(tid), extract_stack(frame, cache, cwd))
+                        for tid, frame in sys._current_frames().items()
+                    ]
+                except AttributeError:
+                    # For some reason, the frame we get doesn't have certain attributes.
+                    # When this happens, we abandon the current sample as it's bad.
+                    capture_internal_exception(sys.exc_info())
+                    return
+
+                if self.buffer is not None:
+                    self.buffer.write(ts, sample)
+
+        return _sample_stack
+
+    def run(self):
+        # type: () -> None
+        last = time.perf_counter()
+
+        while self.running:
+            self.sampler()
+
+            # some time may have elapsed since the last time
+            # we sampled, so we need to account for that and
+            # not sleep for too long
+            elapsed = time.perf_counter() - last
+            if elapsed < self.interval:
+                thread_sleep(self.interval - elapsed)
+
+            # after sleeping, make sure to take the current
+            # timestamp so we can use it next iteration
+            last = time.perf_counter()
+
+        if self.buffer is not None:
+            self.buffer.flush()
+            self.buffer = None
+
+
+class ThreadContinuousScheduler(ContinuousScheduler):
+    """
+    This scheduler is based on running a daemon thread that will call
+    the sampler at a regular interval.
+    """
+
+    mode = "thread"  # type: ContinuousProfilerMode
+    name = "sentry.profiler.ThreadContinuousScheduler"
+
+    def __init__(self, frequency, options, sdk_info, capture_func):
+        # type: (int, Dict[str, Any], SDKInfo, Callable[[Envelope], None]) -> None
+        super().__init__(frequency, options, sdk_info, capture_func)
+
+        self.thread = None  # type: Optional[threading.Thread]
+        self.lock = threading.Lock()
+
+    def ensure_running(self):
+        # type: () -> None
+
+        pid = os.getpid()
+
+        # is running on the right process
+        if self.running and self.pid == pid:
+            return
+
+        with self.lock:
+            # another thread may have tried to acquire the lock
+            # at the same time so it may start another thread
+            # make sure to check again before proceeding
+            if self.running and self.pid == pid:
+                return
+
+            self.pid = pid
+            self.running = True
+
+            # if the profiler thread is changing,
+            # we should create a new buffer along with it
+            self.reset_buffer()
+
+            # make sure the thread is a daemon here otherwise this
+            # can keep the application running after other threads
+            # have exited
+            self.thread = threading.Thread(name=self.name, target=self.run, daemon=True)
+
+            try:
+                self.thread.start()
+            except RuntimeError:
+                # Unfortunately at this point the interpreter is in a state that no
+                # longer allows us to spawn a thread and we have to bail.
+                self.running = False
+                self.thread = None
+
+    def teardown(self):
+        # type: () -> None
+        if self.running:
+            self.running = False
+
+        if self.thread is not None:
+            self.thread.join()
+            self.thread = None
+
+        self.buffer = None
+
+
+class GeventContinuousScheduler(ContinuousScheduler):
+    """
+    This scheduler is based on the thread scheduler but adapted to work with
+    gevent. When using gevent, it may monkey patch the threading modules
+    (`threading` and `_thread`). This results in the use of greenlets instead
+    of native threads.
+
+    This is an issue because the sampler CANNOT run in a greenlet because
+    1. Other greenlets doing sync work will prevent the sampler from running
+    2. The greenlet runs in the same thread as other greenlets so when taking
+       a sample, other greenlets will have been evicted from the thread. This
+       results in a sample containing only the sampler's code.
+    """
+
+    mode = "gevent"  # type: ContinuousProfilerMode
+
+    def __init__(self, frequency, options, sdk_info, capture_func):
+        # type: (int, Dict[str, Any], SDKInfo, Callable[[Envelope], None]) -> None
+
+        if ThreadPool is None:
+            raise ValueError("Profiler mode: {} is not available".format(self.mode))
+
+        super().__init__(frequency, options, sdk_info, capture_func)
+
+        self.thread = None  # type: Optional[_ThreadPool]
+        self.lock = threading.Lock()
+
+    def ensure_running(self):
+        # type: () -> None
+        pid = os.getpid()
+
+        # is running on the right process
+        if self.running and self.pid == pid:
+            return
+
+        with self.lock:
+            # another thread may have acquired the lock first and
+            # already started the profiler thread, so make sure to
+            # check again before proceeding
+            if self.running and self.pid == pid:
+                return
+
+            self.pid = pid
+            self.running = True
+
+            # if the profiler thread is changing,
+            # we should create a new buffer along with it
+            self.reset_buffer()
+
+            self.thread = ThreadPool(1)  # type: ignore[misc]
+            try:
+                self.thread.spawn(self.run)
+            except RuntimeError:
+                # Unfortunately at this point the interpreter is in a state that no
+                # longer allows us to spawn a thread and we have to bail.
+                self.running = False
+                self.thread = None
+
+    def teardown(self):
+        # type: () -> None
+        if self.running:
+            self.running = False
+
+        if self.thread is not None:
+            self.thread.join()
+            self.thread = None
+
+        self.buffer = None
+
+
+PROFILE_BUFFER_SECONDS = 60
+
+
+class ProfileBuffer:
+    def __init__(self, options, sdk_info, buffer_size, capture_func):
+        # type: (Dict[str, Any], SDKInfo, int, Callable[[Envelope], None]) -> None
+        self.options = options
+        self.sdk_info = sdk_info
+        self.buffer_size = buffer_size
+        self.capture_func = capture_func
+
+        self.profiler_id = uuid.uuid4().hex
+        self.chunk = ProfileChunk()
+
+        # Make sure to use the same clock to compute a sample's monotonic timestamp
+        # to ensure the timestamps are correctly aligned.
+        self.start_monotonic_time = now()
+
+        # Make sure the start timestamp is defined only once per profiler id.
+        # This prevents issues with clock drift within a single profiler session.
+        #
+        # Subtracting the start_monotonic_time here to find a fixed starting position
+        # for relative monotonic timestamps for each sample.
+        self.start_timestamp = (
+            datetime.now(timezone.utc).timestamp() - self.start_monotonic_time
+        )
+
+    def write(self, monotonic_time, sample):
+        # type: (float, ExtractedSample) -> None
+        if self.should_flush(monotonic_time):
+            self.flush()
+            self.chunk = ProfileChunk()
+            self.start_monotonic_time = now()
+
+        self.chunk.write(self.start_timestamp + monotonic_time, sample)
+
+    def should_flush(self, monotonic_time):
+        # type: (float) -> bool
+
+        # If the delta between the new monotonic time and the start monotonic time
+        # exceeds the buffer size, it means we should flush the chunk
+        return monotonic_time - self.start_monotonic_time >= self.buffer_size
+
+    def flush(self):
+        # type: () -> None
+        chunk = self.chunk.to_json(self.profiler_id, self.options, self.sdk_info)
+        envelope = Envelope()
+        envelope.add_profile_chunk(chunk)
+        self.capture_func(envelope)
+
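+# A minimal sketch (editor's illustration, not part of the SDK source; all
+# values are hypothetical): how the wall-clock anchor and the monotonic
+# offsets above combine. The anchor is fixed once per profiler session, so
+# later wall-clock adjustments cannot skew sample timestamps:
+#
+#     start_monotonic_time = 1000.000              # now(), in seconds
+#     start_timestamp = 1700000000.500 - 1000.000  # wall clock minus anchor
+#     # a sample taken at monotonic time 1000.250 is stamped as
+#     start_timestamp + 1000.250                   # == 1700000000.750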
+
+class ProfileChunk:
+    def __init__(self):
+        # type: () -> None
+        self.chunk_id = uuid.uuid4().hex
+
+        self.indexed_frames = {}  # type: Dict[FrameId, int]
+        self.indexed_stacks = {}  # type: Dict[StackId, int]
+        self.frames = []  # type: List[ProcessedFrame]
+        self.stacks = []  # type: List[ProcessedStack]
+        self.samples = []  # type: List[ProcessedSample]
+
+    def write(self, ts, sample):
+        # type: (float, ExtractedSample) -> None
+        for tid, (stack_id, frame_ids, frames) in sample:
+            try:
+                # Check if the stack is indexed first; this lets us skip
+                # indexing frames if it's not necessary
+                if stack_id not in self.indexed_stacks:
+                    for i, frame_id in enumerate(frame_ids):
+                        if frame_id not in self.indexed_frames:
+                            self.indexed_frames[frame_id] = len(self.indexed_frames)
+                            self.frames.append(frames[i])
+
+                    self.indexed_stacks[stack_id] = len(self.indexed_stacks)
+                    self.stacks.append(
+                        [self.indexed_frames[frame_id] for frame_id in frame_ids]
+                    )
+
+                self.samples.append(
+                    {
+                        "timestamp": ts,
+                        "thread_id": tid,
+                        "stack_id": self.indexed_stacks[stack_id],
+                    }
+                )
+            except AttributeError:
+                # For some reason, the frame we get doesn't have certain attributes.
+                # When this happens, we abandon the current sample as it's bad.
+                capture_internal_exception(sys.exc_info())
+
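+    # A minimal sketch (editor's illustration, hypothetical values): because
+    # of the interning above, writing the same stack twice stores the frames
+    # and the stack once but records two samples:
+    #
+    #     sample = [("140001", (stack_id, frame_ids, frames))]
+    #     chunk.write(1700000000.0, sample)
+    #     chunk.write(1700000000.1, sample)
+    #     assert len(chunk.stacks) == 1 and len(chunk.samples) == 2
+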
+    def to_json(self, profiler_id, options, sdk_info):
+        # type: (str, Dict[str, Any], SDKInfo) -> Dict[str, Any]
+        profile = {
+            "frames": self.frames,
+            "stacks": self.stacks,
+            "samples": self.samples,
+            "thread_metadata": {
+                str(thread.ident): {
+                    "name": str(thread.name),
+                }
+                for thread in threading.enumerate()
+            },
+        }
+
+        set_in_app_in_frames(
+            profile["frames"],
+            options["in_app_exclude"],
+            options["in_app_include"],
+            options["project_root"],
+        )
+
+        payload = {
+            "chunk_id": self.chunk_id,
+            "client_sdk": {
+                "name": sdk_info["name"],
+                "version": VERSION,
+            },
+            "platform": "python",
+            "profile": profile,
+            "profiler_id": profiler_id,
+            "version": "2",
+        }
+
+        for key in "release", "environment", "dist":
+            if options[key] is not None:
+                payload[key] = str(options[key]).strip()
+
+        return payload
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/profiler/transaction_profiler.py b/.venv/lib/python3.12/site-packages/sentry_sdk/profiler/transaction_profiler.py
new file mode 100644
index 00000000..3743b7c9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/profiler/transaction_profiler.py
@@ -0,0 +1,837 @@
+"""
+This file is originally based on code from https://github.com/nylas/nylas-perftools,
+which is published under the following license:
+
+The MIT License (MIT)
+
+Copyright (c) 2014 Nylas
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+"""
+
+import atexit
+import os
+import platform
+import random
+import sys
+import threading
+import time
+import uuid
+import warnings
+from abc import ABC, abstractmethod
+from collections import deque
+
+import sentry_sdk
+from sentry_sdk._lru_cache import LRUCache
+from sentry_sdk.profiler.utils import (
+    DEFAULT_SAMPLING_FREQUENCY,
+    extract_stack,
+)
+from sentry_sdk.utils import (
+    capture_internal_exception,
+    get_current_thread_meta,
+    is_gevent,
+    is_valid_sample_rate,
+    logger,
+    nanosecond_time,
+    set_in_app_in_frames,
+)
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+    from typing import Callable
+    from typing import Deque
+    from typing import Dict
+    from typing import List
+    from typing import Optional
+    from typing import Set
+    from typing import Type
+    from typing_extensions import TypedDict
+
+    from sentry_sdk.profiler.utils import (
+        ProcessedStack,
+        ProcessedFrame,
+        ProcessedThreadMetadata,
+        FrameId,
+        StackId,
+        ThreadId,
+        ExtractedSample,
+    )
+    from sentry_sdk._types import Event, SamplingContext, ProfilerMode
+
+    ProcessedSample = TypedDict(
+        "ProcessedSample",
+        {
+            "elapsed_since_start_ns": str,
+            "thread_id": ThreadId,
+            "stack_id": int,
+        },
+    )
+
+    ProcessedProfile = TypedDict(
+        "ProcessedProfile",
+        {
+            "frames": List[ProcessedFrame],
+            "stacks": List[ProcessedStack],
+            "samples": List[ProcessedSample],
+            "thread_metadata": Dict[ThreadId, ProcessedThreadMetadata],
+        },
+    )
+
+
+try:
+    from gevent.monkey import get_original
+    from gevent.threadpool import ThreadPool as _ThreadPool
+
+    ThreadPool = _ThreadPool  # type: Optional[Type[_ThreadPool]]
+    thread_sleep = get_original("time", "sleep")
+except ImportError:
+    thread_sleep = time.sleep
+
+    ThreadPool = None
+
+
+_scheduler = None  # type: Optional[Scheduler]
+
+
+# The minimum number of unique samples that must exist in a profile to be
+# considered valid.
+PROFILE_MINIMUM_SAMPLES = 2
+
+
+def has_profiling_enabled(options):
+    # type: (Dict[str, Any]) -> bool
+    profiles_sampler = options["profiles_sampler"]
+    if profiles_sampler is not None:
+        return True
+
+    profiles_sample_rate = options["profiles_sample_rate"]
+    if profiles_sample_rate is not None and profiles_sample_rate > 0:
+        return True
+
+    profiles_sample_rate = options["_experiments"].get("profiles_sample_rate")
+    if profiles_sample_rate is not None:
+        logger.warning(
+            "_experiments['profiles_sample_rate'] is deprecated. "
+            "Please use the non-experimental profiles_sample_rate option "
+            "directly."
+        )
+        if profiles_sample_rate > 0:
+            return True
+
+    return False
+
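+# A minimal sketch (editor's illustration, not part of the SDK source; the
+# DSN and rates are hypothetical): the three init() options checked above,
+# in precedence order:
+#
+#     import sentry_sdk
+#     sentry_sdk.init(dsn="...", profiles_sampler=lambda ctx: 0.5)  # 1. sampler
+#     sentry_sdk.init(dsn="...", profiles_sample_rate=1.0)          # 2. rate
+#     sentry_sdk.init(dsn="...",
+#                     _experiments={"profiles_sample_rate": 1.0})   # 3. deprecated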
+
+def setup_profiler(options):
+    # type: (Dict[str, Any]) -> bool
+    global _scheduler
+
+    if _scheduler is not None:
+        logger.debug("[Profiling] Profiler is already setup")
+        return False
+
+    frequency = DEFAULT_SAMPLING_FREQUENCY
+
+    if is_gevent():
+        # If gevent has patched the threading modules then we cannot rely on
+        # them to spawn a native thread for sampling.
+        # Instead we default to the GeventScheduler which is capable of
+        # spawning native threads within gevent.
+        default_profiler_mode = GeventScheduler.mode
+    else:
+        default_profiler_mode = ThreadScheduler.mode
+
+    if options.get("profiler_mode") is not None:
+        profiler_mode = options["profiler_mode"]
+    else:
+        profiler_mode = options.get("_experiments", {}).get("profiler_mode")
+        if profiler_mode is not None:
+            logger.warning(
+                "_experiments['profiler_mode'] is deprecated. Please use the "
+                "non-experimental profiler_mode option directly."
+            )
+        profiler_mode = profiler_mode or default_profiler_mode
+
+    if (
+        profiler_mode == ThreadScheduler.mode
+        # for legacy reasons, we'll keep supporting sleep mode for this scheduler
+        or profiler_mode == "sleep"
+    ):
+        _scheduler = ThreadScheduler(frequency=frequency)
+    elif profiler_mode == GeventScheduler.mode:
+        _scheduler = GeventScheduler(frequency=frequency)
+    else:
+        raise ValueError("Unknown profiler mode: {}".format(profiler_mode))
+
+    logger.debug(
+        "[Profiling] Setting up profiler in {mode} mode".format(mode=_scheduler.mode)
+    )
+    _scheduler.setup()
+
+    atexit.register(teardown_profiler)
+
+    return True
+
+
+def teardown_profiler():
+    # type: () -> None
+
+    global _scheduler
+
+    if _scheduler is not None:
+        _scheduler.teardown()
+
+    _scheduler = None
+
+
+MAX_PROFILE_DURATION_NS = int(3e10)  # 30 seconds
+
+
+class Profile:
+    def __init__(
+        self,
+        sampled,  # type: Optional[bool]
+        start_ns,  # type: int
+        hub=None,  # type: Optional[sentry_sdk.Hub]
+        scheduler=None,  # type: Optional[Scheduler]
+    ):
+        # type: (...) -> None
+        self.scheduler = _scheduler if scheduler is None else scheduler
+
+        self.event_id = uuid.uuid4().hex  # type: str
+
+        self.sampled = sampled  # type: Optional[bool]
+
+        # Various framework integrations are capable of overwriting the active thread id.
+        # If it is set to `None` at the end of the profile, we fall back to the default.
+        self._default_active_thread_id = get_current_thread_meta()[0] or 0  # type: int
+        self.active_thread_id = None  # type: Optional[int]
+
+        self.start_ns = start_ns  # type: int
+
+        self.stop_ns = 0  # type: int
+        self.active = False  # type: bool
+
+        self.indexed_frames = {}  # type: Dict[FrameId, int]
+        self.indexed_stacks = {}  # type: Dict[StackId, int]
+        self.frames = []  # type: List[ProcessedFrame]
+        self.stacks = []  # type: List[ProcessedStack]
+        self.samples = []  # type: List[ProcessedSample]
+
+        self.unique_samples = 0
+
+        # Backwards compatibility with the old hub property
+        self._hub = None  # type: Optional[sentry_sdk.Hub]
+        if hub is not None:
+            self._hub = hub
+            warnings.warn(
+                "The `hub` parameter is deprecated. Please do not use it.",
+                DeprecationWarning,
+                stacklevel=2,
+            )
+
+    def update_active_thread_id(self):
+        # type: () -> None
+        self.active_thread_id = get_current_thread_meta()[0]
+        logger.debug(
+            "[Profiling] updating active thread id to {tid}".format(
+                tid=self.active_thread_id
+            )
+        )
+
+    def _set_initial_sampling_decision(self, sampling_context):
+        # type: (SamplingContext) -> None
+        """
+        Sets the profile's sampling decision according to the following
+        precedence rules:
+
+        1. If the transaction to be profiled is not sampled, that decision
+        will be used, regardless of anything else.
+
+        2. Use `profiles_sample_rate` to decide.
+        """
+
+        # The corresponding transaction was not sampled,
+        # so don't generate a profile for it.
+        if not self.sampled:
+            logger.debug(
+                "[Profiling] Discarding profile because transaction is discarded."
+            )
+            self.sampled = False
+            return
+
+        # The profiler hasn't been properly initialized.
+        if self.scheduler is None:
+            logger.debug(
+                "[Profiling] Discarding profile because profiler was not started."
+            )
+            self.sampled = False
+            return
+
+        client = sentry_sdk.get_client()
+        if not client.is_active():
+            self.sampled = False
+            return
+
+        options = client.options
+
+        if callable(options.get("profiles_sampler")):
+            sample_rate = options["profiles_sampler"](sampling_context)
+        elif options["profiles_sample_rate"] is not None:
+            sample_rate = options["profiles_sample_rate"]
+        else:
+            sample_rate = options["_experiments"].get("profiles_sample_rate")
+
+        # The profiles_sample_rate option was not set, so profiling
+        # was never enabled.
+        if sample_rate is None:
+            logger.debug(
+                "[Profiling] Discarding profile because profiling was not enabled."
+            )
+            self.sampled = False
+            return
+
+        if not is_valid_sample_rate(sample_rate, source="Profiling"):
+            logger.warning(
+                "[Profiling] Discarding profile because of invalid sample rate."
+            )
+            self.sampled = False
+            return
+
+        # Now we roll the dice. random.random is inclusive of 0, but not of 1,
+        # so strict < is safe here. In case sample_rate is a boolean, cast it
+        # to a float (True becomes 1.0 and False becomes 0.0)
+        self.sampled = random.random() < float(sample_rate)
+
+        if self.sampled:
+            logger.debug("[Profiling] Initializing profile")
+        else:
+            logger.debug(
+                "[Profiling] Discarding profile because it's not included in the random sample (sample rate = {sample_rate})".format(
+                    sample_rate=float(sample_rate)
+                )
+            )
+
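+    # A minimal sketch (editor's illustration, hypothetical rate): the dice
+    # roll above. random.random() returns a float in [0, 1), so with a
+    # sample_rate of 0.25 roughly a quarter of eligible profiles are kept:
+    #
+    #     random.random() < float(0.25)
+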
+    def start(self):
+        # type: () -> None
+        if not self.sampled or self.active:
+            return
+
+        assert self.scheduler, "No scheduler specified"
+        logger.debug("[Profiling] Starting profile")
+        self.active = True
+        if not self.start_ns:
+            self.start_ns = nanosecond_time()
+        self.scheduler.start_profiling(self)
+
+    def stop(self):
+        # type: () -> None
+        if not self.sampled or not self.active:
+            return
+
+        assert self.scheduler, "No scheduler specified"
+        logger.debug("[Profiling] Stopping profile")
+        self.active = False
+        self.stop_ns = nanosecond_time()
+
+    def __enter__(self):
+        # type: () -> Profile
+        scope = sentry_sdk.get_isolation_scope()
+        old_profile = scope.profile
+        scope.profile = self
+
+        self._context_manager_state = (scope, old_profile)
+
+        self.start()
+
+        return self
+
+    def __exit__(self, ty, value, tb):
+        # type: (Optional[Any], Optional[Any], Optional[Any]) -> None
+        self.stop()
+
+        scope, old_profile = self._context_manager_state
+        del self._context_manager_state
+
+        scope.profile = old_profile
+
+    def write(self, ts, sample):
+        # type: (int, ExtractedSample) -> None
+        if not self.active:
+            return
+
+        if ts < self.start_ns:
+            return
+
+        offset = ts - self.start_ns
+        if offset > MAX_PROFILE_DURATION_NS:
+            self.stop()
+            return
+
+        self.unique_samples += 1
+
+        elapsed_since_start_ns = str(offset)
+
+        for tid, (stack_id, frame_ids, frames) in sample:
+            try:
+                # Check if the stack is indexed first; this lets us skip
+                # indexing frames if it's not necessary
+                if stack_id not in self.indexed_stacks:
+                    for i, frame_id in enumerate(frame_ids):
+                        if frame_id not in self.indexed_frames:
+                            self.indexed_frames[frame_id] = len(self.indexed_frames)
+                            self.frames.append(frames[i])
+
+                    self.indexed_stacks[stack_id] = len(self.indexed_stacks)
+                    self.stacks.append(
+                        [self.indexed_frames[frame_id] for frame_id in frame_ids]
+                    )
+
+                self.samples.append(
+                    {
+                        "elapsed_since_start_ns": elapsed_since_start_ns,
+                        "thread_id": tid,
+                        "stack_id": self.indexed_stacks[stack_id],
+                    }
+                )
+            except AttributeError:
+                # For some reason, the frame we get doesn't have certain attributes.
+                # When this happens, we abandon the current sample as it's bad.
+                capture_internal_exception(sys.exc_info())
+
+    def process(self):
+        # type: () -> ProcessedProfile
+
+        # This collects the thread metadata at the end of a profile. Doing it
+        # this way means that any threads that terminate before the profile
+        # ends will not have any metadata associated with them.
+        thread_metadata = {
+            str(thread.ident): {
+                "name": str(thread.name),
+            }
+            for thread in threading.enumerate()
+        }  # type: Dict[str, ProcessedThreadMetadata]
+
+        return {
+            "frames": self.frames,
+            "stacks": self.stacks,
+            "samples": self.samples,
+            "thread_metadata": thread_metadata,
+        }
+
+    def to_json(self, event_opt, options):
+        # type: (Event, Dict[str, Any]) -> Dict[str, Any]
+        profile = self.process()
+
+        set_in_app_in_frames(
+            profile["frames"],
+            options["in_app_exclude"],
+            options["in_app_include"],
+            options["project_root"],
+        )
+
+        return {
+            "environment": event_opt.get("environment"),
+            "event_id": self.event_id,
+            "platform": "python",
+            "profile": profile,
+            "release": event_opt.get("release", ""),
+            "timestamp": event_opt["start_timestamp"],
+            "version": "1",
+            "device": {
+                "architecture": platform.machine(),
+            },
+            "os": {
+                "name": platform.system(),
+                "version": platform.release(),
+            },
+            "runtime": {
+                "name": platform.python_implementation(),
+                "version": platform.python_version(),
+            },
+            "transactions": [
+                {
+                    "id": event_opt["event_id"],
+                    "name": event_opt["transaction"],
+                    # we start the transaction before the profile, and this is
+                    # the transaction's start time relative to the profile, so
+                    # we hardcode it to 0 until we can start the profile first
+                    "relative_start_ns": "0",
+                    # use the duration of the profile instead of the transaction
+                    # because we end the transaction after the profile
+                    "relative_end_ns": str(self.stop_ns - self.start_ns),
+                    "trace_id": event_opt["contexts"]["trace"]["trace_id"],
+                    "active_thread_id": str(
+                        self._default_active_thread_id
+                        if self.active_thread_id is None
+                        else self.active_thread_id
+                    ),
+                }
+            ],
+        }
+
+    def valid(self):
+        # type: () -> bool
+        client = sentry_sdk.get_client()
+        if not client.is_active():
+            return False
+
+        if not has_profiling_enabled(client.options):
+            return False
+
+        if self.sampled is None or not self.sampled:
+            if client.transport:
+                client.transport.record_lost_event(
+                    "sample_rate", data_category="profile"
+                )
+            return False
+
+        if self.unique_samples < PROFILE_MINIMUM_SAMPLES:
+            if client.transport:
+                client.transport.record_lost_event(
+                    "insufficient_data", data_category="profile"
+                )
+            logger.debug("[Profiling] Discarding profile because insufficient samples.")
+            return False
+
+        return True
+
+    @property
+    def hub(self):
+        # type: () -> Optional[sentry_sdk.Hub]
+        warnings.warn(
+            "The `hub` attribute is deprecated. Please do not access it.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        return self._hub
+
+    @hub.setter
+    def hub(self, value):
+        # type: (Optional[sentry_sdk.Hub]) -> None
+        warnings.warn(
+            "The `hub` attribute is deprecated. Please do not set it.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        self._hub = value
+
+
+class Scheduler(ABC):
+    mode = "unknown"  # type: ProfilerMode
+
+    def __init__(self, frequency):
+        # type: (int) -> None
+        self.interval = 1.0 / frequency
+
+        self.sampler = self.make_sampler()
+
+        # cap the number of new profiles at any time so it does not grow infinitely
+        self.new_profiles = deque(maxlen=128)  # type: Deque[Profile]
+        self.active_profiles = set()  # type: Set[Profile]
+
+    def __enter__(self):
+        # type: () -> Scheduler
+        self.setup()
+        return self
+
+    def __exit__(self, ty, value, tb):
+        # type: (Optional[Any], Optional[Any], Optional[Any]) -> None
+        self.teardown()
+
+    @abstractmethod
+    def setup(self):
+        # type: () -> None
+        pass
+
+    @abstractmethod
+    def teardown(self):
+        # type: () -> None
+        pass
+
+    def ensure_running(self):
+        # type: () -> None
+        """
+        Ensure the scheduler is running. By default, this method is a no-op.
+        The method should be overridden by any implementation for which it is
+        relevant.
+        """
+        return None
+
+    def start_profiling(self, profile):
+        # type: (Profile) -> None
+        self.ensure_running()
+        self.new_profiles.append(profile)
+
+    def make_sampler(self):
+        # type: () -> Callable[..., None]
+        cwd = os.getcwd()
+
+        cache = LRUCache(max_size=256)
+
+        def _sample_stack(*args, **kwargs):
+            # type: (*Any, **Any) -> None
+            """
+            Take a sample of the stack on all the threads in the process.
+            This should be called at a regular interval to collect samples.
+            """
+            # no profiles taking place, so we can stop early
+            if not self.new_profiles and not self.active_profiles:
+                # make sure to clear the cache if we're not profiling so we
+                # don't keep a reference to the last stack of frames around
+                return
+
+            # This is the number of profiles we want to pop off.
+            # It's possible another thread adds a new profile to
+            # the list and we spend longer than we want inside
+            # the loop below.
+            #
+            # Also make sure to set this value before extracting
+            # frames so we do not write to any new profiles that
+            # were started after this point.
+            new_profiles = len(self.new_profiles)
+
+            now = nanosecond_time()
+
+            try:
+                sample = [
+                    (str(tid), extract_stack(frame, cache, cwd))
+                    for tid, frame in sys._current_frames().items()
+                ]
+            except AttributeError:
+                # For some reason, the frame we get doesn't have certain attributes.
+                # When this happens, we abandon the current sample as it's bad.
+                capture_internal_exception(sys.exc_info())
+                return
+
+            # Move the new profiles into the active_profiles set.
+            #
+            # We cannot add directly to the active_profiles set
+            # in `start_profiling` because it is called from other
+            # threads, which can cause a RuntimeError when the
+            # set size changes during iteration without a lock.
+            #
+            # We also want to avoid using a lock here so threads
+            # that are starting profiles are not blocked waiting
+            # to acquire the lock.
+            for _ in range(new_profiles):
+                self.active_profiles.add(self.new_profiles.popleft())
+
+            inactive_profiles = []
+
+            for profile in self.active_profiles:
+                if profile.active:
+                    profile.write(now, sample)
+                else:
+                    # If a profile is marked inactive, we buffer it
+                    # to `inactive_profiles` so it can be removed.
+                    # We cannot remove it here as it would result
+                    # in a RuntimeError.
+                    inactive_profiles.append(profile)
+
+            for profile in inactive_profiles:
+                self.active_profiles.remove(profile)
+
+        return _sample_stack
+
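+# A minimal sketch (editor's illustration, not part of the SDK source): the
+# core primitive behind _sample_stack, reduced to its essence.
+# sys._current_frames() returns the topmost frame of every thread in the
+# interpreter; the helper name `snapshot` is hypothetical:
+#
+#     import sys
+#     def snapshot():
+#         return {
+#             str(tid): frame.f_code.co_name
+#             for tid, frame in sys._current_frames().items()
+#         }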
+
+class ThreadScheduler(Scheduler):
+    """
+    This scheduler is based on running a daemon thread that will call
+    the sampler at a regular interval.
+    """
+
+    mode = "thread"  # type: ProfilerMode
+    name = "sentry.profiler.ThreadScheduler"
+
+    def __init__(self, frequency):
+        # type: (int) -> None
+        super().__init__(frequency=frequency)
+
+        # used to signal to the thread that it should stop
+        self.running = False
+        self.thread = None  # type: Optional[threading.Thread]
+        self.pid = None  # type: Optional[int]
+        self.lock = threading.Lock()
+
+    def setup(self):
+        # type: () -> None
+        pass
+
+    def teardown(self):
+        # type: () -> None
+        if self.running:
+            self.running = False
+            if self.thread is not None:
+                self.thread.join()
+
+    def ensure_running(self):
+        # type: () -> None
+        """
+        Check that the profiler has an active thread to run in, and start one if
+        that's not the case.
+
+        Note that this might fail (e.g. in Python 3.12 it's not possible to
+        spawn new threads at interpreter shutdown). In that case self.running
+        will be False after running this function.
+        """
+        pid = os.getpid()
+
+        # is running on the right process
+        if self.running and self.pid == pid:
+            return
+
+        with self.lock:
+            # another thread may have acquired the lock first and
+            # already started the profiler thread, so make sure to
+            # check again before proceeding
+            if self.running and self.pid == pid:
+                return
+
+            self.pid = pid
+            self.running = True
+
+            # make sure the thread is a daemon here; otherwise it
+            # can keep the application running after other threads
+            # have exited
+            self.thread = threading.Thread(name=self.name, target=self.run, daemon=True)
+            try:
+                self.thread.start()
+            except RuntimeError:
+                # Unfortunately at this point the interpreter is in a state that no
+                # longer allows us to spawn a thread and we have to bail.
+                self.running = False
+                self.thread = None
+                return
+
+    def run(self):
+        # type: () -> None
+        last = time.perf_counter()
+
+        while self.running:
+            self.sampler()
+
+            # some time may have elapsed since the last time
+            # we sampled, so we need to account for that and
+            # not sleep for too long
+            elapsed = time.perf_counter() - last
+            if elapsed < self.interval:
+                thread_sleep(self.interval - elapsed)
+
+            # after sleeping, make sure to take the current
+            # timestamp so we can use it next iteration
+            last = time.perf_counter()
+
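+# A minimal sketch (editor's illustration, hypothetical numbers): the drift
+# compensation in run() above. At 101 Hz the interval is roughly 9.9 ms; if
+# one pass of the sampler took 3 ms, the loop sleeps only the remainder:
+#
+#     interval = 1.0 / 101               # ~0.0099 s
+#     elapsed = 0.003                    # time spent sampling
+#     thread_sleep(interval - elapsed)   # ~0.0069 s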
+
+class GeventScheduler(Scheduler):
+    """
+    This scheduler is based on the thread scheduler but adapted to work with
+    gevent. When using gevent, it may monkey patch the threading modules
+    (`threading` and `_thread`). This results in the use of greenlets instead
+    of native threads.
+
+    This is an issue because the sampler CANNOT run in a greenlet:
+    1. Other greenlets doing sync work will prevent the sampler from running.
+    2. The greenlet runs in the same thread as other greenlets, so when taking
+       a sample, other greenlets will have been evicted from the thread. This
+       results in a sample containing only the sampler's code.
+    """
+
+    mode = "gevent"  # type: ProfilerMode
+    name = "sentry.profiler.GeventScheduler"
+
+    def __init__(self, frequency):
+        # type: (int) -> None
+
+        if ThreadPool is None:
+            raise ValueError("Profiler mode: {} is not available".format(self.mode))
+
+        super().__init__(frequency=frequency)
+
+        # used to signal to the thread that it should stop
+        self.running = False
+        self.thread = None  # type: Optional[_ThreadPool]
+        self.pid = None  # type: Optional[int]
+
+        # This intentionally uses the gevent patched threading.Lock.
+        # The lock will be required when first trying to start profiles
+        # as we need to spawn the profiler thread from the greenlets.
+        self.lock = threading.Lock()
+
+    def setup(self):
+        # type: () -> None
+        pass
+
+    def teardown(self):
+        # type: () -> None
+        if self.running:
+            self.running = False
+            if self.thread is not None:
+                self.thread.join()
+
+    def ensure_running(self):
+        # type: () -> None
+        pid = os.getpid()
+
+        # is running on the right process
+        if self.running and self.pid == pid:
+            return
+
+        with self.lock:
+            # another thread may have acquired the lock first and
+            # already started the profiler thread, so make sure to
+            # check again before proceeding
+            if self.running and self.pid == pid:
+                return
+
+            self.pid = pid
+            self.running = True
+
+            self.thread = ThreadPool(1)  # type: ignore[misc]
+            try:
+                self.thread.spawn(self.run)
+            except RuntimeError:
+                # Unfortunately at this point the interpreter is in a state that no
+                # longer allows us to spawn a thread and we have to bail.
+                self.running = False
+                self.thread = None
+                return
+
+    def run(self):
+        # type: () -> None
+        last = time.perf_counter()
+
+        while self.running:
+            self.sampler()
+
+            # some time may have elapsed since the last time
+            # we sampled, so we need to account for that and
+            # not sleep for too long
+            elapsed = time.perf_counter() - last
+            if elapsed < self.interval:
+                thread_sleep(self.interval - elapsed)
+
+            # after sleeping, make sure to take the current
+            # timestamp so we can use it next iteration
+            last = time.perf_counter()
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/profiler/utils.py b/.venv/lib/python3.12/site-packages/sentry_sdk/profiler/utils.py
new file mode 100644
index 00000000..3554cddb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/profiler/utils.py
@@ -0,0 +1,199 @@
+import os
+from collections import deque
+
+from sentry_sdk._compat import PY311
+from sentry_sdk.utils import filename_for_module
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from sentry_sdk._lru_cache import LRUCache
+    from types import FrameType
+    from typing import Deque
+    from typing import List
+    from typing import Optional
+    from typing import Sequence
+    from typing import Tuple
+    from typing_extensions import TypedDict
+
+    ThreadId = str
+
+    ProcessedStack = List[int]
+
+    ProcessedFrame = TypedDict(
+        "ProcessedFrame",
+        {
+            "abs_path": str,
+            "filename": Optional[str],
+            "function": str,
+            "lineno": int,
+            "module": Optional[str],
+        },
+    )
+
+    ProcessedThreadMetadata = TypedDict(
+        "ProcessedThreadMetadata",
+        {"name": str},
+    )
+
+    FrameId = Tuple[
+        str,  # abs_path
+        int,  # lineno
+        str,  # function
+    ]
+    FrameIds = Tuple[FrameId, ...]
+
+    # The exact value of this id is not very meaningful. The purpose
+    # of this id is to give us a compact and unique identifier for a
+    # raw stack that can be used as a key to a dictionary so that it
+    # can be used during the sampled format generation.
+    StackId = Tuple[int, int]
+
+    ExtractedStack = Tuple[StackId, FrameIds, List[ProcessedFrame]]
+    ExtractedSample = Sequence[Tuple[ThreadId, ExtractedStack]]
+
+# The default sampling frequency to use. This is set at 101 in order to
+# mitigate the effects of lockstep sampling: a prime frequency is unlikely to
+# stay phase-aligned with work scheduled at common rates such as 100 Hz, so
+# samples do not keep landing on the same point of a periodic task.
+DEFAULT_SAMPLING_FREQUENCY = 101
+
+
+# We want to impose a stack depth limit so that samples aren't too large.
+MAX_STACK_DEPTH = 128
+
+
+if PY311:
+
+    def get_frame_name(frame):
+        # type: (FrameType) -> str
+        return frame.f_code.co_qualname
+
+else:
+
+    def get_frame_name(frame):
+        # type: (FrameType) -> str
+
+        f_code = frame.f_code
+        co_varnames = f_code.co_varnames
+
+        # co_name only contains the frame name.  If the frame was a method,
+        # the class name will NOT be included.
+        name = f_code.co_name
+
+        # if it was a method, we can get the class name by inspecting
+        # the f_locals for the `self` argument
+        try:
+            if (
+                # the co_varnames start with the frame's positional arguments
+                # and we expect the first to be `self` if it's an instance method
+                co_varnames
+                and co_varnames[0] == "self"
+                and "self" in frame.f_locals
+            ):
+                for cls in type(frame.f_locals["self"]).__mro__:
+                    if name in cls.__dict__:
+                        return "{}.{}".format(cls.__name__, name)
+        except (AttributeError, ValueError):
+            pass
+
+        # if it was a class method, (decorated with `@classmethod`)
+        # we can get the class name by inspecting the f_locals for the `cls` argument
+        try:
+            if (
+                # the co_varnames start with the frame's positional arguments
+                # and we expect the first to be `cls` if it's a class method
+                co_varnames
+                and co_varnames[0] == "cls"
+                and "cls" in frame.f_locals
+            ):
+                for cls in frame.f_locals["cls"].__mro__:
+                    if name in cls.__dict__:
+                        return "{}.{}".format(cls.__name__, name)
+        except (AttributeError, ValueError):
+            pass
+
+        # nothing we can do if it is a staticmethod (decorated with @staticmethod)
+
+        # we've done all we can, time to give up and return what we have
+        return name
+
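+# A minimal sketch (editor's illustration; class and method names are
+# hypothetical): what the `self` fallback above recovers on pre-3.11
+# interpreters. On 3.11+, co_qualname gives the same answer directly:
+#
+#     import sys
+#     class Foo:
+#         def bar(self):
+#             return get_frame_name(sys._getframe())
+#
+#     Foo().bar()  # -> "Foo.bar"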
+
+def frame_id(raw_frame):
+    # type: (FrameType) -> FrameId
+    return (raw_frame.f_code.co_filename, raw_frame.f_lineno, get_frame_name(raw_frame))
+
+
+def extract_frame(fid, raw_frame, cwd):
+    # type: (FrameId, FrameType, str) -> ProcessedFrame
+    abs_path = raw_frame.f_code.co_filename
+
+    try:
+        module = raw_frame.f_globals["__name__"]
+    except Exception:
+        module = None
+
+    # namedtuples can be many times slower to initialize and to
+    # access attributes on, so we opt for a plain dict here instead
+    return {
+        # This originally was `os.path.abspath(abs_path)` but that had
+        # a large performance overhead.
+        #
+        # According to docs, this is equivalent to
+        # `os.path.normpath(os.path.join(os.getcwd(), path))`.
+        # The `os.getcwd()` call is slow here, so we precompute it.
+        #
+        # Additionally, since we are using normalized path already,
+        # we skip calling `os.path.normpath` entirely.
+        "abs_path": os.path.join(cwd, abs_path),
+        "module": module,
+        "filename": filename_for_module(module, abs_path) or None,
+        "function": fid[2],
+        "lineno": raw_frame.f_lineno,
+    }
+
+
+def extract_stack(
+    raw_frame,  # type: Optional[FrameType]
+    cache,  # type: LRUCache
+    cwd,  # type: str
+    max_stack_depth=MAX_STACK_DEPTH,  # type: int
+):
+    # type: (...) -> ExtractedStack
+    """
+    Extracts the stack starting from the specified frame. The extracted stack
+    assumes the specified frame is the top of the stack, and works back
+    to the bottom of the stack.
+
+    In the event that the stack is more than `MAX_STACK_DEPTH` frames deep,
+    only the first `MAX_STACK_DEPTH` frames will be returned.
+    """
+
+    raw_frames = deque(maxlen=max_stack_depth)  # type: Deque[FrameType]
+
+    while raw_frame is not None:
+        f_back = raw_frame.f_back
+        raw_frames.append(raw_frame)
+        raw_frame = f_back
+
+    frame_ids = tuple(frame_id(raw_frame) for raw_frame in raw_frames)
+    frames = []
+    for i, fid in enumerate(frame_ids):
+        frame = cache.get(fid)
+        if frame is None:
+            frame = extract_frame(fid, raw_frames[i], cwd)
+            cache.set(fid, frame)
+        frames.append(frame)
+
+    # Instead of mapping the stack into frame ids and hashing
+    # that as a tuple, we can directly hash the stack.
+    # This saves us from having to generate yet another list.
+    # Additionally, using the stack as the key directly is
+    # costly because the stack can be large, so we pre-hash
+    # the stack and use the hash as the key, since the key is
+    # needed a few times; this improves performance.
+    #
+    # To reduce the likelihood of hash collisions, we include
+    # the stack depth. This means that only stacks of the same
+    # depth can suffer from hash collisions.
+    stack_id = len(raw_frames), hash(frame_ids)
+
+    return stack_id, frame_ids, frames
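+
+# A minimal sketch (editor's illustration, not part of the SDK source):
+# extracting the calling stack with the same cache type the schedulers use:
+#
+#     import sys
+#     from sentry_sdk._lru_cache import LRUCache
+#     stack_id, frame_ids, frames = extract_stack(
+#         sys._getframe(), LRUCache(max_size=256), os.getcwd()
+#     )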
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/py.typed b/.venv/lib/python3.12/site-packages/sentry_sdk/py.typed
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/py.typed
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/scope.py b/.venv/lib/python3.12/site-packages/sentry_sdk/scope.py
new file mode 100644
index 00000000..6a5e70a6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/scope.py
@@ -0,0 +1,1786 @@
+import os
+import sys
+import warnings
+from copy import copy, deepcopy
+from collections import deque
+from contextlib import contextmanager
+from enum import Enum
+from datetime import datetime, timezone
+from functools import wraps
+from itertools import chain
+
+from sentry_sdk.attachments import Attachment
+from sentry_sdk.consts import DEFAULT_MAX_BREADCRUMBS, FALSE_VALUES, INSTRUMENTER
+from sentry_sdk.feature_flags import FlagBuffer, DEFAULT_FLAG_CAPACITY
+from sentry_sdk.profiler.continuous_profiler import (
+    get_profiler_id,
+    try_autostart_continuous_profiler,
+    try_profile_lifecycle_trace_start,
+)
+from sentry_sdk.profiler.transaction_profiler import Profile
+from sentry_sdk.session import Session
+from sentry_sdk.tracing_utils import (
+    Baggage,
+    has_tracing_enabled,
+    normalize_incoming_data,
+    PropagationContext,
+)
+from sentry_sdk.tracing import (
+    BAGGAGE_HEADER_NAME,
+    SENTRY_TRACE_HEADER_NAME,
+    NoOpSpan,
+    Span,
+    Transaction,
+)
+from sentry_sdk.utils import (
+    capture_internal_exception,
+    capture_internal_exceptions,
+    ContextVar,
+    datetime_from_isoformat,
+    disable_capture_event,
+    event_from_exception,
+    exc_info_from_error,
+    logger,
+)
+
+import typing
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from collections.abc import Mapping, MutableMapping
+
+    from typing import Any
+    from typing import Callable
+    from typing import Deque
+    from typing import Dict
+    from typing import Generator
+    from typing import Iterator
+    from typing import List
+    from typing import Optional
+    from typing import ParamSpec
+    from typing import Tuple
+    from typing import TypeVar
+    from typing import Union
+
+    from typing_extensions import Unpack
+
+    from sentry_sdk._types import (
+        Breadcrumb,
+        BreadcrumbHint,
+        ErrorProcessor,
+        Event,
+        EventProcessor,
+        ExcInfo,
+        Hint,
+        LogLevelStr,
+        SamplingContext,
+        Type,
+    )
+
+    from sentry_sdk.tracing import TransactionKwargs
+
+    import sentry_sdk
+
+    P = ParamSpec("P")
+    R = TypeVar("R")
+
+    F = TypeVar("F", bound=Callable[..., Any])
+    T = TypeVar("T")
+
+
+# Holds data that will be added to **all** events sent by this process.
+# In case this is an HTTP server (think web framework) with multiple users,
+# the data will be added to the events of all users.
+# Typically this is used for process-wide data such as the release.
+_global_scope = None  # type: Optional[Scope]
+
+# Holds data for the active request.
+# This is used to isolate data for different requests or users.
+# The isolation scope is usually created by integrations, but may also
+# be created manually.
+_isolation_scope = ContextVar("isolation_scope", default=None)
+
+# Holds data for the active span.
+# This can be used to manually add additional data to a span.
+_current_scope = ContextVar("current_scope", default=None)
+
+global_event_processors = []  # type: List[EventProcessor]
+
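+# A minimal sketch (editor's illustration, not part of the SDK source; the
+# variable name `demo` is hypothetical): why ContextVar gives the isolation
+# the comments above describe. Each thread or asyncio task sees its own
+# value, while module-level globals are shared by the whole process:
+#
+#     from contextvars import ContextVar
+#     demo = ContextVar("demo", default=None)
+#     demo.set("only visible in this execution context")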
+
+class ScopeType(Enum):
+    CURRENT = "current"
+    ISOLATION = "isolation"
+    GLOBAL = "global"
+    MERGED = "merged"
+
+
+class _ScopeManager:
+    def __init__(self, hub=None):
+        # type: (Optional[Any]) -> None
+        self._old_scopes = []  # type: List[Scope]
+
+    def __enter__(self):
+        # type: () -> Scope
+        isolation_scope = Scope.get_isolation_scope()
+
+        self._old_scopes.append(isolation_scope)
+
+        forked_scope = isolation_scope.fork()
+        _isolation_scope.set(forked_scope)
+
+        return forked_scope
+
+    def __exit__(self, exc_type, exc_value, tb):
+        # type: (Any, Any, Any) -> None
+        old_scope = self._old_scopes.pop()
+        _isolation_scope.set(old_scope)
+
+
+def add_global_event_processor(processor):
+    # type: (EventProcessor) -> None
+    global_event_processors.append(processor)
+
+
+def _attr_setter(fn):
+    # type: (Any) -> Any
+    return property(fset=fn, doc=fn.__doc__)
+
+
+def _disable_capture(fn):
+    # type: (F) -> F
+    @wraps(fn)
+    def wrapper(self, *args, **kwargs):
+        # type: (Any, *Dict[str, Any], **Any) -> Any
+        if not self._should_capture:
+            return
+        try:
+            self._should_capture = False
+            return fn(self, *args, **kwargs)
+        finally:
+            self._should_capture = True
+
+    return wrapper  # type: ignore
+
+
+class Scope:
+    """The scope holds extra information that should be sent with all
+    events that belong to it.
+    """
+
+    # NOTE: Even though it should not happen, the scope needs to not crash when
+    # accessed by multiple threads. It's fine if it's full of races, but those
+    # races should never make the user application crash.
+    #
+    # The same needs to hold for any accesses of the scope the SDK makes.
+
+    __slots__ = (
+        "_level",
+        "_name",
+        "_fingerprint",
+        # note that for legacy reasons, _transaction is the transaction *name*,
+        # not a Transaction object (the object is stored in _span)
+        "_transaction",
+        "_transaction_info",
+        "_user",
+        "_tags",
+        "_contexts",
+        "_extras",
+        "_breadcrumbs",
+        "_event_processors",
+        "_error_processors",
+        "_should_capture",
+        "_span",
+        "_session",
+        "_attachments",
+        "_force_auto_session_tracking",
+        "_profile",
+        "_propagation_context",
+        "client",
+        "_type",
+        "_last_event_id",
+        "_flags",
+    )
+
+    def __init__(self, ty=None, client=None):
+        # type: (Optional[ScopeType], Optional[sentry_sdk.Client]) -> None
+        self._type = ty
+
+        self._event_processors = []  # type: List[EventProcessor]
+        self._error_processors = []  # type: List[ErrorProcessor]
+
+        self._name = None  # type: Optional[str]
+        self._propagation_context = None  # type: Optional[PropagationContext]
+
+        self.client = NonRecordingClient()  # type: sentry_sdk.client.BaseClient
+
+        if client is not None:
+            self.set_client(client)
+
+        self.clear()
+
+        incoming_trace_information = self._load_trace_data_from_env()
+        self.generate_propagation_context(incoming_data=incoming_trace_information)
+
+    def __copy__(self):
+        # type: () -> Scope
+        """
+        Returns a copy of this scope.
+        This also creates a copy of all referenced data structures.
+        """
+        rv = object.__new__(self.__class__)  # type: Scope
+
+        rv._type = self._type
+        rv.client = self.client
+        rv._level = self._level
+        rv._name = self._name
+        rv._fingerprint = self._fingerprint
+        rv._transaction = self._transaction
+        rv._transaction_info = dict(self._transaction_info)
+        rv._user = self._user
+
+        rv._tags = dict(self._tags)
+        rv._contexts = dict(self._contexts)
+        rv._extras = dict(self._extras)
+
+        rv._breadcrumbs = copy(self._breadcrumbs)
+        rv._event_processors = list(self._event_processors)
+        rv._error_processors = list(self._error_processors)
+        rv._propagation_context = self._propagation_context
+
+        rv._should_capture = self._should_capture
+        rv._span = self._span
+        rv._session = self._session
+        rv._force_auto_session_tracking = self._force_auto_session_tracking
+        rv._attachments = list(self._attachments)
+
+        rv._profile = self._profile
+
+        rv._last_event_id = self._last_event_id
+
+        rv._flags = deepcopy(self._flags)
+
+        return rv
+
+    @classmethod
+    def get_current_scope(cls):
+        # type: () -> Scope
+        """
+        .. versionadded:: 2.0.0
+
+        Returns the current scope.
+        """
+        current_scope = _current_scope.get()
+        if current_scope is None:
+            current_scope = Scope(ty=ScopeType.CURRENT)
+            _current_scope.set(current_scope)
+
+        return current_scope
+
+    @classmethod
+    def set_current_scope(cls, new_current_scope):
+        # type: (Scope) -> None
+        """
+        .. versionadded:: 2.0.0
+
+        Sets the given scope as the new current scope overwriting the existing current scope.
+        :param new_current_scope: The scope to set as the new current scope.
+        """
+        _current_scope.set(new_current_scope)
+
+    @classmethod
+    def get_isolation_scope(cls):
+        # type: () -> Scope
+        """
+        .. versionadded:: 2.0.0
+
+        Returns the isolation scope.
+        """
+        isolation_scope = _isolation_scope.get()
+        if isolation_scope is None:
+            isolation_scope = Scope(ty=ScopeType.ISOLATION)
+            _isolation_scope.set(isolation_scope)
+
+        return isolation_scope
+
+    @classmethod
+    def set_isolation_scope(cls, new_isolation_scope):
+        # type: (Scope) -> None
+        """
+        .. versionadded:: 2.0.0
+
+        Sets the given scope as the new isolation scope overwriting the existing isolation scope.
+        :param new_isolation_scope: The scope to set as the new isolation scope.
+        """
+        _isolation_scope.set(new_isolation_scope)
+
+    @classmethod
+    def get_global_scope(cls):
+        # type: () -> Scope
+        """
+        .. versionadded:: 2.0.0
+
+        Returns the global scope.
+        """
+        global _global_scope
+        if _global_scope is None:
+            _global_scope = Scope(ty=ScopeType.GLOBAL)
+
+        return _global_scope
+
+    @classmethod
+    def last_event_id(cls):
+        # type: () -> Optional[str]
+        """
+        .. versionadded:: 2.2.0
+
+        Returns event ID of the event most recently captured by the isolation scope, or None if no event
+        has been captured. We do not consider events that are dropped, e.g. by a before_send hook.
+        Transactions also are not considered events in this context.
+
+        The event corresponding to the returned event ID is NOT guaranteed to actually be sent to Sentry;
+        whether the event is sent depends on the transport. The event could be sent later or not at all.
+        Even a sent event could fail to arrive in Sentry due to network issues, exhausted quotas, or
+        various other reasons.
+        """
+        return cls.get_isolation_scope()._last_event_id
+
+    def _merge_scopes(self, additional_scope=None, additional_scope_kwargs=None):
+        # type: (Optional[Scope], Optional[Dict[str, Any]]) -> Scope
+        """
+        Merges global, isolation and current scope into a new scope and
+        adds the given additional scope or additional scope kwargs to it.
+        """
+        if additional_scope and additional_scope_kwargs:
+            raise TypeError("cannot provide scope and kwargs")
+
+        final_scope = copy(_global_scope) if _global_scope is not None else Scope()
+        final_scope._type = ScopeType.MERGED
+
+        isolation_scope = _isolation_scope.get()
+        if isolation_scope is not None:
+            final_scope.update_from_scope(isolation_scope)
+
+        current_scope = _current_scope.get()
+        if current_scope is not None:
+            final_scope.update_from_scope(current_scope)
+
+        if self != current_scope and self != isolation_scope:
+            final_scope.update_from_scope(self)
+
+        if additional_scope is not None:
+            if callable(additional_scope):
+                additional_scope(final_scope)
+            else:
+                final_scope.update_from_scope(additional_scope)
+
+        elif additional_scope_kwargs:
+            final_scope.update_from_kwargs(**additional_scope_kwargs)
+
+        return final_scope
+
+    @classmethod
+    def get_client(cls):
+        # type: () -> sentry_sdk.client.BaseClient
+        """
+        .. versionadded:: 2.0.0
+
+        Returns the currently used :py:class:`sentry_sdk.Client`.
+        This checks the current scope, the isolation scope and the global scope for a client.
+        If no client is available a :py:class:`sentry_sdk.client.NonRecordingClient` is returned.
+        """
+        current_scope = _current_scope.get()
+        try:
+            client = current_scope.client
+        except AttributeError:
+            client = None
+
+        if client is not None and client.is_active():
+            return client
+
+        isolation_scope = _isolation_scope.get()
+        try:
+            client = isolation_scope.client
+        except AttributeError:
+            client = None
+
+        if client is not None and client.is_active():
+            return client
+
+        try:
+            client = _global_scope.client  # type: ignore
+        except AttributeError:
+            client = None
+
+        if client is not None and client.is_active():
+            return client
+
+        return NonRecordingClient()
+
+    def set_client(self, client=None):
+        # type: (Optional[sentry_sdk.client.BaseClient]) -> None
+        """
+        .. versionadded:: 2.0.0
+
+        Sets the client for this scope.
+
+        :param client: The client to use in this scope.
+            If `None` the client of the scope will be replaced by a :py:class:`sentry_sdk.NonRecordingClient`.
+
+        """
+        self.client = client if client is not None else NonRecordingClient()
+
+    def fork(self):
+        # type: () -> Scope
+        """
+        .. versionadded:: 2.0.0
+
+        Returns a fork of this scope.
+        """
+        forked_scope = copy(self)
+        return forked_scope
+
+    def _load_trace_data_from_env(self):
+        # type: () -> Optional[Dict[str, str]]
+        """
+        Load Sentry trace id and baggage from environment variables.
+        Can be disabled by setting SENTRY_USE_ENVIRONMENT to "false".
+        """
+        incoming_trace_information = None
+
+        sentry_use_environment = (
+            os.environ.get("SENTRY_USE_ENVIRONMENT") or ""
+        ).lower()
+        use_environment = sentry_use_environment not in FALSE_VALUES
+        if use_environment:
+            incoming_trace_information = {}
+
+            if os.environ.get("SENTRY_TRACE"):
+                incoming_trace_information[SENTRY_TRACE_HEADER_NAME] = (
+                    os.environ.get("SENTRY_TRACE") or ""
+                )
+
+            if os.environ.get("SENTRY_BAGGAGE"):
+                incoming_trace_information[BAGGAGE_HEADER_NAME] = (
+                    os.environ.get("SENTRY_BAGGAGE") or ""
+                )
+
+        return incoming_trace_information or None
+
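+    # Editor's note: an illustrative shell invocation handing trace data to a
+    # child process through the environment variables read above (the header
+    # values are made up):
+    #
+    #     SENTRY_TRACE="771a43a4192642f0b136d5159a501700-1234567890abcdef" \
+    #     SENTRY_BAGGAGE="sentry-trace_id=771a43a4192642f0b136d5159a501700" \
+    #     python worker.py
+    #
+    # Setting SENTRY_USE_ENVIRONMENT=false disables this lookup.
+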
+    def set_new_propagation_context(self):
+        # type: () -> None
+        """
+        Creates a new propagation context and sets it as `_propagation_context`, overwriting any existing one.
+        """
+        self._propagation_context = PropagationContext()
+
+    def generate_propagation_context(self, incoming_data=None):
+        # type: (Optional[Dict[str, str]]) -> None
+        """
+        Makes sure a propagation context is set on the scope.
+        If `incoming_data` is given, it overwrites any existing propagation context.
+        If no `incoming_data` is given, a new propagation context is created, but an existing one is NOT overwritten.
+        """
+        if incoming_data:
+            propagation_context = PropagationContext.from_incoming_data(incoming_data)
+            if propagation_context is not None:
+                self._propagation_context = propagation_context
+
+        if self._type != ScopeType.CURRENT:
+            if self._propagation_context is None:
+                self.set_new_propagation_context()
+
+    def get_dynamic_sampling_context(self):
+        # type: () -> Optional[Dict[str, str]]
+        """
+        Returns the Dynamic Sampling Context from the Propagation Context,
+        refreshed from the current baggage if one is available.
+        Returns `None` if the scope has no Propagation Context.
+        """
+        if self._propagation_context is None:
+            return None
+
+        baggage = self.get_baggage()
+        if baggage is not None:
+            self._propagation_context.dynamic_sampling_context = (
+                baggage.dynamic_sampling_context()
+            )
+
+        return self._propagation_context.dynamic_sampling_context
+
+    def get_traceparent(self, *args, **kwargs):
+        # type: (*Any, **Any) -> Optional[str]
+        """
+        Returns the Sentry "sentry-trace" header (aka the traceparent) from the
+        currently active span or the scope's Propagation Context.
+        """
+        client = self.get_client()
+
+        # If we have an active span, return traceparent from there
+        if has_tracing_enabled(client.options) and self.span is not None:
+            return self.span.to_traceparent()
+
+        # If this scope has a propagation context, return traceparent from there
+        if self._propagation_context is not None:
+            traceparent = "%s-%s" % (
+                self._propagation_context.trace_id,
+                self._propagation_context.span_id,
+            )
+            return traceparent
+
+        # Fall back to isolation scope's traceparent. It always has one
+        return self.get_isolation_scope().get_traceparent()
+
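+    # Editor's note (illustrative): a traceparent built from the propagation
+    # context has the form "<32-hex trace_id>-<16-hex span_id>", e.g.
+    # "771a43a4192642f0b136d5159a501700-1234567890abcdef".
+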
+    def get_baggage(self, *args, **kwargs):
+        # type: (*Any, **Any) -> Optional[Baggage]
+        """
+        Returns the Sentry "baggage" header containing trace information from the
+        currently active span or the scope's Propagation Context.
+        """
+        client = self.get_client()
+
+        # If we have an active span, return baggage from there
+        if has_tracing_enabled(client.options) and self.span is not None:
+            return self.span.to_baggage()
+
+        # If this scope has a propagation context, return baggage from there
+        if self._propagation_context is not None:
+            dynamic_sampling_context = (
+                self._propagation_context.dynamic_sampling_context
+            )
+            if dynamic_sampling_context is None:
+                return Baggage.from_options(self)
+            else:
+                return Baggage(dynamic_sampling_context)
+
+        # Fall back to isolation scope's baggage. It always has one
+        return self.get_isolation_scope().get_baggage()
+
+    def get_trace_context(self):
+        # type: () -> Any
+        """
+        Returns the Sentry "trace" context from the Propagation Context.
+        """
+        if self._propagation_context is None:
+            return None
+
+        trace_context = {
+            "trace_id": self._propagation_context.trace_id,
+            "span_id": self._propagation_context.span_id,
+            "parent_span_id": self._propagation_context.parent_span_id,
+            "dynamic_sampling_context": self.get_dynamic_sampling_context(),
+        }  # type: Dict[str, Any]
+
+        return trace_context
+
+    def trace_propagation_meta(self, *args, **kwargs):
+        # type: (*Any, **Any) -> str
+        """
+        Return meta tags which should be injected into HTML templates
+        to allow propagation of trace information.
+        """
+        span = kwargs.pop("span", None)
+        if span is not None:
+            logger.warning(
+                "The parameter `span` in trace_propagation_meta() is deprecated and will be removed in the future."
+            )
+
+        meta = ""
+
+        sentry_trace = self.get_traceparent()
+        if sentry_trace is not None:
+            meta += '<meta name="%s" content="%s">' % (
+                SENTRY_TRACE_HEADER_NAME,
+                sentry_trace,
+            )
+
+        baggage = self.get_baggage()
+        if baggage is not None:
+            meta += '<meta name="%s" content="%s">' % (
+                BAGGAGE_HEADER_NAME,
+                baggage.serialize(),
+            )
+
+        return meta
+
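+    # Editor's note: a hypothetical Jinja-style template using the returned
+    # meta tags (sketch, not part of the upstream source); `meta` would be the
+    # result of scope.trace_propagation_meta() passed in by the view:
+    #
+    #     <head>
+    #       {{ meta }}  <!-- expands to the sentry-trace and baggage meta tags -->
+    #     </head>
+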
+    def iter_headers(self):
+        # type: () -> Iterator[Tuple[str, str]]
+        """
+        Creates a generator which yields the `sentry-trace` and `baggage` headers from the Propagation Context.
+        """
+        if self._propagation_context is not None:
+            traceparent = self.get_traceparent()
+            if traceparent is not None:
+                yield SENTRY_TRACE_HEADER_NAME, traceparent
+
+            dsc = self.get_dynamic_sampling_context()
+            if dsc is not None:
+                baggage = Baggage(dsc).serialize()
+                yield BAGGAGE_HEADER_NAME, baggage
+
+    def iter_trace_propagation_headers(self, *args, **kwargs):
+        # type: (*Any, **Any) -> Generator[Tuple[str, str], None, None]
+        """
+        Return HTTP headers which allow propagation of trace data.
+
+        If a span is given, the trace data will be taken from the span.
+        If no span is given, the trace data is taken from the scope.
+        """
+        client = self.get_client()
+        if not client.options.get("propagate_traces"):
+            warnings.warn(
+                "The `propagate_traces` parameter is deprecated. Please use `trace_propagation_targets` instead.",
+                DeprecationWarning,
+                stacklevel=2,
+            )
+            return
+
+        span = kwargs.pop("span", None)
+        span = span or self.span
+
+        if has_tracing_enabled(client.options) and span is not None:
+            for header in span.iter_headers():
+                yield header
+        else:
+            # If this scope has a propagation context, return headers from there
+            # (it could be that self is not the current scope nor the isolation scope)
+            if self._propagation_context is not None:
+                for header in self.iter_headers():
+                    yield header
+            else:
+                # otherwise try headers from current scope
+                current_scope = self.get_current_scope()
+                if current_scope._propagation_context is not None:
+                    for header in current_scope.iter_headers():
+                        yield header
+                else:
+                    # otherwise fall back to headers from isolation scope
+                    isolation_scope = self.get_isolation_scope()
+                    if isolation_scope._propagation_context is not None:
+                        for header in isolation_scope.iter_headers():
+                            yield header
+
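+    # Editor's note: a minimal sketch of forwarding trace headers to a
+    # downstream service (assumes the third-party `requests` package and a
+    # made-up URL):
+    #
+    #     import requests
+    #
+    #     headers = dict(scope.iter_trace_propagation_headers())
+    #     requests.get("https://downstream.example/api", headers=headers)
+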
+    def get_active_propagation_context(self):
+        # type: () -> Optional[PropagationContext]
+        if self._propagation_context is not None:
+            return self._propagation_context
+
+        current_scope = self.get_current_scope()
+        if current_scope._propagation_context is not None:
+            return current_scope._propagation_context
+
+        isolation_scope = self.get_isolation_scope()
+        if isolation_scope._propagation_context is not None:
+            return isolation_scope._propagation_context
+
+        return None
+
+    def clear(self):
+        # type: () -> None
+        """Clears the entire scope."""
+        self._level = None  # type: Optional[LogLevelStr]
+        self._fingerprint = None  # type: Optional[List[str]]
+        self._transaction = None  # type: Optional[str]
+        self._transaction_info = {}  # type: MutableMapping[str, str]
+        self._user = None  # type: Optional[Dict[str, Any]]
+
+        self._tags = {}  # type: Dict[str, Any]
+        self._contexts = {}  # type: Dict[str, Dict[str, Any]]
+        self._extras = {}  # type: MutableMapping[str, Any]
+        self._attachments = []  # type: List[Attachment]
+
+        self.clear_breadcrumbs()
+        self._should_capture = True  # type: bool
+
+        self._span = None  # type: Optional[Span]
+        self._session = None  # type: Optional[Session]
+        self._force_auto_session_tracking = None  # type: Optional[bool]
+
+        self._profile = None  # type: Optional[Profile]
+
+        self._propagation_context = None
+
+        # self._last_event_id is only applicable to isolation scopes
+        self._last_event_id = None  # type: Optional[str]
+        self._flags = None  # type: Optional[FlagBuffer]
+
+    @_attr_setter
+    def level(self, value):
+        # type: (LogLevelStr) -> None
+        """
+        When set this overrides the level.
+
+        .. deprecated:: 1.0.0
+            Use :func:`set_level` instead.
+
+        :param value: The level to set.
+        """
+        logger.warning(
+            "Deprecated: use .set_level() instead. This will be removed in the future."
+        )
+
+        self._level = value
+
+    def set_level(self, value):
+        # type: (LogLevelStr) -> None
+        """
+        Sets the level for the scope.
+
+        :param value: The level to set.
+        """
+        self._level = value
+
+    @_attr_setter
+    def fingerprint(self, value):
+        # type: (Optional[List[str]]) -> None
+        """When set this overrides the default fingerprint."""
+        self._fingerprint = value
+
+    @property
+    def transaction(self):
+        # type: () -> Any
+        # would be type: () -> Optional[Transaction], see https://github.com/python/mypy/issues/3004
+        """Return the transaction (root span) in the scope, if any."""
+
+        # there is no span/transaction on the scope
+        if self._span is None:
+            return None
+
+        # there is an orphan span on the scope
+        if self._span.containing_transaction is None:
+            return None
+
+        # there is either a transaction (which is its own containing
+        # transaction) or a non-orphan span on the scope
+        return self._span.containing_transaction
+
+    @transaction.setter
+    def transaction(self, value):
+        # type: (Any) -> None
+        # would be type: (Optional[str]) -> None, see https://github.com/python/mypy/issues/3004
+        """When set this forces a specific transaction name to be set.
+
+        Deprecated: use set_transaction_name instead."""
+
+        # XXX: the docstring above is misleading. The implementation of
+        # apply_to_event prefers an existing value of event.transaction over
+        # anything set in the scope.
+        # XXX: note that with the introduction of the Scope.transaction getter,
+        # there is a semantic and type mismatch between getter and setter. The
+        # getter returns a Transaction, the setter sets a transaction name.
+        # Without breaking version compatibility, we could make the setter set a
+        # transaction name or transaction (self._span) depending on the type of
+        # the value argument.
+
+        logger.warning(
+            "Assigning to scope.transaction directly is deprecated: use scope.set_transaction_name() instead."
+        )
+        self._transaction = value
+        if self._span and self._span.containing_transaction:
+            self._span.containing_transaction.name = value
+
+    def set_transaction_name(self, name, source=None):
+        # type: (str, Optional[str]) -> None
+        """Set the transaction name and optionally the transaction source."""
+        self._transaction = name
+
+        if self._span and self._span.containing_transaction:
+            self._span.containing_transaction.name = name
+            if source:
+                self._span.containing_transaction.source = source
+
+        if source:
+            self._transaction_info["source"] = source
+
+    @_attr_setter
+    def user(self, value):
+        # type: (Optional[Dict[str, Any]]) -> None
+        """When set a specific user is bound to the scope. Deprecated in favor of set_user."""
+        self.set_user(value)
+
+    def set_user(self, value):
+        # type: (Optional[Dict[str, Any]]) -> None
+        """Sets a user for the scope."""
+        self._user = value
+        session = self.get_isolation_scope()._session
+        if session is not None:
+            session.update(user=value)
+
+    @property
+    def span(self):
+        # type: () -> Optional[Span]
+        """Get/set current tracing span or transaction."""
+        return self._span
+
+    @span.setter
+    def span(self, span):
+        # type: (Optional[Span]) -> None
+        self._span = span
+        # XXX: this differs from the implementation in JS, there Scope.setSpan
+        # does not set Scope._transactionName.
+        if isinstance(span, Transaction):
+            transaction = span
+            if transaction.name:
+                self._transaction = transaction.name
+                if transaction.source:
+                    self._transaction_info["source"] = transaction.source
+
+    @property
+    def profile(self):
+        # type: () -> Optional[Profile]
+        return self._profile
+
+    @profile.setter
+    def profile(self, profile):
+        # type: (Optional[Profile]) -> None
+
+        self._profile = profile
+
+    def set_tag(self, key, value):
+        # type: (str, Any) -> None
+        """
+        Sets a tag for a key to a specific value.
+
+        :param key: Key of the tag to set.
+
+        :param value: Value of the tag to set.
+        """
+        self._tags[key] = value
+
+    def set_tags(self, tags):
+        # type: (Mapping[str, object]) -> None
+        """Sets multiple tags at once.
+
+        This method updates multiple tags at once. The tags are passed as a dictionary
+        or other mapping type.
+
+        Calling this method is equivalent to calling `set_tag` on each key-value pair
+        in the mapping. If a tag key already exists in the scope, its value will be
+        updated. If the tag key does not exist in the scope, the key-value pair will
+        be added to the scope.
+
+        This method only modifies tag keys in the `tags` mapping passed to the method.
+        `scope.set_tags({})` is, therefore, a no-op.
+
+        :param tags: A mapping of tag keys to tag values to set.
+        """
+        self._tags.update(tags)
+
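+    # Editor's note, an illustrative call (tag names and values are made up):
+    #
+    #     scope.set_tags({"deployment": "canary", "region": "eu-west-1"})
+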
+    def remove_tag(self, key):
+        # type: (str) -> None
+        """
+        Removes a specific tag.
+
+        :param key: Key of the tag to remove.
+        """
+        self._tags.pop(key, None)
+
+    def set_context(
+        self,
+        key,  # type: str
+        value,  # type: Dict[str, Any]
+    ):
+        # type: (...) -> None
+        """
+        Binds a context at a certain key to a specific value.
+        """
+        self._contexts[key] = value
+
+    def remove_context(
+        self, key  # type: str
+    ):
+        # type: (...) -> None
+        """Removes a context."""
+        self._contexts.pop(key, None)
+
+    def set_extra(
+        self,
+        key,  # type: str
+        value,  # type: Any
+    ):
+        # type: (...) -> None
+        """Sets an extra key to a specific value."""
+        self._extras[key] = value
+
+    def remove_extra(
+        self, key  # type: str
+    ):
+        # type: (...) -> None
+        """Removes a specific extra key."""
+        self._extras.pop(key, None)
+
+    def clear_breadcrumbs(self):
+        # type: () -> None
+        """Clears breadcrumb buffer."""
+        self._breadcrumbs = deque()  # type: Deque[Breadcrumb]
+
+    def add_attachment(
+        self,
+        bytes=None,  # type: Union[None, bytes, Callable[[], bytes]]
+        filename=None,  # type: Optional[str]
+        path=None,  # type: Optional[str]
+        content_type=None,  # type: Optional[str]
+        add_to_transactions=False,  # type: bool
+    ):
+        # type: (...) -> None
+        """Adds an attachment to future events sent from this scope.
+
+        The parameters are the same as for the :py:class:`sentry_sdk.attachments.Attachment` constructor.
+        """
+        self._attachments.append(
+            Attachment(
+                bytes=bytes,
+                path=path,
+                filename=filename,
+                content_type=content_type,
+                add_to_transactions=add_to_transactions,
+            )
+        )
+
+    def add_breadcrumb(self, crumb=None, hint=None, **kwargs):
+        # type: (Optional[Breadcrumb], Optional[BreadcrumbHint], Any) -> None
+        """
+        Adds a breadcrumb.
+
+        :param crumb: Dictionary with the data as the sentry v7/v8 protocol expects.
+
+        :param hint: An optional value that can be used by `before_breadcrumb`
+            to customize the breadcrumbs that are emitted.
+        """
+        client = self.get_client()
+
+        if not client.is_active():
+            logger.info("Dropped breadcrumb because no client bound")
+            return
+
+        before_breadcrumb = client.options.get("before_breadcrumb")
+        max_breadcrumbs = client.options.get("max_breadcrumbs", DEFAULT_MAX_BREADCRUMBS)
+
+        crumb = dict(crumb or ())  # type: Breadcrumb
+        crumb.update(kwargs)
+        if not crumb:
+            return
+
+        hint = dict(hint or ())  # type: Hint
+
+        if crumb.get("timestamp") is None:
+            crumb["timestamp"] = datetime.now(timezone.utc)
+        if crumb.get("type") is None:
+            crumb["type"] = "default"
+
+        if before_breadcrumb is not None:
+            new_crumb = before_breadcrumb(crumb, hint)
+        else:
+            new_crumb = crumb
+
+        if new_crumb is not None:
+            self._breadcrumbs.append(new_crumb)
+        else:
+            logger.info("before breadcrumb dropped breadcrumb (%s)", crumb)
+
+        while len(self._breadcrumbs) > max_breadcrumbs:
+            self._breadcrumbs.popleft()
+
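+    # Editor's note: a minimal usage sketch (field values are made up):
+    #
+    #     scope.add_breadcrumb(
+    #         category="auth",
+    #         message="user logged in",
+    #         level="info",
+    #     )
+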
+    def start_transaction(
+        self,
+        transaction=None,
+        instrumenter=INSTRUMENTER.SENTRY,
+        custom_sampling_context=None,
+        **kwargs,
+    ):
+        # type: (Optional[Transaction], str, Optional[SamplingContext], Unpack[TransactionKwargs]) -> Union[Transaction, NoOpSpan]
+        """
+        Start and return a transaction.
+
+        Start an existing transaction if given, otherwise create and start a new
+        transaction with kwargs.
+
+        This is the entry point to manual tracing instrumentation.
+
+        A tree structure can be built by adding child spans to the transaction,
+        and child spans to other spans. To start a new child span within the
+        transaction or any span, call the respective `.start_child()` method.
+
+        Every child span must be finished before the transaction is finished,
+        otherwise the unfinished spans are discarded.
+
+        When used as context managers, spans and transactions are automatically
+        finished at the end of the `with` block. If not using context managers,
+        call the `.finish()` method.
+
+        When the transaction is finished, it will be sent to Sentry with all its
+        finished child spans.
+
+        :param transaction: The transaction to start. If omitted, we create and
+            start a new transaction.
+        :param instrumenter: This parameter is meant for internal use only. It
+            will be removed in the next major version.
+        :param custom_sampling_context: The transaction's custom sampling context.
+        :param kwargs: Optional keyword arguments to be passed to the Transaction
+            constructor. See :py:class:`sentry_sdk.tracing.Transaction` for
+            available arguments.
+        """
+        kwargs.setdefault("scope", self)
+
+        client = self.get_client()
+
+        configuration_instrumenter = client.options["instrumenter"]
+
+        if instrumenter != configuration_instrumenter:
+            return NoOpSpan()
+
+        try_autostart_continuous_profiler()
+
+        custom_sampling_context = custom_sampling_context or {}
+
+        # kwargs at this point has type TransactionKwargs, since instrumenter
+        # and custom_sampling_context are separate, named parameters.
+        transaction_kwargs = kwargs  # type: TransactionKwargs
+
+        # if we haven't been given a transaction, make one
+        if transaction is None:
+            transaction = Transaction(**transaction_kwargs)
+
+        # use traces_sample_rate, traces_sampler, and/or inheritance to make a
+        # sampling decision
+        sampling_context = {
+            "transaction_context": transaction.to_json(),
+            "parent_sampled": transaction.parent_sampled,
+        }
+        sampling_context.update(custom_sampling_context)
+        transaction._set_initial_sampling_decision(sampling_context=sampling_context)
+
+        # update the sample rate in the dsc
+        if transaction.sample_rate is not None:
+            propagation_context = self.get_active_propagation_context()
+            if propagation_context:
+                dsc = propagation_context.dynamic_sampling_context
+                if dsc is not None:
+                    dsc["sample_rate"] = str(transaction.sample_rate)
+            if transaction._baggage:
+                transaction._baggage.sentry_items["sample_rate"] = str(
+                    transaction.sample_rate
+                )
+
+        if transaction.sampled:
+            profile = Profile(
+                transaction.sampled, transaction._start_timestamp_monotonic_ns
+            )
+            profile._set_initial_sampling_decision(sampling_context=sampling_context)
+
+            transaction._profile = profile
+
+            transaction._continuous_profile = try_profile_lifecycle_trace_start()
+
+            # Typically, the profiler is set when the transaction is created. But when
+            # using the auto lifecycle, the profiler isn't running when the first
+            # transaction is started. So make sure we update the profiler id on it.
+            if transaction._continuous_profile is not None:
+                transaction.set_profiler_id(get_profiler_id())
+
+            # we don't bother to keep spans if we already know we're not going to
+            # send the transaction
+            max_spans = (client.options["_experiments"].get("max_spans")) or 1000
+            transaction.init_span_recorder(maxlen=max_spans)
+
+        return transaction
+
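+    # Editor's note: a minimal manual-tracing sketch following the docstring
+    # above (operation and helper names are made up):
+    #
+    #     with sentry_sdk.start_transaction(op="task", name="process-batch") as tx:
+    #         with tx.start_child(op="db", name="load rows"):
+    #             load_rows()  # hypothetical helper
+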
+    def start_span(self, instrumenter=INSTRUMENTER.SENTRY, **kwargs):
+        # type: (str, Any) -> Span
+        """
+        Start a span whose parent is the currently active span or transaction, if any.
+
+        The return value is a :py:class:`sentry_sdk.tracing.Span` instance,
+        typically used as a context manager to start and stop timing in a `with`
+        block.
+
+        Only spans contained in a transaction are sent to Sentry. Most
+        integrations start a transaction at the appropriate time, for example
+        for every incoming HTTP request. Use
+        :py:meth:`sentry_sdk.start_transaction` to start a new transaction when
+        one is not already in progress.
+
+        For supported `**kwargs` see :py:class:`sentry_sdk.tracing.Span`.
+
+        The instrumenter parameter is deprecated for user code, and it will
+        be removed in the next major version. Going forward, it should only
+        be used by the SDK itself.
+        """
+        if kwargs.get("description") is not None:
+            warnings.warn(
+                "The `description` parameter is deprecated. Please use `name` instead.",
+                DeprecationWarning,
+                stacklevel=2,
+            )
+
+        with new_scope():
+            kwargs.setdefault("scope", self)
+
+            client = self.get_client()
+
+            configuration_instrumenter = client.options["instrumenter"]
+
+            if instrumenter != configuration_instrumenter:
+                return NoOpSpan()
+
+            # get current span or transaction
+            span = self.span or self.get_isolation_scope().span
+
+            if span is None:
+                # New spans get the `trace_id` from the scope
+                if "trace_id" not in kwargs:
+                    propagation_context = self.get_active_propagation_context()
+                    if propagation_context is not None:
+                        kwargs["trace_id"] = propagation_context.trace_id
+
+                span = Span(**kwargs)
+            else:
+                # Children take `trace_id` from the parent span.
+                span = span.start_child(**kwargs)
+
+            return span
+
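+    # Editor's note: an illustrative sketch; the span is finished automatically
+    # when the `with` block exits (names and values are made up):
+    #
+    #     with sentry_sdk.start_span(op="http.client", name="GET /users") as span:
+    #         span.set_data("url", "https://example.com/users")
+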
+    def continue_trace(
+        self, environ_or_headers, op=None, name=None, source=None, origin="manual"
+    ):
+        # type: (Dict[str, Any], Optional[str], Optional[str], Optional[str], str) -> Transaction
+        """
+        Sets the propagation context from environment or headers and returns a transaction.
+        """
+        self.generate_propagation_context(environ_or_headers)
+
+        # When we generate the propagation context, the sample_rand value is set
+        # if missing or invalid (we use the original value if it's valid).
+        # We want the transaction to use the same sample_rand value. Because the
+        # transaction duplicates the propagation logic, we pass the value in to
+        # avoid recomputing it there.
+        # TYPE SAFETY: self.generate_propagation_context() ensures that self._propagation_context
+        # is not None.
+        sample_rand = typing.cast(
+            PropagationContext, self._propagation_context
+        )._sample_rand()
+
+        transaction = Transaction.continue_from_headers(
+            normalize_incoming_data(environ_or_headers),
+            _sample_rand=sample_rand,
+            op=op,
+            origin=origin,
+            name=name,
+            source=source,
+        )
+
+        return transaction
+
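+    # Editor's note: a sketch of continuing a trace from an incoming HTTP
+    # request, where `request.headers` is a hypothetical header mapping:
+    #
+    #     transaction = scope.continue_trace(
+    #         request.headers, op="http.server", name="GET /checkout"
+    #     )
+    #     with sentry_sdk.start_transaction(transaction):
+    #         handle_request()  # hypothetical handler
+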
+    def capture_event(self, event, hint=None, scope=None, **scope_kwargs):
+        # type: (Event, Optional[Hint], Optional[Scope], Any) -> Optional[str]
+        """
+        Captures an event.
+
+        Merges given scope data and calls :py:meth:`sentry_sdk.client._Client.capture_event`.
+
+        :param event: A ready-made event that can be directly sent to Sentry.
+
+        :param hint: Contains metadata about the event that can be read from `before_send`, such as the original exception object or a HTTP request object.
+
+        :param scope: An optional :py:class:`sentry_sdk.Scope` to apply to events.
+            The `scope` and `scope_kwargs` parameters are mutually exclusive.
+
+        :param scope_kwargs: Optional data to apply to event.
+            For supported `**scope_kwargs` see :py:meth:`sentry_sdk.Scope.update_from_kwargs`.
+            The `scope` and `scope_kwargs` parameters are mutually exclusive.
+
+        :returns: An `event_id` if the SDK decided to send the event (see :py:meth:`sentry_sdk.client._Client.capture_event`).
+        """
+        if disable_capture_event.get(False):
+            return None
+
+        scope = self._merge_scopes(scope, scope_kwargs)
+
+        event_id = self.get_client().capture_event(event=event, hint=hint, scope=scope)
+
+        if event_id is not None and event.get("type") != "transaction":
+            self.get_isolation_scope()._last_event_id = event_id
+
+        return event_id
+
+    def capture_message(self, message, level=None, scope=None, **scope_kwargs):
+        # type: (str, Optional[LogLevelStr], Optional[Scope], Any) -> Optional[str]
+        """
+        Captures a message.
+
+        :param message: The string to send as the message.
+
+        :param level: If no level is provided, the default level is `info`.
+
+        :param scope: An optional :py:class:`sentry_sdk.Scope` to apply to events.
+            The `scope` and `scope_kwargs` parameters are mutually exclusive.
+
+        :param scope_kwargs: Optional data to apply to event.
+            For supported `**scope_kwargs` see :py:meth:`sentry_sdk.Scope.update_from_kwargs`.
+            The `scope` and `scope_kwargs` parameters are mutually exclusive.
+
+        :returns: An `event_id` if the SDK decided to send the event (see :py:meth:`sentry_sdk.client._Client.capture_event`).
+        """
+        if disable_capture_event.get(False):
+            return None
+
+        if level is None:
+            level = "info"
+
+        event = {
+            "message": message,
+            "level": level,
+        }  # type: Event
+
+        return self.capture_event(event, scope=scope, **scope_kwargs)
+
+    def capture_exception(self, error=None, scope=None, **scope_kwargs):
+        # type: (Optional[Union[BaseException, ExcInfo]], Optional[Scope], Any) -> Optional[str]
+        """Captures an exception.
+
+        :param error: An exception to capture. If `None`, `sys.exc_info()` will be used.
+
+        :param scope: An optional :py:class:`sentry_sdk.Scope` to apply to events.
+            The `scope` and `scope_kwargs` parameters are mutually exclusive.
+
+        :param scope_kwargs: Optional data to apply to event.
+            For supported `**scope_kwargs` see :py:meth:`sentry_sdk.Scope.update_from_kwargs`.
+            The `scope` and `scope_kwargs` parameters are mutually exclusive.
+
+        :returns: An `event_id` if the SDK decided to send the event (see :py:meth:`sentry_sdk.client._Client.capture_event`).
+        """
+        if disable_capture_event.get(False):
+            return None
+
+        if error is not None:
+            exc_info = exc_info_from_error(error)
+        else:
+            exc_info = sys.exc_info()
+
+        event, hint = event_from_exception(
+            exc_info, client_options=self.get_client().options
+        )
+
+        try:
+            return self.capture_event(event, hint=hint, scope=scope, **scope_kwargs)
+        except Exception:
+            capture_internal_exception(sys.exc_info())
+
+        return None
+
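+    # Editor's note, an illustrative pattern; called with no `error` argument,
+    # the exception is taken from sys.exc_info():
+    #
+    #     try:
+    #         1 / 0
+    #     except ZeroDivisionError:
+    #         scope.capture_exception()
+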
+    def start_session(self, *args, **kwargs):
+        # type: (*Any, **Any) -> None
+        """Starts a new session."""
+        session_mode = kwargs.pop("session_mode", "application")
+
+        self.end_session()
+
+        client = self.get_client()
+        self._session = Session(
+            release=client.options.get("release"),
+            environment=client.options.get("environment"),
+            user=self._user,
+            session_mode=session_mode,
+        )
+
+    def end_session(self, *args, **kwargs):
+        # type: (*Any, **Any) -> None
+        """Ends the current session if there is one."""
+        session = self._session
+        self._session = None
+
+        if session is not None:
+            session.close()
+            self.get_client().capture_session(session)
+
+    def stop_auto_session_tracking(self, *args, **kwargs):
+        # type: (*Any, **Any) -> None
+        """Stops automatic session tracking.
+
+        This temporarily disables session tracking for the current scope when called.
+        To resume session tracking, call `resume_auto_session_tracking`.
+        """
+        self.end_session()
+        self._force_auto_session_tracking = False
+
+    def resume_auto_session_tracking(self):
+        # type: (...) -> None
+        """Resumes automatic session tracking for the current scope if
+        disabled earlier.  This requires that generally automatic session
+        tracking is enabled.
+        """
+        self._force_auto_session_tracking = None
+
+    def add_event_processor(
+        self, func  # type: EventProcessor
+    ):
+        # type: (...) -> None
+        """Register a scope local event processor on the scope.
+
+        :param func: This function behaves like `before_send`.
+        """
+        if len(self._event_processors) > 20:
+            logger.warning(
+                "Too many event processors on scope! Clearing list to free up some memory: %r",
+                self._event_processors,
+            )
+            del self._event_processors[:]
+
+        self._event_processors.append(func)
+
+    def add_error_processor(
+        self,
+        func,  # type: ErrorProcessor
+        cls=None,  # type: Optional[Type[BaseException]]
+    ):
+        # type: (...) -> None
+        """Register a scope local error processor on the scope.
+
+        :param func: A callback that works similarly to an event processor but is invoked with the original exception info triple as its second argument.
+
+        :param cls: Optionally, only process exceptions of this type.
+        """
+        if cls is not None:
+            cls_ = cls  # For mypy.
+            real_func = func
+
+            def func(event, exc_info):
+                # type: (Event, ExcInfo) -> Optional[Event]
+                try:
+                    is_inst = isinstance(exc_info[1], cls_)
+                except Exception:
+                    is_inst = False
+                if is_inst:
+                    return real_func(event, exc_info)
+                return event
+
+        self._error_processors.append(func)
+
+    def _apply_level_to_event(self, event, hint, options):
+        # type: (Event, Hint, Optional[Dict[str, Any]]) -> None
+        if self._level is not None:
+            event["level"] = self._level
+
+    def _apply_breadcrumbs_to_event(self, event, hint, options):
+        # type: (Event, Hint, Optional[Dict[str, Any]]) -> None
+        event.setdefault("breadcrumbs", {}).setdefault("values", []).extend(
+            self._breadcrumbs
+        )
+
+        # Attempt to sort timestamps
+        try:
+            for crumb in event["breadcrumbs"]["values"]:
+                if isinstance(crumb["timestamp"], str):
+                    crumb["timestamp"] = datetime_from_isoformat(crumb["timestamp"])
+
+            event["breadcrumbs"]["values"].sort(key=lambda crumb: crumb["timestamp"])
+        except Exception as err:
+            logger.debug("Error when sorting breadcrumbs", exc_info=err)
+
+    def _apply_user_to_event(self, event, hint, options):
+        # type: (Event, Hint, Optional[Dict[str, Any]]) -> None
+        if event.get("user") is None and self._user is not None:
+            event["user"] = self._user
+
+    def _apply_transaction_name_to_event(self, event, hint, options):
+        # type: (Event, Hint, Optional[Dict[str, Any]]) -> None
+        if event.get("transaction") is None and self._transaction is not None:
+            event["transaction"] = self._transaction
+
+    def _apply_transaction_info_to_event(self, event, hint, options):
+        # type: (Event, Hint, Optional[Dict[str, Any]]) -> None
+        if event.get("transaction_info") is None and self._transaction_info is not None:
+            event["transaction_info"] = self._transaction_info
+
+    def _apply_fingerprint_to_event(self, event, hint, options):
+        # type: (Event, Hint, Optional[Dict[str, Any]]) -> None
+        if event.get("fingerprint") is None and self._fingerprint is not None:
+            event["fingerprint"] = self._fingerprint
+
+    def _apply_extra_to_event(self, event, hint, options):
+        # type: (Event, Hint, Optional[Dict[str, Any]]) -> None
+        if self._extras:
+            event.setdefault("extra", {}).update(self._extras)
+
+    def _apply_tags_to_event(self, event, hint, options):
+        # type: (Event, Hint, Optional[Dict[str, Any]]) -> None
+        if self._tags:
+            event.setdefault("tags", {}).update(self._tags)
+
+    def _apply_contexts_to_event(self, event, hint, options):
+        # type: (Event, Hint, Optional[Dict[str, Any]]) -> None
+        if self._contexts:
+            event.setdefault("contexts", {}).update(self._contexts)
+
+        contexts = event.setdefault("contexts", {})
+
+        # Add "trace" context
+        if contexts.get("trace") is None:
+            if has_tracing_enabled(options) and self._span is not None:
+                contexts["trace"] = self._span.get_trace_context()
+            else:
+                contexts["trace"] = self.get_trace_context()
+
+    def _apply_flags_to_event(self, event, hint, options):
+        # type: (Event, Hint, Optional[Dict[str, Any]]) -> None
+        flags = self.flags.get()
+        if len(flags) > 0:
+            event.setdefault("contexts", {}).setdefault("flags", {}).update(
+                {"values": flags}
+            )
+
+    def _drop(self, cause, ty):
+        # type: (Any, str) -> Optional[Any]
+        logger.info("%s (%s) dropped event", ty, cause)
+        return None
+
+    def run_error_processors(self, event, hint):
+        # type: (Event, Hint) -> Optional[Event]
+        """
+        Runs the error processors on the event and returns the modified event.
+        """
+        exc_info = hint.get("exc_info")
+        if exc_info is not None:
+            error_processors = chain(
+                self.get_global_scope()._error_processors,
+                self.get_isolation_scope()._error_processors,
+                self.get_current_scope()._error_processors,
+            )
+
+            for error_processor in error_processors:
+                new_event = error_processor(event, exc_info)
+                if new_event is None:
+                    return self._drop(error_processor, "error processor")
+
+                event = new_event
+
+        return event
+
+    def run_event_processors(self, event, hint):
+        # type: (Event, Hint) -> Optional[Event]
+        """
+        Runs the event processors on the event and returns the modified event.
+        """
+        ty = event.get("type")
+        is_check_in = ty == "check_in"
+
+        if not is_check_in:
+            # Get scopes without creating them to prevent infinite recursion
+            isolation_scope = _isolation_scope.get()
+            current_scope = _current_scope.get()
+
+            event_processors = chain(
+                global_event_processors,
+                _global_scope and _global_scope._event_processors or [],
+                isolation_scope and isolation_scope._event_processors or [],
+                current_scope and current_scope._event_processors or [],
+            )
+
+            for event_processor in event_processors:
+                new_event = event
+                with capture_internal_exceptions():
+                    new_event = event_processor(event, hint)
+                if new_event is None:
+                    return self._drop(event_processor, "event processor")
+                event = new_event
+
+        return event
+
+    @_disable_capture
+    def apply_to_event(
+        self,
+        event,  # type: Event
+        hint,  # type: Hint
+        options=None,  # type: Optional[Dict[str, Any]]
+    ):
+        # type: (...) -> Optional[Event]
+        """Applies the information contained on the scope to the given event."""
+        ty = event.get("type")
+        is_transaction = ty == "transaction"
+        is_check_in = ty == "check_in"
+
+        # put all attachments into the hint. This lets callbacks play around
+        # with attachments. We also later pull this out of the hint when we
+        # create the envelope.
+        attachments_to_send = hint.get("attachments") or []
+        for attachment in self._attachments:
+            if not is_transaction or attachment.add_to_transactions:
+                attachments_to_send.append(attachment)
+        hint["attachments"] = attachments_to_send
+
+        self._apply_contexts_to_event(event, hint, options)
+
+        if is_check_in:
+            # Check-ins only support the trace context, strip all others
+            event["contexts"] = {
+                "trace": event.setdefault("contexts", {}).get("trace", {})
+            }
+
+        if not is_check_in:
+            self._apply_level_to_event(event, hint, options)
+            self._apply_fingerprint_to_event(event, hint, options)
+            self._apply_user_to_event(event, hint, options)
+            self._apply_transaction_name_to_event(event, hint, options)
+            self._apply_transaction_info_to_event(event, hint, options)
+            self._apply_tags_to_event(event, hint, options)
+            self._apply_extra_to_event(event, hint, options)
+
+        if not is_transaction and not is_check_in:
+            self._apply_breadcrumbs_to_event(event, hint, options)
+            self._apply_flags_to_event(event, hint, options)
+
+        event = self.run_error_processors(event, hint)
+        if event is None:
+            return None
+
+        event = self.run_event_processors(event, hint)
+        if event is None:
+            return None
+
+        return event
+
+    def update_from_scope(self, scope):
+        # type: (Scope) -> None
+        """Update the scope with another scope's data."""
+        if scope._level is not None:
+            self._level = scope._level
+        if scope._fingerprint is not None:
+            self._fingerprint = scope._fingerprint
+        if scope._transaction is not None:
+            self._transaction = scope._transaction
+        if scope._transaction_info is not None:
+            self._transaction_info.update(scope._transaction_info)
+        if scope._user is not None:
+            self._user = scope._user
+        if scope._tags:
+            self._tags.update(scope._tags)
+        if scope._contexts:
+            self._contexts.update(scope._contexts)
+        if scope._extras:
+            self._extras.update(scope._extras)
+        if scope._breadcrumbs:
+            self._breadcrumbs.extend(scope._breadcrumbs)
+        if scope._span:
+            self._span = scope._span
+        if scope._attachments:
+            self._attachments.extend(scope._attachments)
+        if scope._profile:
+            self._profile = scope._profile
+        if scope._propagation_context:
+            self._propagation_context = scope._propagation_context
+        if scope._session:
+            self._session = scope._session
+        if scope._flags:
+            if not self._flags:
+                self._flags = deepcopy(scope._flags)
+            else:
+                for flag in scope._flags.get():
+                    self._flags.set(flag["flag"], flag["result"])
+
+    def update_from_kwargs(
+        self,
+        user=None,  # type: Optional[Any]
+        level=None,  # type: Optional[LogLevelStr]
+        extras=None,  # type: Optional[Dict[str, Any]]
+        contexts=None,  # type: Optional[Dict[str, Dict[str, Any]]]
+        tags=None,  # type: Optional[Dict[str, str]]
+        fingerprint=None,  # type: Optional[List[str]]
+    ):
+        # type: (...) -> None
+        """Update the scope's attributes."""
+        if level is not None:
+            self._level = level
+        if user is not None:
+            self._user = user
+        if extras is not None:
+            self._extras.update(extras)
+        if contexts is not None:
+            self._contexts.update(contexts)
+        if tags is not None:
+            self._tags.update(tags)
+        if fingerprint is not None:
+            self._fingerprint = fingerprint
+
+    def __repr__(self):
+        # type: () -> str
+        return "<%s id=%s name=%s type=%s>" % (
+            self.__class__.__name__,
+            hex(id(self)),
+            self._name,
+            self._type,
+        )
+
+    @property
+    def flags(self):
+        # type: () -> FlagBuffer
+        if self._flags is None:
+            max_flags = (
+                self.get_client().options["_experiments"].get("max_flags")
+                or DEFAULT_FLAG_CAPACITY
+            )
+            self._flags = FlagBuffer(capacity=max_flags)
+        return self._flags
+
+
+@contextmanager
+def new_scope():
+    # type: () -> Generator[Scope, None, None]
+    """
+    .. versionadded:: 2.0.0
+
+    Context manager that forks the current scope and runs the wrapped code in it.
+    After the wrapped code is executed, the original scope is restored.
+
+    Example Usage:
+
+    .. code-block:: python
+
+        import sentry_sdk
+
+        with sentry_sdk.new_scope() as scope:
+            scope.set_tag("color", "green")
+            sentry_sdk.capture_message("hello") # will include `color` tag.
+
+        sentry_sdk.capture_message("hello, again") # will NOT include `color` tag.
+
+    """
+    # fork current scope
+    current_scope = Scope.get_current_scope()
+    new_scope = current_scope.fork()
+    token = _current_scope.set(new_scope)
+
+    try:
+        yield new_scope
+
+    finally:
+        # restore original scope
+        _current_scope.reset(token)
+
+
+@contextmanager
+def use_scope(scope):
+    # type: (Scope) -> Generator[Scope, None, None]
+    """
+    .. versionadded:: 2.0.0
+
+    Context manager that uses the given `scope` and runs the wrapped code in it.
+    After the wrapped code is executed, the original scope is restored.
+
+    Example Usage:
+    Suppose the variable `scope` contains a `Scope` object, which is not currently
+    the active scope.
+
+    .. code-block:: python
+
+        import sentry_sdk
+
+        with sentry_sdk.use_scope(scope):
+            scope.set_tag("color", "green")
+            sentry_sdk.capture_message("hello") # will include `color` tag.
+
+        sentry_sdk.capture_message("hello, again") # will NOT include `color` tag.
+
+    """
+    # set given scope as current scope
+    token = _current_scope.set(scope)
+
+    try:
+        yield scope
+
+    finally:
+        # restore original scope
+        _current_scope.reset(token)
+
+
+@contextmanager
+def isolation_scope():
+    # type: () -> Generator[Scope, None, None]
+    """
+    .. versionadded:: 2.0.0
+
+    Context manager that forks the current isolation scope and runs the wrapped code in it.
+    The current scope is also forked to not bleed data into the existing current scope.
+    After the wrapped code is executed, the original scopes are restored.
+
+    Example Usage:
+
+    .. code-block:: python
+
+        import sentry_sdk
+
+        with sentry_sdk.isolation_scope() as scope:
+            scope.set_tag("color", "green")
+            sentry_sdk.capture_message("hello") # will include `color` tag.
+
+        sentry_sdk.capture_message("hello, again") # will NOT include `color` tag.
+
+    """
+    # fork current scope
+    current_scope = Scope.get_current_scope()
+    forked_current_scope = current_scope.fork()
+    current_token = _current_scope.set(forked_current_scope)
+
+    # fork isolation scope
+    isolation_scope = Scope.get_isolation_scope()
+    new_isolation_scope = isolation_scope.fork()
+    isolation_token = _isolation_scope.set(new_isolation_scope)
+
+    try:
+        yield new_isolation_scope
+
+    finally:
+        # restore original scopes
+        _current_scope.reset(current_token)
+        _isolation_scope.reset(isolation_token)
+
+
+@contextmanager
+def use_isolation_scope(isolation_scope):
+    # type: (Scope) -> Generator[Scope, None, None]
+    """
+    .. versionadded:: 2.0.0
+
+    Context manager that uses the given `isolation_scope` and runs the wrapped code in it.
+    The current scope is also forked to not bleed data into the existing current scope.
+    After the wrapped code is executed, the original scopes are restored.
+
+    Example Usage:
+    Suppose the variable `isolation_scope` contains a `Scope` object, which is
+    not currently the active isolation scope.
+
+    .. code-block:: python
+
+        import sentry_sdk
+
+        with sentry_sdk.use_isolation_scope(isolation_scope) as scope:
+            scope.set_tag("color", "green")
+            sentry_sdk.capture_message("hello") # will include `color` tag.
+
+        sentry_sdk.capture_message("hello, again") # will NOT include `color` tag.
+
+    """
+    # fork current scope
+    current_scope = Scope.get_current_scope()
+    forked_current_scope = current_scope.fork()
+    current_token = _current_scope.set(forked_current_scope)
+
+    # set given scope as isolation scope
+    isolation_token = _isolation_scope.set(isolation_scope)
+
+    try:
+        yield isolation_scope
+
+    finally:
+        # restore original scopes
+        _current_scope.reset(current_token)
+        _isolation_scope.reset(isolation_token)
+
+
+def should_send_default_pii():
+    # type: () -> bool
+    """Shortcut for `Scope.get_client().should_send_default_pii()`."""
+    return Scope.get_client().should_send_default_pii()
+
+
+# Circular imports
+from sentry_sdk.client import NonRecordingClient
+
+if TYPE_CHECKING:
+    import sentry_sdk.client
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/scrubber.py b/.venv/lib/python3.12/site-packages/sentry_sdk/scrubber.py
new file mode 100644
index 00000000..1df55737
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/scrubber.py
@@ -0,0 +1,174 @@
+from sentry_sdk.utils import (
+    capture_internal_exceptions,
+    AnnotatedValue,
+    iter_event_frames,
+)
+
+from typing import TYPE_CHECKING, cast, List, Dict
+
+if TYPE_CHECKING:
+    from sentry_sdk._types import Event
+    from typing import Optional
+
+
+DEFAULT_DENYLIST = [
+    # stolen from relay
+    "password",
+    "passwd",
+    "secret",
+    "api_key",
+    "apikey",
+    "auth",
+    "credentials",
+    "mysql_pwd",
+    "privatekey",
+    "private_key",
+    "token",
+    "session",
+    # django
+    "csrftoken",
+    "sessionid",
+    # wsgi
+    "x_csrftoken",
+    "x_forwarded_for",
+    "set_cookie",
+    "cookie",
+    "authorization",
+    "x_api_key",
+    # other common names used in the wild
+    "aiohttp_session",  # aiohttp
+    "connect.sid",  # Express
+    "csrf_token",  # Pyramid
+    "csrf",  # (this is a cookie name used in accepted answers on stack overflow)
+    "_csrf",  # Express
+    "_csrf_token",  # Bottle
+    "PHPSESSID",  # PHP
+    "_session",  # Sanic
+    "symfony",  # Symfony
+    "user_session",  # Vue
+    "_xsrf",  # Tornado
+    "XSRF-TOKEN",  # Angular, Laravel
+]
+
+DEFAULT_PII_DENYLIST = [
+    "x_forwarded_for",
+    "x_real_ip",
+    "ip_address",
+    "remote_addr",
+]
+
+
+class EventScrubber:
+    def __init__(
+        self, denylist=None, recursive=False, send_default_pii=False, pii_denylist=None
+    ):
+        # type: (Optional[List[str]], bool, bool, Optional[List[str]]) -> None
+        """
+        A scrubber that goes through the event payload and removes sensitive data configured through denylists.
+
+        :param denylist: A security denylist that is always scrubbed, defaults to DEFAULT_DENYLIST.
+        :param recursive: Whether to scrub the event payload recursively, default False.
+        :param send_default_pii: Whether sending default PII is enabled. If enabled, PII fields are not scrubbed.
+        :param pii_denylist: The denylist to use for scrubbing when pii is not sent, defaults to DEFAULT_PII_DENYLIST.
+        """
+        self.denylist = DEFAULT_DENYLIST.copy() if denylist is None else denylist
+
+        if not send_default_pii:
+            pii_denylist = (
+                DEFAULT_PII_DENYLIST.copy() if pii_denylist is None else pii_denylist
+            )
+            self.denylist += pii_denylist
+
+        self.denylist = [x.lower() for x in self.denylist]
+        self.recursive = recursive
+
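+    # Editor's note: a sketch of installing a customized scrubber through the
+    # `event_scrubber` option of sentry_sdk.init (assuming that option is
+    # available in this SDK version; the extra denylist entry is made up):
+    #
+    #     from sentry_sdk.scrubber import EventScrubber, DEFAULT_DENYLIST
+    #
+    #     sentry_sdk.init(
+    #         dsn="...",
+    #         event_scrubber=EventScrubber(
+    #             denylist=DEFAULT_DENYLIST + ["my_internal_token"],
+    #             recursive=True,
+    #         ),
+    #     )
+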
+    def scrub_list(self, lst):
+        # type: (object) -> None
+        """
+        If a list is passed to this method, the method recursively searches the list and any
+        nested lists for any dictionaries. The method calls scrub_dict on all dictionaries
+        it finds.
+        If the parameter passed to this method is not a list, the method does nothing.
+        """
+        if not isinstance(lst, list):
+            return
+
+        for v in lst:
+            self.scrub_dict(v)  # no-op unless v is a dict
+            self.scrub_list(v)  # no-op unless v is a list
+
+    def scrub_dict(self, d):
+        # type: (object) -> None
+        """
+        If a dictionary is passed to this method, the method scrubs the dictionary of any
+        sensitive data. The method calls itself recursively on any nested dictionaries (
+        including dictionaries nested in lists) if self.recursive is True.
+        This method does nothing if the parameter passed to it is not a dictionary.
+        """
+        if not isinstance(d, dict):
+            return
+
+        for k, v in d.items():
+            # The isinstance check is needed so that mypy (and the runtime) know
+            # k is a string before .lower() is called on it.
+            if isinstance(k, str) and k.lower() in self.denylist:
+                d[k] = AnnotatedValue.substituted_because_contains_sensitive_data()
+            elif self.recursive:
+                self.scrub_dict(v)  # no-op unless v is a dict
+                self.scrub_list(v)  # no-op unless v is a list
+
+    def scrub_request(self, event):
+        # type: (Event) -> None
+        with capture_internal_exceptions():
+            if "request" in event:
+                if "headers" in event["request"]:
+                    self.scrub_dict(event["request"]["headers"])
+                if "cookies" in event["request"]:
+                    self.scrub_dict(event["request"]["cookies"])
+                if "data" in event["request"]:
+                    self.scrub_dict(event["request"]["data"])
+
+    def scrub_extra(self, event):
+        # type: (Event) -> None
+        with capture_internal_exceptions():
+            if "extra" in event:
+                self.scrub_dict(event["extra"])
+
+    def scrub_user(self, event):
+        # type: (Event) -> None
+        with capture_internal_exceptions():
+            if "user" in event:
+                self.scrub_dict(event["user"])
+
+    def scrub_breadcrumbs(self, event):
+        # type: (Event) -> None
+        with capture_internal_exceptions():
+            if "breadcrumbs" in event:
+                if "values" in event["breadcrumbs"]:
+                    for value in event["breadcrumbs"]["values"]:
+                        if "data" in value:
+                            self.scrub_dict(value["data"])
+
+    def scrub_frames(self, event):
+        # type: (Event) -> None
+        with capture_internal_exceptions():
+            for frame in iter_event_frames(event):
+                if "vars" in frame:
+                    self.scrub_dict(frame["vars"])
+
+    def scrub_spans(self, event):
+        # type: (Event) -> None
+        with capture_internal_exceptions():
+            if "spans" in event:
+                for span in cast(List[Dict[str, object]], event["spans"]):
+                    if "data" in span:
+                        self.scrub_dict(span["data"])
+
+    def scrub_event(self, event):
+        # type: (Event) -> None
+        self.scrub_request(event)
+        self.scrub_extra(event)
+        self.scrub_user(event)
+        self.scrub_breadcrumbs(event)
+        self.scrub_frames(event)
+        self.scrub_spans(event)
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/serializer.py b/.venv/lib/python3.12/site-packages/sentry_sdk/serializer.py
new file mode 100644
index 00000000..bc8e38c6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/serializer.py
@@ -0,0 +1,388 @@
+import sys
+import math
+from collections.abc import Mapping, Sequence, Set
+from datetime import datetime
+
+from sentry_sdk.utils import (
+    AnnotatedValue,
+    capture_internal_exception,
+    disable_capture_event,
+    format_timestamp,
+    safe_repr,
+    strip_string,
+)
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from types import TracebackType
+
+    from typing import Any
+    from typing import Callable
+    from typing import ContextManager
+    from typing import Dict
+    from typing import List
+    from typing import Optional
+    from typing import Type
+    from typing import Union
+
+    from sentry_sdk._types import NotImplementedType
+
+    Span = Dict[str, Any]
+
+    ReprProcessor = Callable[[Any, Dict[str, Any]], Union[NotImplementedType, str]]
+    Segment = Union[str, int]
+
+
+# Bytes are technically not strings in Python 3, but we can serialize them
+serializable_str_types = (str, bytes, bytearray, memoryview)
+
+
+# Maximum length of JSON-serialized event payloads that can be safely sent
+# before the server may reject the event due to its size. This is not intended
+# to reflect actual values defined server-side, but rather only be an upper
+# bound for events sent by the SDK.
+#
+# Can be overridden if you want to send more bytes, e.g. with a custom server.
+# When changing this, keep in mind that events may be a little bit larger than
+# this value due to attached metadata, so keep the number conservative.
+MAX_EVENT_BYTES = 10**6
+
+# Maximum depth and breadth of databags. Excess data will be trimmed. If
+# max_request_body_size is "always", request bodies won't be trimmed.
+MAX_DATABAG_DEPTH = 5
+MAX_DATABAG_BREADTH = 10
+CYCLE_MARKER = "<cyclic>"
+
+
+global_repr_processors = []  # type: List[ReprProcessor]
+
+
+def add_global_repr_processor(processor):
+    # type: (ReprProcessor) -> None
+    global_repr_processors.append(processor)
+
+
+class Memo:
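+    """
+    Bookkeeping for cycle detection during the recursive walk: memoize() pushes
+    an object onto the current path, __enter__ reports whether that object's
+    id() was already seen on the path, and __exit__ pops it again.
+    """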
+    __slots__ = ("_ids", "_objs")
+
+    def __init__(self):
+        # type: () -> None
+        self._ids = {}  # type: Dict[int, Any]
+        self._objs = []  # type: List[Any]
+
+    def memoize(self, obj):
+        # type: (Any) -> ContextManager[bool]
+        self._objs.append(obj)
+        return self
+
+    def __enter__(self):
+        # type: () -> bool
+        obj = self._objs[-1]
+        if id(obj) in self._ids:
+            return True
+        else:
+            self._ids[id(obj)] = obj
+            return False
+
+    def __exit__(
+        self,
+        ty,  # type: Optional[Type[BaseException]]
+        value,  # type: Optional[BaseException]
+        tb,  # type: Optional[TracebackType]
+    ):
+        # type: (...) -> None
+        self._ids.pop(id(self._objs.pop()), None)
+
+
+def serialize(event, **kwargs):
+    # type: (Dict[str, Any], **Any) -> Dict[str, Any]
+    """
+    A serializer that takes a dict and emits a JSON-friendly dict.
+    Currently used for serializing the final Event, and also used early while fetching
+    the stack local variables for each frame in a stacktrace.
+
+    It works internally with 'databags', which are arbitrary data structures like Mapping, Sequence and Set.
+    The algorithm itself is a recursive graph walk down the data structures it encounters.
+
+    It has the following responsibilities:
+    * Trimming databags and keeping them within MAX_DATABAG_BREADTH and MAX_DATABAG_DEPTH.
+    * Calling safe_repr() on objects appropriately to keep them informative and readable in the final payload.
+    * Annotating the payload with the _meta field whenever trimming happens.
+
+    :param max_request_body_size: If set to "always", will never trim request bodies.
+    :param max_value_length: The max length to strip strings to, defaults to sentry_sdk.consts.DEFAULT_MAX_VALUE_LENGTH
+    :param is_vars: If we're serializing vars early, we want to repr() things that are JSON-serializable to make their type more apparent. For example, it's useful to see the difference between a unicode-string and a bytestring when viewing a stacktrace.
+    :param custom_repr: A custom repr function that runs before safe_repr on the object to be serialized. If it returns None or throws internally, we fall back to safe_repr.
+
+    """
+    memo = Memo()
+    path = []  # type: List[Segment]
+    meta_stack = []  # type: List[Dict[str, Any]]
+
+    keep_request_bodies = (
+        kwargs.pop("max_request_body_size", None) == "always"
+    )  # type: bool
+    max_value_length = kwargs.pop("max_value_length", None)  # type: Optional[int]
+    is_vars = kwargs.pop("is_vars", False)
+    custom_repr = kwargs.pop("custom_repr", None)  # type: Optional[Callable[..., Optional[str]]]
+
+    def _safe_repr_wrapper(value):
+        # type: (Any) -> str
+        try:
+            repr_value = None
+            if custom_repr is not None:
+                repr_value = custom_repr(value)
+            return repr_value or safe_repr(value)
+        except Exception:
+            return safe_repr(value)
+
+    def _annotate(**meta):
+        # type: (**Any) -> None
+        while len(meta_stack) <= len(path):
+            try:
+                segment = path[len(meta_stack) - 1]
+                node = meta_stack[-1].setdefault(str(segment), {})
+            except IndexError:
+                node = {}
+
+            meta_stack.append(node)
+
+        meta_stack[-1].setdefault("", {}).update(meta)
+
+    def _is_databag():
+        # type: () -> Optional[bool]
+        """
+        A databag is any value that we need to trim.
+        True for stuff like vars, request bodies, breadcrumbs and extra.
+
+        :returns: `True` for "yes", `False` for "no", `None` for "maybe soon".
+        """
+        try:
+            if is_vars:
+                return True
+
+            is_request_body = _is_request_body()
+            if is_request_body in (True, None):
+                return is_request_body
+
+            p0 = path[0]
+            if p0 == "breadcrumbs" and path[1] == "values":
+                path[2]  # raises IndexError (caught below) if path is too short
+                return True
+
+            if p0 == "extra":
+                return True
+
+        except IndexError:
+            return None
+
+        return False
+
+    def _is_request_body():
+        # type: () -> Optional[bool]
+        try:
+            if path[0] == "request" and path[1] == "data":
+                return True
+        except IndexError:
+            return None
+
+        return False
+
+    def _serialize_node(
+        obj,  # type: Any
+        is_databag=None,  # type: Optional[bool]
+        is_request_body=None,  # type: Optional[bool]
+        should_repr_strings=None,  # type: Optional[bool]
+        segment=None,  # type: Optional[Segment]
+        remaining_breadth=None,  # type: Optional[Union[int, float]]
+        remaining_depth=None,  # type: Optional[Union[int, float]]
+    ):
+        # type: (...) -> Any
+        if segment is not None:
+            path.append(segment)
+
+        try:
+            with memo.memoize(obj) as result:
+                if result:
+                    return CYCLE_MARKER
+
+                return _serialize_node_impl(
+                    obj,
+                    is_databag=is_databag,
+                    is_request_body=is_request_body,
+                    should_repr_strings=should_repr_strings,
+                    remaining_depth=remaining_depth,
+                    remaining_breadth=remaining_breadth,
+                )
+        except BaseException:
+            capture_internal_exception(sys.exc_info())
+
+            if is_databag:
+                return "<failed to serialize, use init(debug=True) to see error logs>"
+
+            return None
+        finally:
+            if segment is not None:
+                path.pop()
+                del meta_stack[len(path) + 1 :]
+
+    def _flatten_annotated(obj):
+        # type: (Any) -> Any
+        if isinstance(obj, AnnotatedValue):
+            _annotate(**obj.metadata)
+            obj = obj.value
+        return obj
+
+    def _serialize_node_impl(
+        obj,
+        is_databag,
+        is_request_body,
+        should_repr_strings,
+        remaining_depth,
+        remaining_breadth,
+    ):
+        # type: (Any, Optional[bool], Optional[bool], Optional[bool], Optional[Union[float, int]], Optional[Union[float, int]]) -> Any
+        if isinstance(obj, AnnotatedValue):
+            should_repr_strings = False
+        if should_repr_strings is None:
+            should_repr_strings = is_vars
+
+        if is_databag is None:
+            is_databag = _is_databag()
+
+        if is_request_body is None:
+            is_request_body = _is_request_body()
+
+        if is_databag:
+            if is_request_body and keep_request_bodies:
+                remaining_depth = float("inf")
+                remaining_breadth = float("inf")
+            else:
+                if remaining_depth is None:
+                    remaining_depth = MAX_DATABAG_DEPTH
+                if remaining_breadth is None:
+                    remaining_breadth = MAX_DATABAG_BREADTH
+
+        obj = _flatten_annotated(obj)
+
+        if remaining_depth is not None and remaining_depth <= 0:
+            _annotate(rem=[["!limit", "x"]])
+            if is_databag:
+                return _flatten_annotated(
+                    strip_string(_safe_repr_wrapper(obj), max_length=max_value_length)
+                )
+            return None
+
+        if is_databag and global_repr_processors:
+            hints = {"memo": memo, "remaining_depth": remaining_depth}
+            for processor in global_repr_processors:
+                result = processor(obj, hints)
+                if result is not NotImplemented:
+                    return _flatten_annotated(result)
+
+        sentry_repr = getattr(type(obj), "__sentry_repr__", None)
+
+        if obj is None or isinstance(obj, (bool, int, float)):
+            if should_repr_strings or (
+                isinstance(obj, float) and (math.isinf(obj) or math.isnan(obj))
+            ):
+                return _safe_repr_wrapper(obj)
+            else:
+                return obj
+
+        elif callable(sentry_repr):
+            return sentry_repr(obj)
+
+        elif isinstance(obj, datetime):
+            return (
+                str(format_timestamp(obj))
+                if not should_repr_strings
+                else _safe_repr_wrapper(obj)
+            )
+
+        elif isinstance(obj, Mapping):
+            # Create temporary copy here to avoid calling too much code that
+            # might mutate our dictionary while we're still iterating over it.
+            obj = dict(obj.items())
+
+            rv_dict = {}  # type: Dict[str, Any]
+            i = 0
+
+            for k, v in obj.items():
+                if remaining_breadth is not None and i >= remaining_breadth:
+                    _annotate(len=len(obj))
+                    break
+
+                str_k = str(k)
+                v = _serialize_node(
+                    v,
+                    segment=str_k,
+                    should_repr_strings=should_repr_strings,
+                    is_databag=is_databag,
+                    is_request_body=is_request_body,
+                    remaining_depth=(
+                        remaining_depth - 1 if remaining_depth is not None else None
+                    ),
+                    remaining_breadth=remaining_breadth,
+                )
+                rv_dict[str_k] = v
+                i += 1
+
+            return rv_dict
+
+        elif not isinstance(obj, serializable_str_types) and isinstance(
+            obj, (Set, Sequence)
+        ):
+            rv_list = []
+
+            for i, v in enumerate(obj):
+                if remaining_breadth is not None and i >= remaining_breadth:
+                    _annotate(len=len(obj))
+                    break
+
+                rv_list.append(
+                    _serialize_node(
+                        v,
+                        segment=i,
+                        should_repr_strings=should_repr_strings,
+                        is_databag=is_databag,
+                        is_request_body=is_request_body,
+                        remaining_depth=(
+                            remaining_depth - 1 if remaining_depth is not None else None
+                        ),
+                        remaining_breadth=remaining_breadth,
+                    )
+                )
+
+            return rv_list
+
+        if should_repr_strings:
+            obj = _safe_repr_wrapper(obj)
+        else:
+            if isinstance(obj, (bytes, bytearray)):
+                obj = obj.decode("utf-8", "replace")
+
+            if not isinstance(obj, str):
+                obj = _safe_repr_wrapper(obj)
+
+        is_span_description = (
+            len(path) == 3 and path[0] == "spans" and path[-1] == "description"
+        )
+        if is_span_description:
+            return obj
+
+        return _flatten_annotated(strip_string(obj, max_length=max_value_length))
+
+    #
+    # Start of serialize() function
+    #
+    disable_capture_event.set(True)
+    try:
+        serialized_event = _serialize_node(event, **kwargs)
+        if not is_vars and meta_stack and isinstance(serialized_event, dict):
+            serialized_event["_meta"] = meta_stack[0]
+
+        return serialized_event
+    finally:
+        disable_capture_event.set(False)
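+
+
+if __name__ == "__main__":
+    # Illustrative sketch only, not part of the SDK: "extra" is treated as a
+    # databag, so its contents are trimmed to MAX_DATABAG_BREADTH items and the
+    # trimming is recorded under the event's "_meta" key.
+    fake_event = {"extra": {"numbers": list(range(50)), "blob": b"\xff\xfe"}}
+    result = serialize(fake_event)
+    print(len(result["extra"]["numbers"]))  # 10 == MAX_DATABAG_BREADTH
+    print(result["_meta"])  # annotation recording the original length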
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/session.py b/.venv/lib/python3.12/site-packages/sentry_sdk/session.py
new file mode 100644
index 00000000..c1d422c1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/session.py
@@ -0,0 +1,175 @@
+import uuid
+from datetime import datetime, timezone
+
+from sentry_sdk.utils import format_timestamp
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Optional
+    from typing import Union
+    from typing import Any
+    from typing import Dict
+
+    from sentry_sdk._types import SessionStatus
+
+
+def _minute_trunc(ts):
+    # type: (datetime) -> datetime
+    return ts.replace(second=0, microsecond=0)
+
+
+def _make_uuid(
+    val,  # type: Union[str, uuid.UUID]
+):
+    # type: (...) -> uuid.UUID
+    if isinstance(val, uuid.UUID):
+        return val
+    return uuid.UUID(val)
+
+
+class Session:
+    def __init__(
+        self,
+        sid=None,  # type: Optional[Union[str, uuid.UUID]]
+        did=None,  # type: Optional[str]
+        timestamp=None,  # type: Optional[datetime]
+        started=None,  # type: Optional[datetime]
+        duration=None,  # type: Optional[float]
+        status=None,  # type: Optional[SessionStatus]
+        release=None,  # type: Optional[str]
+        environment=None,  # type: Optional[str]
+        user_agent=None,  # type: Optional[str]
+        ip_address=None,  # type: Optional[str]
+        errors=None,  # type: Optional[int]
+        user=None,  # type: Optional[Any]
+        session_mode="application",  # type: str
+    ):
+        # type: (...) -> None
+        if sid is None:
+            sid = uuid.uuid4()
+        if started is None:
+            started = datetime.now(timezone.utc)
+        if status is None:
+            status = "ok"
+        self.status = status
+        self.did = None  # type: Optional[str]
+        self.started = started
+        self.release = None  # type: Optional[str]
+        self.environment = None  # type: Optional[str]
+        self.duration = None  # type: Optional[float]
+        self.user_agent = None  # type: Optional[str]
+        self.ip_address = None  # type: Optional[str]
+        self.session_mode = session_mode  # type: str
+        self.errors = 0
+
+        self.update(
+            sid=sid,
+            did=did,
+            timestamp=timestamp,
+            duration=duration,
+            release=release,
+            environment=environment,
+            user_agent=user_agent,
+            ip_address=ip_address,
+            errors=errors,
+            user=user,
+        )
+
+    @property
+    def truncated_started(self):
+        # type: (...) -> datetime
+        return _minute_trunc(self.started)
+
+    def update(
+        self,
+        sid=None,  # type: Optional[Union[str, uuid.UUID]]
+        did=None,  # type: Optional[str]
+        timestamp=None,  # type: Optional[datetime]
+        started=None,  # type: Optional[datetime]
+        duration=None,  # type: Optional[float]
+        status=None,  # type: Optional[SessionStatus]
+        release=None,  # type: Optional[str]
+        environment=None,  # type: Optional[str]
+        user_agent=None,  # type: Optional[str]
+        ip_address=None,  # type: Optional[str]
+        errors=None,  # type: Optional[int]
+        user=None,  # type: Optional[Any]
+    ):
+        # type: (...) -> None
+        # If a user is supplied we pull some data from it
+        if user:
+            if ip_address is None:
+                ip_address = user.get("ip_address")
+            if did is None:
+                did = user.get("id") or user.get("email") or user.get("username")
+
+        if sid is not None:
+            self.sid = _make_uuid(sid)
+        if did is not None:
+            self.did = str(did)
+        if timestamp is None:
+            timestamp = datetime.now(timezone.utc)
+        self.timestamp = timestamp
+        if started is not None:
+            self.started = started
+        if duration is not None:
+            self.duration = duration
+        if release is not None:
+            self.release = release
+        if environment is not None:
+            self.environment = environment
+        if ip_address is not None:
+            self.ip_address = ip_address
+        if user_agent is not None:
+            self.user_agent = user_agent
+        if errors is not None:
+            self.errors = errors
+
+        if status is not None:
+            self.status = status
+
+    def close(
+        self, status=None  # type: Optional[SessionStatus]
+    ):
+        # type: (...) -> Any
+        if status is None and self.status == "ok":
+            status = "exited"
+        if status is not None:
+            self.update(status=status)
+
+    def get_json_attrs(
+        self, with_user_info=True  # type: bool
+    ):
+        # type: (...) -> Any
+        attrs = {}
+        if self.release is not None:
+            attrs["release"] = self.release
+        if self.environment is not None:
+            attrs["environment"] = self.environment
+        if with_user_info:
+            if self.ip_address is not None:
+                attrs["ip_address"] = self.ip_address
+            if self.user_agent is not None:
+                attrs["user_agent"] = self.user_agent
+        return attrs
+
+    def to_json(self):
+        # type: (...) -> Any
+        rv = {
+            "sid": str(self.sid),
+            "init": True,
+            "started": format_timestamp(self.started),
+            "timestamp": format_timestamp(self.timestamp),
+            "status": self.status,
+        }  # type: Dict[str, Any]
+        if self.errors:
+            rv["errors"] = self.errors
+        if self.did is not None:
+            rv["did"] = self.did
+        if self.duration is not None:
+            rv["duration"] = self.duration
+        attrs = self.get_json_attrs()
+        if attrs:
+            rv["attrs"] = attrs
+        return rv
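+
+
+if __name__ == "__main__":
+    # Illustrative sketch only, not part of the SDK: a session's lifecycle is
+    # start -> update -> close -> serialize.
+    session = Session(release="demo@1.0", environment="dev")
+    session.update(errors=1)
+    session.close()  # a still-"ok" session is closed as "exited"
+    print(session.to_json())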
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/sessions.py b/.venv/lib/python3.12/site-packages/sentry_sdk/sessions.py
new file mode 100644
index 00000000..eaeb915e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/sessions.py
@@ -0,0 +1,278 @@
+import os
+import time
+import warnings
+from threading import Thread, Lock
+from contextlib import contextmanager
+
+import sentry_sdk
+from sentry_sdk.envelope import Envelope
+from sentry_sdk.session import Session
+from sentry_sdk.utils import format_timestamp
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+    from typing import Callable
+    from typing import Dict
+    from typing import Generator
+    from typing import List
+    from typing import Optional
+    from typing import Union
+
+
+def is_auto_session_tracking_enabled(hub=None):
+    # type: (Optional[sentry_sdk.Hub]) -> Union[Any, bool, None]
+    """DEPRECATED: Utility function to find out if session tracking is enabled."""
+
+    # Internal callers should use private _is_auto_session_tracking_enabled, instead.
+    warnings.warn(
+        "This function is deprecated and will be removed in the next major release. "
+        "There is no public API replacement.",
+        DeprecationWarning,
+        stacklevel=2,
+    )
+
+    if hub is None:
+        hub = sentry_sdk.Hub.current
+
+    should_track = hub.scope._force_auto_session_tracking
+
+    if should_track is None:
+        client_options = hub.client.options if hub.client else {}
+        should_track = client_options.get("auto_session_tracking", False)
+
+    return should_track
+
+
+@contextmanager
+def auto_session_tracking(hub=None, session_mode="application"):
+    # type: (Optional[sentry_sdk.Hub], str) -> Generator[None, None, None]
+    """DEPRECATED: Use track_session instead
+    Starts and stops a session automatically around a block.
+    """
+    warnings.warn(
+        "This function is deprecated and will be removed in the next major release. "
+        "Use track_session instead.",
+        DeprecationWarning,
+        stacklevel=2,
+    )
+
+    if hub is None:
+        hub = sentry_sdk.Hub.current
+    with warnings.catch_warnings():
+        warnings.simplefilter("ignore", DeprecationWarning)
+        should_track = is_auto_session_tracking_enabled(hub)
+    if should_track:
+        hub.start_session(session_mode=session_mode)
+    try:
+        yield
+    finally:
+        if should_track:
+            hub.end_session()
+
+
+def is_auto_session_tracking_enabled_scope(scope):
+    # type: (sentry_sdk.Scope) -> bool
+    """
+    DEPRECATED: Utility function to find out if session tracking is enabled.
+    """
+
+    warnings.warn(
+        "This function is deprecated and will be removed in the next major release. "
+        "There is no public API replacement.",
+        DeprecationWarning,
+        stacklevel=2,
+    )
+
+    # Internal callers should use private _is_auto_session_tracking_enabled, instead.
+    return _is_auto_session_tracking_enabled(scope)
+
+
+def _is_auto_session_tracking_enabled(scope):
+    # type: (sentry_sdk.Scope) -> bool
+    """
+    Utility function to find out if session tracking is enabled.
+    """
+
+    should_track = scope._force_auto_session_tracking
+    if should_track is None:
+        client_options = sentry_sdk.get_client().options
+        should_track = client_options.get("auto_session_tracking", False)
+
+    return should_track
+
+
+@contextmanager
+def auto_session_tracking_scope(scope, session_mode="application"):
+    # type: (sentry_sdk.Scope, str) -> Generator[None, None, None]
+    """DEPRECATED: This function is a deprecated alias for track_session.
+    Starts and stops a session automatically around a block.
+    """
+
+    warnings.warn(
+        "This function is a deprecated alias for track_session and will be removed in the next major release.",
+        DeprecationWarning,
+        stacklevel=2,
+    )
+
+    with track_session(scope, session_mode=session_mode):
+        yield
+
+
+@contextmanager
+def track_session(scope, session_mode="application"):
+    # type: (sentry_sdk.Scope, str) -> Generator[None, None, None]
+    """
+    Start a new session in the provided scope, assuming session tracking is enabled.
+    This is a no-op context manager if session tracking is not enabled.
+    """
+
+    should_track = _is_auto_session_tracking_enabled(scope)
+    if should_track:
+        scope.start_session(session_mode=session_mode)
+    try:
+        yield
+    finally:
+        if should_track:
+            scope.end_session()
+
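+# Usage sketch (illustrative, not part of the SDK; assumes the SDK 2.x
+# `sentry_sdk.isolation_scope()` API for obtaining a scope):
+#
+#     with sentry_sdk.isolation_scope() as scope:
+#         with track_session(scope, session_mode="request"):
+#             ...  # handle a single request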
+
+TERMINAL_SESSION_STATES = ("exited", "abnormal", "crashed")
+MAX_ENVELOPE_ITEMS = 100
+
+
+def make_aggregate_envelope(aggregate_states, attrs):
+    # type: (Any, Any) -> Any
+    return {"attrs": dict(attrs), "aggregates": list(aggregate_states.values())}
+
+
+class SessionFlusher:
+    def __init__(
+        self,
+        capture_func,  # type: Callable[[Envelope], None]
+        flush_interval=60,  # type: int
+    ):
+        # type: (...) -> None
+        self.capture_func = capture_func
+        self.flush_interval = flush_interval
+        self.pending_sessions = []  # type: List[Any]
+        self.pending_aggregates = {}  # type: Dict[Any, Any]
+        self._thread = None  # type: Optional[Thread]
+        self._thread_lock = Lock()
+        self._aggregate_lock = Lock()
+        self._thread_for_pid = None  # type: Optional[int]
+        self._running = True
+
+    def flush(self):
+        # type: (...) -> None
+        pending_sessions = self.pending_sessions
+        self.pending_sessions = []
+
+        with self._aggregate_lock:
+            pending_aggregates = self.pending_aggregates
+            self.pending_aggregates = {}
+
+        envelope = Envelope()
+        for session in pending_sessions:
+            if len(envelope.items) == MAX_ENVELOPE_ITEMS:
+                self.capture_func(envelope)
+                envelope = Envelope()
+
+            envelope.add_session(session)
+
+        for attrs, states in pending_aggregates.items():
+            if len(envelope.items) == MAX_ENVELOPE_ITEMS:
+                self.capture_func(envelope)
+                envelope = Envelope()
+
+            envelope.add_sessions(make_aggregate_envelope(states, attrs))
+
+        if len(envelope.items) > 0:
+            self.capture_func(envelope)
+
+    def _ensure_running(self):
+        # type: (...) -> None
+        """
+        Check that we have an active thread to run in, or create one if not.
+
+        Note that this might fail (e.g. in Python 3.12 it's not possible to
+        spawn new threads at interpreter shutdown). In that case self._running
+        will be False after running this function.
+        """
+        if self._thread_for_pid == os.getpid() and self._thread is not None:
+            return None
+        with self._thread_lock:
+            if self._thread_for_pid == os.getpid() and self._thread is not None:
+                return None
+
+            def _thread():
+                # type: (...) -> None
+                while self._running:
+                    time.sleep(self.flush_interval)
+                    if self._running:
+                        self.flush()
+
+            thread = Thread(target=_thread)
+            thread.daemon = True
+            try:
+                thread.start()
+            except RuntimeError:
+                # Unfortunately at this point the interpreter is in a state that no
+                # longer allows us to spawn a thread and we have to bail.
+                self._running = False
+                return None
+
+            self._thread = thread
+            self._thread_for_pid = os.getpid()
+
+        return None
+
+    def add_aggregate_session(
+        self, session  # type: Session
+    ):
+        # type: (...) -> None
+        # NOTE on `session.did`:
+        # The protocol can deal with buckets that have a distinct-id. However,
+        # in practice the Python SDK sees extremely high cardinality here,
+        # which makes aggregation useless, so we do not aggregate per-did.
+
+        # Guard the shared aggregate buckets with an explicit lock rather than
+        # relying on the global interpreter lock.
+        with self._aggregate_lock:
+            attrs = session.get_json_attrs(with_user_info=False)
+            primary_key = tuple(sorted(attrs.items()))
+            secondary_key = session.truncated_started  # (, session.did)
+            states = self.pending_aggregates.setdefault(primary_key, {})
+            state = states.setdefault(secondary_key, {})
+
+            if "started" not in state:
+                state["started"] = format_timestamp(session.truncated_started)
+            # if session.did is not None:
+            #     state["did"] = session.did
+            if session.status == "crashed":
+                state["crashed"] = state.get("crashed", 0) + 1
+            elif session.status == "abnormal":
+                state["abnormal"] = state.get("abnormal", 0) + 1
+            elif session.errors > 0:
+                state["errored"] = state.get("errored", 0) + 1
+            else:
+                state["exited"] = state.get("exited", 0) + 1
+
+    def add_session(
+        self, session  # type: Session
+    ):
+        # type: (...) -> None
+        if session.session_mode == "request":
+            self.add_aggregate_session(session)
+        else:
+            self.pending_sessions.append(session.to_json())
+        self._ensure_running()
+
+    def kill(self):
+        # type: (...) -> None
+        self._running = False
+
+    def __del__(self):
+        # type: (...) -> None
+        self.kill()
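+
+
+if __name__ == "__main__":
+    # Illustrative sketch only, not part of the SDK: "application" mode
+    # sessions are captured one envelope item each; "request" mode sessions
+    # would be bucketed through add_aggregate_session() instead.
+    flusher = SessionFlusher(capture_func=lambda envelope: print(envelope))
+    flusher.add_session(Session(release="demo@1.0", status="exited"))
+    flusher.flush()
+    flusher.kill()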
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/spotlight.py b/.venv/lib/python3.12/site-packages/sentry_sdk/spotlight.py
new file mode 100644
index 00000000..c2473b77
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/spotlight.py
@@ -0,0 +1,233 @@
+import io
+import logging
+import os
+import sys
+import urllib.error
+import urllib.parse
+import urllib.request
+
+import urllib3
+
+from itertools import chain, product
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+    from typing import Callable
+    from typing import Dict
+    from typing import Optional
+    from typing import Self
+
+from sentry_sdk.utils import (
+    logger as sentry_logger,
+    env_to_bool,
+    capture_internal_exceptions,
+)
+from sentry_sdk.envelope import Envelope
+
+
+logger = logging.getLogger("spotlight")
+
+
+DEFAULT_SPOTLIGHT_URL = "http://localhost:8969/stream"
+DJANGO_SPOTLIGHT_MIDDLEWARE_PATH = "sentry_sdk.spotlight.SpotlightMiddleware"
+
+
+class SpotlightClient:
+    def __init__(self, url):
+        # type: (str) -> None
+        self.url = url
+        self.http = urllib3.PoolManager()
+        self.tries = 0
+
+    def capture_envelope(self, envelope):
+        # type: (Envelope) -> None
+        body = io.BytesIO()
+        envelope.serialize_into(body)
+        try:
+            req = self.http.request(
+                url=self.url,
+                body=body.getvalue(),
+                method="POST",
+                headers={
+                    "Content-Type": "application/x-sentry-envelope",
+                },
+            )
+            req.close()
+        except Exception as e:
+            # TODO: Implement buffering and retrying with exponential backoff
+            sentry_logger.warning(str(e))
+
+
+try:
+    from django.utils.deprecation import MiddlewareMixin
+    from django.http import HttpResponseServerError, HttpResponse, HttpRequest
+    from django.conf import settings
+
+    SPOTLIGHT_JS_ENTRY_PATH = "/assets/main.js"
+    SPOTLIGHT_JS_SNIPPET_PATTERN = (
+        "<script>window.__spotlight = {{ initOptions: {{ sidecarUrl: '{spotlight_url}', fullPage: false }} }};</script>\n"
+        '<script type="module" crossorigin src="{spotlight_js_url}"></script>\n'
+    )
+    SPOTLIGHT_ERROR_PAGE_SNIPPET = (
+        '<html><base href="{spotlight_url}">\n'
+        '<script>window.__spotlight = {{ initOptions: {{ fullPage: true, startFrom: "/errors/{event_id}" }}}};</script>\n'
+    )
+    CHARSET_PREFIX = "charset="
+    BODY_TAG_NAME = "body"
+    BODY_CLOSE_TAG_POSSIBILITIES = tuple(
+        "</{}>".format("".join(chars))
+        for chars in product(*zip(BODY_TAG_NAME.upper(), BODY_TAG_NAME.lower()))
+    )
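+    # The product(*zip(upper, lower)) trick enumerates all 16 case variants of
+    # "</body>" (e.g. "</BODY>", "</Body>") so the injection below can find the
+    # closing tag however the template cased it.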
+
+    class SpotlightMiddleware(MiddlewareMixin):  # type: ignore[misc]
+        _spotlight_script = None  # type: Optional[str]
+        _spotlight_url = None  # type: Optional[str]
+
+        def __init__(self, get_response):
+            # type: (Self, Callable[..., HttpResponse]) -> None
+            super().__init__(get_response)
+
+            import sentry_sdk.api
+
+            self.sentry_sdk = sentry_sdk.api
+
+            spotlight_client = self.sentry_sdk.get_client().spotlight
+            if spotlight_client is None:
+                sentry_logger.warning(
+                    "Cannot find Spotlight client from SpotlightMiddleware, disabling the middleware."
+                )
+                return None
+            # The Spotlight URL has a trailing `/stream` part, so split it off
+            self._spotlight_url = urllib.parse.urljoin(spotlight_client.url, "../")
+
+        @property
+        def spotlight_script(self):
+            # type: (Self) -> Optional[str]
+            if self._spotlight_url is not None and self._spotlight_script is None:
+                try:
+                    spotlight_js_url = urllib.parse.urljoin(
+                        self._spotlight_url, SPOTLIGHT_JS_ENTRY_PATH
+                    )
+                    req = urllib.request.Request(
+                        spotlight_js_url,
+                        method="HEAD",
+                    )
+                    urllib.request.urlopen(req)
+                    self._spotlight_script = SPOTLIGHT_JS_SNIPPET_PATTERN.format(
+                        spotlight_url=self._spotlight_url,
+                        spotlight_js_url=spotlight_js_url,
+                    )
+                except urllib.error.URLError as err:
+                    sentry_logger.debug(
+                        "Cannot get Spotlight JS to inject at %s. SpotlightMiddleware will not be very useful.",
+                        spotlight_js_url,
+                        exc_info=err,
+                    )
+
+            return self._spotlight_script
+
+        def process_response(self, _request, response):
+            # type: (Self, HttpRequest, HttpResponse) -> Optional[HttpResponse]
+            content_type_header = tuple(
+                p.strip()
+                for p in response.headers.get("Content-Type", "").lower().split(";")
+            )
+            content_type = content_type_header[0]
+            if len(content_type_header) > 1 and content_type_header[1].startswith(
+                CHARSET_PREFIX
+            ):
+                encoding = content_type_header[1][len(CHARSET_PREFIX) :]
+            else:
+                encoding = "utf-8"
+
+            if (
+                self.spotlight_script is not None
+                and not response.streaming
+                and content_type == "text/html"
+            ):
+                content_length = len(response.content)
+                injection = self.spotlight_script.encode(encoding)
+                injection_site = next(
+                    (
+                        idx
+                        for idx in (
+                            response.content.rfind(body_variant.encode(encoding))
+                            for body_variant in BODY_CLOSE_TAG_POSSIBILITIES
+                        )
+                        if idx > -1
+                    ),
+                    content_length,
+                )
+
+                # This approach works even when we don't have a `</body>` tag
+                response.content = (
+                    response.content[:injection_site]
+                    + injection
+                    + response.content[injection_site:]
+                )
+
+                if response.has_header("Content-Length"):
+                    response.headers["Content-Length"] = content_length + len(injection)
+
+            return response
+
+        def process_exception(self, _request, exception):
+            # type: (Self, HttpRequest, Exception) -> Optional[HttpResponseServerError]
+            if not settings.DEBUG or not self._spotlight_url:
+                return None
+
+            try:
+                spotlight = (
+                    urllib.request.urlopen(self._spotlight_url).read().decode("utf-8")
+                )
+            except urllib.error.URLError:
+                return None
+            else:
+                event_id = self.sentry_sdk.capture_exception(exception)
+                return HttpResponseServerError(
+                    spotlight.replace(
+                        "<html>",
+                        SPOTLIGHT_ERROR_PAGE_SNIPPET.format(
+                            spotlight_url=self._spotlight_url, event_id=event_id
+                        ),
+                    )
+                )
+
+except ImportError:
+    settings = None
+
+
+def setup_spotlight(options):
+    # type: (Dict[str, Any]) -> Optional[SpotlightClient]
+    _handler = logging.StreamHandler(sys.stderr)
+    _handler.setFormatter(logging.Formatter(" [spotlight] %(levelname)s: %(message)s"))
+    logger.addHandler(_handler)
+    logger.setLevel(logging.INFO)
+
+    url = options.get("spotlight")
+
+    if url is True:
+        url = DEFAULT_SPOTLIGHT_URL
+
+    if not isinstance(url, str):
+        return None
+
+    with capture_internal_exceptions():
+        if (
+            settings is not None
+            and settings.DEBUG
+            and env_to_bool(os.environ.get("SENTRY_SPOTLIGHT_ON_ERROR", "1"))
+            and env_to_bool(os.environ.get("SENTRY_SPOTLIGHT_MIDDLEWARE", "1"))
+        ):
+            middleware = settings.MIDDLEWARE
+            if DJANGO_SPOTLIGHT_MIDDLEWARE_PATH not in middleware:
+                settings.MIDDLEWARE = type(middleware)(
+                    chain(middleware, (DJANGO_SPOTLIGHT_MIDDLEWARE_PATH,))
+                )
+                logger.info("Enabled Spotlight integration for Django")
+
+    client = SpotlightClient(url)
+    logger.info("Enabled Spotlight using sidecar at %s", url)
+
+    return client
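+
+
+if __name__ == "__main__":
+    # Illustrative sketch only, not part of the SDK: `spotlight=True` resolves
+    # to DEFAULT_SPOTLIGHT_URL; any non-string value disables Spotlight.
+    client = setup_spotlight({"spotlight": True})
+    if client is not None:
+        print("Spotlight sidecar at", client.url)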
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/tracing.py b/.venv/lib/python3.12/site-packages/sentry_sdk/tracing.py
new file mode 100644
index 00000000..13d9f63d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/tracing.py
@@ -0,0 +1,1358 @@
+import uuid
+import warnings
+from datetime import datetime, timedelta, timezone
+from enum import Enum
+
+import sentry_sdk
+from sentry_sdk.consts import INSTRUMENTER, SPANSTATUS, SPANDATA
+from sentry_sdk.profiler.continuous_profiler import get_profiler_id
+from sentry_sdk.utils import (
+    get_current_thread_meta,
+    is_valid_sample_rate,
+    logger,
+    nanosecond_time,
+    should_be_treated_as_error,
+)
+
+from typing import TYPE_CHECKING
+
+
+if TYPE_CHECKING:
+    from collections.abc import Callable, Mapping, MutableMapping
+    from typing import Any
+    from typing import Dict
+    from typing import Iterator
+    from typing import List
+    from typing import Optional
+    from typing import overload
+    from typing import ParamSpec
+    from typing import Tuple
+    from typing import Union
+    from typing import TypeVar
+
+    from typing_extensions import TypedDict, Unpack
+
+    P = ParamSpec("P")
+    R = TypeVar("R")
+
+    from sentry_sdk.profiler.continuous_profiler import ContinuousProfile
+    from sentry_sdk.profiler.transaction_profiler import Profile
+    from sentry_sdk._types import (
+        Event,
+        MeasurementUnit,
+        SamplingContext,
+        MeasurementValue,
+    )
+
+    class SpanKwargs(TypedDict, total=False):
+        trace_id: str
+        """
+        The trace ID of the root span. If this new span is to be the root span,
+        omit this parameter, and a new trace ID will be generated.
+        """
+
+        span_id: str
+        """The span ID of this span. If omitted, a new span ID will be generated."""
+
+        parent_span_id: str
+        """The span ID of the parent span, if applicable."""
+
+        same_process_as_parent: bool
+        """Whether this span is in the same process as the parent span."""
+
+        sampled: bool
+        """
+        Whether the span should be sampled. Overrides the default sampling decision
+        for this span when provided.
+        """
+
+        op: str
+        """
+        The span's operation. A list of recommended values is available here:
+        https://develop.sentry.dev/sdk/performance/span-operations/
+        """
+
+        description: str
+        """A description of what operation is being performed within the span. This argument is DEPRECATED. Please use the `name` parameter, instead."""
+
+        hub: Optional["sentry_sdk.Hub"]
+        """The hub to use for this span. This argument is DEPRECATED. Please use the `scope` parameter, instead."""
+
+        status: str
+        """The span's status. Possible values are listed at https://develop.sentry.dev/sdk/event-payloads/span/"""
+
+        containing_transaction: Optional["Transaction"]
+        """The transaction that this span belongs to."""
+
+        start_timestamp: Optional[Union[datetime, float]]
+        """
+        The timestamp when the span started. If omitted, the current time
+        will be used.
+        """
+
+        scope: "sentry_sdk.Scope"
+        """The scope to use for this span. If not provided, we use the current scope."""
+
+        origin: str
+        """
+        The origin of the span.
+        See https://develop.sentry.dev/sdk/performance/trace-origin/
+        Default "manual".
+        """
+
+        name: str
+        """A string describing what operation is being performed within the span/transaction."""
+
+    class TransactionKwargs(SpanKwargs, total=False):
+        source: str
+        """
+        A string describing the source of the transaction name. This will be used to determine the transaction's type.
+        See https://develop.sentry.dev/sdk/event-payloads/transaction/#transaction-annotations for more information.
+        Default "custom".
+        """
+
+        parent_sampled: bool
+        """Whether the parent transaction was sampled. If True this transaction will be kept, if False it will be discarded."""
+
+        baggage: "Baggage"
+        """The W3C baggage header value. (see https://www.w3.org/TR/baggage/)"""
+
+    ProfileContext = TypedDict(
+        "ProfileContext",
+        {
+            "profiler_id": str,
+        },
+    )
+
+BAGGAGE_HEADER_NAME = "baggage"
+SENTRY_TRACE_HEADER_NAME = "sentry-trace"
+
+
+# Transaction source
+# see https://develop.sentry.dev/sdk/event-payloads/transaction/#transaction-annotations
+class TransactionSource(str, Enum):
+    COMPONENT = "component"
+    CUSTOM = "custom"
+    ROUTE = "route"
+    TASK = "task"
+    URL = "url"
+    VIEW = "view"
+
+    def __str__(self):
+        # type: () -> str
+        return self.value
+
+
+# These sources typically produce high-cardinality transaction names,
+# which are problematic for the server
+LOW_QUALITY_TRANSACTION_SOURCES = [
+    TransactionSource.URL,
+]
+
+SOURCE_FOR_STYLE = {
+    "endpoint": TransactionSource.COMPONENT,
+    "function_name": TransactionSource.COMPONENT,
+    "handler_name": TransactionSource.COMPONENT,
+    "method_and_path_pattern": TransactionSource.ROUTE,
+    "path": TransactionSource.URL,
+    "route_name": TransactionSource.COMPONENT,
+    "route_pattern": TransactionSource.ROUTE,
+    "uri_template": TransactionSource.ROUTE,
+    "url": TransactionSource.ROUTE,
+}
+
+
+def get_span_status_from_http_code(http_status_code):
+    # type: (int) -> str
+    """
+    Returns the Sentry status corresponding to the given HTTP status code.
+
+    See: https://develop.sentry.dev/sdk/event-payloads/contexts/#trace-context
+    """
+    if http_status_code < 400:
+        return SPANSTATUS.OK
+
+    elif 400 <= http_status_code < 500:
+        if http_status_code == 403:
+            return SPANSTATUS.PERMISSION_DENIED
+        elif http_status_code == 404:
+            return SPANSTATUS.NOT_FOUND
+        elif http_status_code == 429:
+            return SPANSTATUS.RESOURCE_EXHAUSTED
+        elif http_status_code == 413:
+            return SPANSTATUS.FAILED_PRECONDITION
+        elif http_status_code == 401:
+            return SPANSTATUS.UNAUTHENTICATED
+        elif http_status_code == 409:
+            return SPANSTATUS.ALREADY_EXISTS
+        else:
+            return SPANSTATUS.INVALID_ARGUMENT
+
+    elif 500 <= http_status_code < 600:
+        if http_status_code == 504:
+            return SPANSTATUS.DEADLINE_EXCEEDED
+        elif http_status_code == 501:
+            return SPANSTATUS.UNIMPLEMENTED
+        elif http_status_code == 503:
+            return SPANSTATUS.UNAVAILABLE
+        else:
+            return SPANSTATUS.INTERNAL_ERROR
+
+    return SPANSTATUS.UNKNOWN_ERROR
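+
+# For example: get_span_status_from_http_code(404) is SPANSTATUS.NOT_FOUND and
+# get_span_status_from_http_code(503) is SPANSTATUS.UNAVAILABLE; every status
+# code below 400 maps to SPANSTATUS.OK.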
+
+
+class _SpanRecorder:
+    """Limits the number of spans recorded in a transaction."""
+
+    __slots__ = ("maxlen", "spans", "dropped_spans")
+
+    def __init__(self, maxlen):
+        # type: (int) -> None
+        # FIXME: this is `maxlen - 1` only to preserve historical behavior
+        # enforced by tests.
+        # Either this should be changed to `maxlen` or the JS SDK implementation
+        # should be changed to match a consistent interpretation of what maxlen
+        # limits: either transaction+spans or only child spans.
+        self.maxlen = maxlen - 1
+        self.spans = []  # type: List[Span]
+        self.dropped_spans = 0  # type: int
+
+    def add(self, span):
+        # type: (Span) -> None
+        if len(self.spans) > self.maxlen:
+            span._span_recorder = None
+            self.dropped_spans += 1
+        else:
+            self.spans.append(span)
+
+
+class Span:
+    """A span holds timing information of a block of code.
+    Spans can have multiple child spans thus forming a span tree.
+
+    :param trace_id: The trace ID of the root span. If this new span is to be the root span,
+        omit this parameter, and a new trace ID will be generated.
+    :param span_id: The span ID of this span. If omitted, a new span ID will be generated.
+    :param parent_span_id: The span ID of the parent span, if applicable.
+    :param same_process_as_parent: Whether this span is in the same process as the parent span.
+    :param sampled: Whether the span should be sampled. Overrides the default sampling decision
+        for this span when provided.
+    :param op: The span's operation. A list of recommended values is available here:
+        https://develop.sentry.dev/sdk/performance/span-operations/
+    :param description: A description of what operation is being performed within the span.
+
+        .. deprecated:: 2.15.0
+            Please use the `name` parameter, instead.
+    :param name: A string describing what operation is being performed within the span.
+    :param hub: The hub to use for this span.
+
+        .. deprecated:: 2.0.0
+            Please use the `scope` parameter, instead.
+    :param status: The span's status. Possible values are listed at
+        https://develop.sentry.dev/sdk/event-payloads/span/
+    :param containing_transaction: The transaction that this span belongs to.
+    :param start_timestamp: The timestamp when the span started. If omitted, the current time
+        will be used.
+    :param scope: The scope to use for this span. If not provided, we use the current scope.
+    """
+
+    __slots__ = (
+        "trace_id",
+        "span_id",
+        "parent_span_id",
+        "same_process_as_parent",
+        "sampled",
+        "op",
+        "description",
+        "_measurements",
+        "start_timestamp",
+        "_start_timestamp_monotonic_ns",
+        "status",
+        "timestamp",
+        "_tags",
+        "_data",
+        "_span_recorder",
+        "hub",
+        "_context_manager_state",
+        "_containing_transaction",
+        "_local_aggregator",
+        "scope",
+        "origin",
+        "name",
+    )
+
+    def __init__(
+        self,
+        trace_id=None,  # type: Optional[str]
+        span_id=None,  # type: Optional[str]
+        parent_span_id=None,  # type: Optional[str]
+        same_process_as_parent=True,  # type: bool
+        sampled=None,  # type: Optional[bool]
+        op=None,  # type: Optional[str]
+        description=None,  # type: Optional[str]
+        hub=None,  # type: Optional[sentry_sdk.Hub]  # deprecated
+        status=None,  # type: Optional[str]
+        containing_transaction=None,  # type: Optional[Transaction]
+        start_timestamp=None,  # type: Optional[Union[datetime, float]]
+        scope=None,  # type: Optional[sentry_sdk.Scope]
+        origin="manual",  # type: str
+        name=None,  # type: Optional[str]
+    ):
+        # type: (...) -> None
+        self.trace_id = trace_id or uuid.uuid4().hex
+        self.span_id = span_id or uuid.uuid4().hex[16:]
+        self.parent_span_id = parent_span_id
+        self.same_process_as_parent = same_process_as_parent
+        self.sampled = sampled
+        self.op = op
+        self.description = name or description
+        self.status = status
+        self.hub = hub  # backwards compatibility
+        self.scope = scope
+        self.origin = origin
+        self._measurements = {}  # type: Dict[str, MeasurementValue]
+        self._tags = {}  # type: MutableMapping[str, str]
+        self._data = {}  # type: Dict[str, Any]
+        self._containing_transaction = containing_transaction
+
+        if hub is not None:
+            warnings.warn(
+                "The `hub` parameter is deprecated. Please use `scope` instead.",
+                DeprecationWarning,
+                stacklevel=2,
+            )
+
+            self.scope = self.scope or hub.scope
+
+        if start_timestamp is None:
+            start_timestamp = datetime.now(timezone.utc)
+        elif isinstance(start_timestamp, float):
+            start_timestamp = datetime.fromtimestamp(start_timestamp, timezone.utc)
+        self.start_timestamp = start_timestamp
+        try:
+            # profiling depends on this value and requires that
+            # it is measured in nanoseconds
+            self._start_timestamp_monotonic_ns = nanosecond_time()
+        except AttributeError:
+            pass
+
+        #: End timestamp of span
+        self.timestamp = None  # type: Optional[datetime]
+
+        self._span_recorder = None  # type: Optional[_SpanRecorder]
+        self._local_aggregator = None  # type: Optional[LocalAggregator]
+
+        self.update_active_thread()
+        self.set_profiler_id(get_profiler_id())
+
+    # TODO this should really live on the Transaction class rather than the Span
+    # class
+    def init_span_recorder(self, maxlen):
+        # type: (int) -> None
+        if self._span_recorder is None:
+            self._span_recorder = _SpanRecorder(maxlen)
+
+    def _get_local_aggregator(self):
+        # type: (...) -> LocalAggregator
+        rv = self._local_aggregator
+        if rv is None:
+            rv = self._local_aggregator = LocalAggregator()
+        return rv
+
+    def __repr__(self):
+        # type: () -> str
+        return (
+            "<%s(op=%r, description:%r, trace_id=%r, span_id=%r, parent_span_id=%r, sampled=%r, origin=%r)>"
+            % (
+                self.__class__.__name__,
+                self.op,
+                self.description,
+                self.trace_id,
+                self.span_id,
+                self.parent_span_id,
+                self.sampled,
+                self.origin,
+            )
+        )
+
+    def __enter__(self):
+        # type: () -> Span
+        scope = self.scope or sentry_sdk.get_current_scope()
+        old_span = scope.span
+        scope.span = self
+        self._context_manager_state = (scope, old_span)
+        return self
+
+    def __exit__(self, ty, value, tb):
+        # type: (Optional[Any], Optional[Any], Optional[Any]) -> None
+        if value is not None and should_be_treated_as_error(ty, value):
+            self.set_status(SPANSTATUS.INTERNAL_ERROR)
+
+        scope, old_span = self._context_manager_state
+        del self._context_manager_state
+        self.finish(scope)
+        scope.span = old_span
+
+    @property
+    def containing_transaction(self):
+        # type: () -> Optional[Transaction]
+        """The ``Transaction`` that this span belongs to.
+        The ``Transaction`` is the root of the span tree,
+        so one could also think of this ``Transaction`` as the "root span"."""
+
+        # this is a getter rather than a regular attribute so that transactions
+        # can return `self` here instead (as a way to prevent them circularly
+        # referencing themselves)
+        return self._containing_transaction
+
+    def start_child(self, instrumenter=INSTRUMENTER.SENTRY, **kwargs):
+        # type: (str, **Any) -> Span
+        """
+        Start a sub-span from the current span or transaction.
+
+        Takes the same arguments as the initializer of :py:class:`Span`. The
+        trace id, sampling decision, transaction pointer, and span recorder are
+        inherited from the current span/transaction.
+
+        The instrumenter parameter is deprecated for user code, and it will
+        be removed in the next major version. Going forward, it should only
+        be used by the SDK itself.
+        """
+        if kwargs.get("description") is not None:
+            warnings.warn(
+                "The `description` parameter is deprecated. Please use `name` instead.",
+                DeprecationWarning,
+                stacklevel=2,
+            )
+
+        configuration_instrumenter = sentry_sdk.get_client().options["instrumenter"]
+
+        if instrumenter != configuration_instrumenter:
+            return NoOpSpan()
+
+        kwargs.setdefault("sampled", self.sampled)
+
+        child = Span(
+            trace_id=self.trace_id,
+            parent_span_id=self.span_id,
+            containing_transaction=self.containing_transaction,
+            **kwargs,
+        )
+
+        span_recorder = (
+            self.containing_transaction and self.containing_transaction._span_recorder
+        )
+        if span_recorder:
+            span_recorder.add(child)
+
+        return child
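+
+    # Usage sketch: given a parent `span` inside a sampled transaction,
+    # `span.start_child(op="db.query", name="SELECT ...")` inherits the trace
+    # id and sampling decision and is recorded on the root transaction.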
+
+    @classmethod
+    def continue_from_environ(
+        cls,
+        environ,  # type: Mapping[str, str]
+        **kwargs,  # type: Any
+    ):
+        # type: (...) -> Transaction
+        """
+        Create a Transaction with the given params, then add in data pulled from
+        the ``sentry-trace`` and ``baggage`` headers from the environ (if any)
+        before returning the Transaction.
+
+        This is different from :py:meth:`~sentry_sdk.tracing.Span.continue_from_headers`
+        in that it assumes header names in the form ``HTTP_HEADER_NAME`` -
+        such as you would get from a WSGI/ASGI environ -
+        rather than the form ``header-name``.
+
+        :param environ: The ASGI/WSGI environ to pull information from.
+        """
+        if cls is Span:
+            logger.warning(
+                "Deprecated: use Transaction.continue_from_environ "
+                "instead of Span.continue_from_environ."
+            )
+        return Transaction.continue_from_headers(EnvironHeaders(environ), **kwargs)
+
+    @classmethod
+    def continue_from_headers(
+        cls,
+        headers,  # type: Mapping[str, str]
+        *,
+        _sample_rand=None,  # type: Optional[str]
+        **kwargs,  # type: Any
+    ):
+        # type: (...) -> Transaction
+        """
+        Create a transaction with the given params (including any data pulled from
+        the ``sentry-trace`` and ``baggage`` headers).
+
+        :param headers: The dictionary with the HTTP headers to pull information from.
+        :param _sample_rand: If provided, we override the sample_rand value from the
+            incoming headers with this value. (internal use only)
+        """
+        # TODO move this to the Transaction class
+        if cls is Span:
+            logger.warning(
+                "Deprecated: use Transaction.continue_from_headers "
+                "instead of Span.continue_from_headers."
+            )
+
+        # TODO-neel move away from this kwargs stuff, it's confusing and opaque
+        # make more explicit
+        baggage = Baggage.from_incoming_header(
+            headers.get(BAGGAGE_HEADER_NAME), _sample_rand=_sample_rand
+        )
+        kwargs.update({BAGGAGE_HEADER_NAME: baggage})
+
+        sentrytrace_kwargs = extract_sentrytrace_data(
+            headers.get(SENTRY_TRACE_HEADER_NAME)
+        )
+
+        if sentrytrace_kwargs is not None:
+            kwargs.update(sentrytrace_kwargs)
+
+            # If there's an incoming sentry-trace but no incoming baggage header,
+            # for instance in traces coming from older SDKs,
+            # baggage will be empty and immutable and won't be populated as head SDK.
+            baggage.freeze()
+
+        transaction = Transaction(**kwargs)
+        transaction.same_process_as_parent = False
+
+        return transaction
+
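+    # Illustrative sketch (header values are made up): continuing a trace from
+    # plain HTTP headers, e.g. as exposed by a web framework.
+    #
+    #     incoming = {
+    #         "sentry-trace": "771a43a4192642f0b136d5159a501700-1234567890abcdef-1",
+    #         "baggage": "sentry-trace_id=771a43a4192642f0b136d5159a501700",
+    #     }
+    #     transaction = Transaction.continue_from_headers(incoming, op="http.server")
+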
+    def iter_headers(self):
+        # type: () -> Iterator[Tuple[str, str]]
+        """
+        Creates a generator which yields the span's ``sentry-trace`` and ``baggage`` headers.
+        If the span's containing transaction doesn't yet have a ``baggage`` value,
+        this will cause one to be generated and stored.
+        """
+        if not self.containing_transaction:
+            # Do not propagate headers if there is no containing transaction. Otherwise, this
+            # span ends up being the root span of a new trace, and since it does not get sent
+            # to Sentry, the trace will be missing a root transaction. The dynamic sampling
+            # context will also be missing, breaking dynamic sampling & traces.
+            return
+
+        yield SENTRY_TRACE_HEADER_NAME, self.to_traceparent()
+
+        baggage = self.containing_transaction.get_baggage().serialize()
+        if baggage:
+            yield BAGGAGE_HEADER_NAME, baggage
+
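+    # Illustrative sketch (the outbound request is hypothetical): propagating
+    # the trace by attaching the generated headers to an outgoing HTTP call.
+    #
+    #     outgoing_headers = dict(span.iter_headers())
+    #     # outgoing_headers now contains "sentry-trace" and, if a baggage
+    #     # value exists on the containing transaction, "baggage"
+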
+    @classmethod
+    def from_traceparent(
+        cls,
+        traceparent,  # type: Optional[str]
+        **kwargs,  # type: Any
+    ):
+        # type: (...) -> Optional[Transaction]
+        """
+        DEPRECATED: Use :py:meth:`sentry_sdk.tracing.Span.continue_from_headers`.
+
+        Create a ``Transaction`` with the given params, then add in data pulled from
+        the given ``sentry-trace`` header value before returning the ``Transaction``.
+        """
+        logger.warning(
+            "Deprecated: Use Transaction.continue_from_headers(headers, **kwargs) "
+            "instead of from_traceparent(traceparent, **kwargs)"
+        )
+
+        if not traceparent:
+            return None
+
+        return cls.continue_from_headers(
+            {SENTRY_TRACE_HEADER_NAME: traceparent}, **kwargs
+        )
+
+    def to_traceparent(self):
+        # type: () -> str
+        if self.sampled is True:
+            sampled = "1"
+        elif self.sampled is False:
+            sampled = "0"
+        else:
+            sampled = None
+
+        traceparent = "%s-%s" % (self.trace_id, self.span_id)
+        if sampled is not None:
+            traceparent += "-%s" % (sampled,)
+
+        return traceparent
+
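+    # The serialized traceparent has the form "<trace_id>-<span_id>[-<sampled>]",
+    # e.g. (made-up ids): "771a43a4192642f0b136d5159a501700-1234567890abcdef-1"
+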
+    def to_baggage(self):
+        # type: () -> Optional[Baggage]
+        """Returns the :py:class:`~sentry_sdk.tracing_utils.Baggage`
+        associated with this ``Span``, if any. (Taken from the root of the span tree.)
+        """
+        if self.containing_transaction:
+            return self.containing_transaction.get_baggage()
+        return None
+
+    def set_tag(self, key, value):
+        # type: (str, Any) -> None
+        self._tags[key] = value
+
+    def set_data(self, key, value):
+        # type: (str, Any) -> None
+        self._data[key] = value
+
+    def set_status(self, value):
+        # type: (str) -> None
+        self.status = value
+
+    def set_measurement(self, name, value, unit=""):
+        # type: (str, float, MeasurementUnit) -> None
+        self._measurements[name] = {"value": value, "unit": unit}
+
+    def set_thread(self, thread_id, thread_name):
+        # type: (Optional[int], Optional[str]) -> None
+
+        if thread_id is not None:
+            self.set_data(SPANDATA.THREAD_ID, str(thread_id))
+
+            if thread_name is not None:
+                self.set_data(SPANDATA.THREAD_NAME, thread_name)
+
+    def set_profiler_id(self, profiler_id):
+        # type: (Optional[str]) -> None
+        if profiler_id is not None:
+            self.set_data(SPANDATA.PROFILER_ID, profiler_id)
+
+    def set_http_status(self, http_status):
+        # type: (int) -> None
+        self.set_tag(
+            "http.status_code", str(http_status)
+        )  # we keep this for backwards compatibility
+        self.set_data(SPANDATA.HTTP_STATUS_CODE, http_status)
+        self.set_status(get_span_status_from_http_code(http_status))
+
+    def is_success(self):
+        # type: () -> bool
+        return self.status == "ok"
+
+    def finish(self, scope=None, end_timestamp=None):
+        # type: (Optional[sentry_sdk.Scope], Optional[Union[float, datetime]]) -> Optional[str]
+        """
+        Sets the end timestamp of the span.
+
+        Additionally, it creates a breadcrumb from the span
+        if the span represents a database or HTTP request.
+
+        :param scope: The scope to use for this transaction.
+            If not provided, the current scope will be used.
+        :param end_timestamp: Optional timestamp that should
+            be used as timestamp instead of the current time.
+
+        :return: Always ``None``. The type is ``Optional[str]`` to match
+            the return value of :py:meth:`sentry_sdk.tracing.Transaction.finish`.
+        """
+        if self.timestamp is not None:
+            # This span is already finished, ignore.
+            return None
+
+        try:
+            if end_timestamp:
+                if isinstance(end_timestamp, float):
+                    end_timestamp = datetime.fromtimestamp(end_timestamp, timezone.utc)
+                self.timestamp = end_timestamp
+            else:
+                elapsed = nanosecond_time() - self._start_timestamp_monotonic_ns
+                self.timestamp = self.start_timestamp + timedelta(
+                    microseconds=elapsed / 1000
+                )
+        except AttributeError:
+            self.timestamp = datetime.now(timezone.utc)
+
+        scope = scope or sentry_sdk.get_current_scope()
+        maybe_create_breadcrumbs_from_span(scope, self)
+
+        return None
+
+    def to_json(self):
+        # type: () -> Dict[str, Any]
+        """Returns a JSON-compatible representation of the span."""
+
+        rv = {
+            "trace_id": self.trace_id,
+            "span_id": self.span_id,
+            "parent_span_id": self.parent_span_id,
+            "same_process_as_parent": self.same_process_as_parent,
+            "op": self.op,
+            "description": self.description,
+            "start_timestamp": self.start_timestamp,
+            "timestamp": self.timestamp,
+            "origin": self.origin,
+        }  # type: Dict[str, Any]
+
+        if self.status:
+            self._tags["status"] = self.status
+
+        if self._local_aggregator is not None:
+            metrics_summary = self._local_aggregator.to_json()
+            if metrics_summary:
+                rv["_metrics_summary"] = metrics_summary
+
+        if len(self._measurements) > 0:
+            rv["measurements"] = self._measurements
+
+        tags = self._tags
+        if tags:
+            rv["tags"] = tags
+
+        data = self._data
+        if data:
+            rv["data"] = data
+
+        return rv
+
+    def get_trace_context(self):
+        # type: () -> Any
+        rv = {
+            "trace_id": self.trace_id,
+            "span_id": self.span_id,
+            "parent_span_id": self.parent_span_id,
+            "op": self.op,
+            "description": self.description,
+            "origin": self.origin,
+        }  # type: Dict[str, Any]
+        if self.status:
+            rv["status"] = self.status
+
+        if self.containing_transaction:
+            rv["dynamic_sampling_context"] = (
+                self.containing_transaction.get_baggage().dynamic_sampling_context()
+            )
+
+        data = {}
+
+        thread_id = self._data.get(SPANDATA.THREAD_ID)
+        if thread_id is not None:
+            data["thread.id"] = thread_id
+
+        thread_name = self._data.get(SPANDATA.THREAD_NAME)
+        if thread_name is not None:
+            data["thread.name"] = thread_name
+
+        if data:
+            rv["data"] = data
+
+        return rv
+
+    def get_profile_context(self):
+        # type: () -> Optional[ProfileContext]
+        profiler_id = self._data.get(SPANDATA.PROFILER_ID)
+        if profiler_id is None:
+            return None
+
+        return {
+            "profiler_id": profiler_id,
+        }
+
+    def update_active_thread(self):
+        # type: () -> None
+        thread_id, thread_name = get_current_thread_meta()
+        self.set_thread(thread_id, thread_name)
+
+
+class Transaction(Span):
+    """The Transaction is the root element that holds all the spans
+    for Sentry performance instrumentation.
+
+    :param name: Identifier of the transaction.
+        Will show up in the Sentry UI.
+    :param parent_sampled: Whether the parent transaction was sampled.
+        If True this transaction will be kept, if False it will be discarded.
+    :param baggage: The W3C baggage header value.
+        (see https://www.w3.org/TR/baggage/)
+    :param source: A string describing the source of the transaction name.
+        This will be used to determine the transaction's type.
+        See https://develop.sentry.dev/sdk/event-payloads/transaction/#transaction-annotations
+        for more information. Default "custom".
+    :param kwargs: Additional arguments to be passed to the Span constructor.
+        See :py:class:`sentry_sdk.tracing.Span` for available arguments.
+    """
+
+    __slots__ = (
+        "name",
+        "source",
+        "parent_sampled",
+        # used to create baggage value for head SDKs in dynamic sampling
+        "sample_rate",
+        "_measurements",
+        "_contexts",
+        "_profile",
+        "_continuous_profile",
+        "_baggage",
+        "_sample_rand",
+    )
+
+    def __init__(  # type: ignore[misc]
+        self,
+        name="",  # type: str
+        parent_sampled=None,  # type: Optional[bool]
+        baggage=None,  # type: Optional[Baggage]
+        source=TransactionSource.CUSTOM,  # type: str
+        **kwargs,  # type: Unpack[SpanKwargs]
+    ):
+        # type: (...) -> None
+
+        super().__init__(**kwargs)
+
+        self.name = name
+        self.source = source
+        self.sample_rate = None  # type: Optional[float]
+        self.parent_sampled = parent_sampled
+        self._measurements = {}  # type: Dict[str, MeasurementValue]
+        self._contexts = {}  # type: Dict[str, Any]
+        self._profile = None  # type: Optional[Profile]
+        self._continuous_profile = None  # type: Optional[ContinuousProfile]
+        self._baggage = baggage
+
+        baggage_sample_rand = (
+            None if self._baggage is None else self._baggage._sample_rand()
+        )
+        if baggage_sample_rand is not None:
+            self._sample_rand = baggage_sample_rand
+        else:
+            self._sample_rand = _generate_sample_rand(self.trace_id)
+
+    def __repr__(self):
+        # type: () -> str
+        return (
+            "<%s(name=%r, op=%r, trace_id=%r, span_id=%r, parent_span_id=%r, sampled=%r, source=%r, origin=%r)>"
+            % (
+                self.__class__.__name__,
+                self.name,
+                self.op,
+                self.trace_id,
+                self.span_id,
+                self.parent_span_id,
+                self.sampled,
+                self.source,
+                self.origin,
+            )
+        )
+
+    def _possibly_started(self):
+        # type: () -> bool
+        """Returns whether the transaction might have been started.
+
+        If this returns False, we know that the transaction was not started
+        with sentry_sdk.start_transaction, and therefore the transaction will
+        be discarded.
+        """
+
+        # We must explicitly check self.sampled is False since self.sampled can be None
+        return self._span_recorder is not None or self.sampled is False
+
+    def __enter__(self):
+        # type: () -> Transaction
+        if not self._possibly_started():
+            logger.debug(
+                "Transaction was entered without being started with sentry_sdk.start_transaction."
+                "The transaction will not be sent to Sentry. To fix, start the transaction by"
+                "passing it to sentry_sdk.start_transaction."
+            )
+
+        super().__enter__()
+
+        if self._profile is not None:
+            self._profile.__enter__()
+
+        return self
+
+    def __exit__(self, ty, value, tb):
+        # type: (Optional[Any], Optional[Any], Optional[Any]) -> None
+        if self._profile is not None:
+            self._profile.__exit__(ty, value, tb)
+
+        if self._continuous_profile is not None:
+            self._continuous_profile.stop()
+
+        super().__exit__(ty, value, tb)
+
+    @property
+    def containing_transaction(self):
+        # type: () -> Transaction
+        """The root element of the span tree.
+        In the case of a transaction it is the transaction itself.
+        """
+
+        # Transactions (as spans) belong to themselves (as transactions). This
+        # is a getter rather than a regular attribute to avoid having a circular
+        # reference.
+        return self
+
+    def _get_scope_from_finish_args(
+        self,
+        scope_arg,  # type: Optional[Union[sentry_sdk.Scope, sentry_sdk.Hub]]
+        hub_arg,  # type: Optional[Union[sentry_sdk.Scope, sentry_sdk.Hub]]
+    ):
+        # type: (...) -> Optional[sentry_sdk.Scope]
+        """
+        Logic to get the scope from the arguments passed to finish. This
+        function exists for backwards compatibility with the old finish.
+
+        TODO: Remove this function in the next major version.
+        """
+        scope_or_hub = scope_arg
+        if hub_arg is not None:
+            warnings.warn(
+                "The `hub` parameter is deprecated. Please use the `scope` parameter, instead.",
+                DeprecationWarning,
+                stacklevel=3,
+            )
+
+            scope_or_hub = hub_arg
+
+        if isinstance(scope_or_hub, sentry_sdk.Hub):
+            warnings.warn(
+                "Passing a Hub to finish is deprecated. Please pass a Scope, instead.",
+                DeprecationWarning,
+                stacklevel=3,
+            )
+
+            return scope_or_hub.scope
+
+        return scope_or_hub
+
+    def finish(
+        self,
+        scope=None,  # type: Optional[sentry_sdk.Scope]
+        end_timestamp=None,  # type: Optional[Union[float, datetime]]
+        *,
+        hub=None,  # type: Optional[sentry_sdk.Hub]
+    ):
+        # type: (...) -> Optional[str]
+        """Finishes the transaction and sends it to Sentry.
+        All finished spans in the transaction will also be sent to Sentry.
+
+        :param scope: The Scope to use for this transaction.
+            If not provided, the current Scope will be used.
+        :param end_timestamp: Optional timestamp that should
+            be used as timestamp instead of the current time.
+        :param hub: The hub to use for this transaction.
+            This argument is DEPRECATED. Please use the `scope`
+            parameter, instead.
+
+        :return: The event ID if the transaction was sent to Sentry,
+            otherwise None.
+        """
+        if self.timestamp is not None:
+            # This transaction is already finished, ignore.
+            return None
+
+        # For backwards compatibility, we must handle the case where `scope`
+        # or `hub` could both either be a `Scope` or a `Hub`.
+        scope = self._get_scope_from_finish_args(
+            scope, hub
+        )  # type: Optional[sentry_sdk.Scope]
+
+        scope = scope or self.scope or sentry_sdk.get_current_scope()
+        client = sentry_sdk.get_client()
+
+        if not client.is_active():
+            # We have no active client and therefore nowhere to send this transaction.
+            return None
+
+        if self._span_recorder is None:
+            # Explicit check against False needed because self.sampled might be None
+            if self.sampled is False:
+                logger.debug("Discarding transaction because sampled = False")
+            else:
+                logger.debug(
+                    "Discarding transaction because it was not started with sentry_sdk.start_transaction"
+                )
+
+            # This is not entirely accurate, because discards here are not
+            # exclusively based on the sample rate but can also come from the
+            # traces sampler; we nevertheless record them the same way here.
+            if client.transport and has_tracing_enabled(client.options):
+                if client.monitor and client.monitor.downsample_factor > 0:
+                    reason = "backpressure"
+                else:
+                    reason = "sample_rate"
+
+                client.transport.record_lost_event(reason, data_category="transaction")
+
+                # Only one span (the transaction itself) is discarded, since we did not record any spans here.
+                client.transport.record_lost_event(reason, data_category="span")
+            return None
+
+        if not self.name:
+            logger.warning(
+                "Transaction has no name, falling back to `<unlabeled transaction>`."
+            )
+            self.name = "<unlabeled transaction>"
+
+        super().finish(scope, end_timestamp)
+
+        if not self.sampled:
+            # At this point a `sampled = None` should have already been resolved
+            # to a concrete decision.
+            if self.sampled is None:
+                logger.warning("Discarding transaction without sampling decision.")
+
+            return None
+
+        finished_spans = [
+            span.to_json()
+            for span in self._span_recorder.spans
+            if span.timestamp is not None
+        ]
+
+        len_diff = len(self._span_recorder.spans) - len(finished_spans)
+        dropped_spans = len_diff + self._span_recorder.dropped_spans
+
+        # we do this to break the circular reference of transaction -> span
+        # recorder -> span -> containing transaction (which is where we started)
+        # before either the spans or the transaction goes out of scope and has
+        # to be garbage collected
+        self._span_recorder = None
+
+        contexts = {}
+        contexts.update(self._contexts)
+        contexts.update({"trace": self.get_trace_context()})
+        profile_context = self.get_profile_context()
+        if profile_context is not None:
+            contexts.update({"profile": profile_context})
+
+        event = {
+            "type": "transaction",
+            "transaction": self.name,
+            "transaction_info": {"source": self.source},
+            "contexts": contexts,
+            "tags": self._tags,
+            "timestamp": self.timestamp,
+            "start_timestamp": self.start_timestamp,
+            "spans": finished_spans,
+        }  # type: Event
+
+        if dropped_spans > 0:
+            event["_dropped_spans"] = dropped_spans
+
+        if self._profile is not None and self._profile.valid():
+            event["profile"] = self._profile
+            self._profile = None
+
+        event["measurements"] = self._measurements
+
+        # This is here since `to_json` is not invoked.  This really should
+        # be gone when we switch to onlyspans.
+        if self._local_aggregator is not None:
+            metrics_summary = self._local_aggregator.to_json()
+            if metrics_summary:
+                event["_metrics_summary"] = metrics_summary
+
+        return scope.capture_event(event)
+
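+    # Illustrative lifecycle sketch (names are hypothetical): transactions are
+    # usually finished implicitly by the context manager, which calls finish()
+    # and thereby captures the transaction event with all finished spans.
+    #
+    #     with sentry_sdk.start_transaction(op="task", name="nightly-job"):
+    #         ...  # spans created here are sent along with the transaction
+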
+    def set_measurement(self, name, value, unit=""):
+        # type: (str, float, MeasurementUnit) -> None
+        self._measurements[name] = {"value": value, "unit": unit}
+
+    def set_context(self, key, value):
+        # type: (str, dict[str, Any]) -> None
+        """Sets a context. Transactions can have multiple contexts
+        and they should follow the format described in the "Contexts Interface"
+        documentation.
+
+        :param key: The name of the context.
+        :param value: The information about the context.
+        """
+        self._contexts[key] = value
+
+    def set_http_status(self, http_status):
+        # type: (int) -> None
+        """Sets the status of the Transaction according to the given HTTP status.
+
+        :param http_status: The HTTP status code."""
+        super().set_http_status(http_status)
+        self.set_context("response", {"status_code": http_status})
+
+    def to_json(self):
+        # type: () -> Dict[str, Any]
+        """Returns a JSON-compatible representation of the transaction."""
+        rv = super().to_json()
+
+        rv["name"] = self.name
+        rv["source"] = self.source
+        rv["sampled"] = self.sampled
+
+        return rv
+
+    def get_trace_context(self):
+        # type: () -> Any
+        trace_context = super().get_trace_context()
+
+        if self._data:
+            trace_context["data"] = self._data
+
+        return trace_context
+
+    def get_baggage(self):
+        # type: () -> Baggage
+        """Returns the :py:class:`~sentry_sdk.tracing_utils.Baggage`
+        associated with the Transaction.
+
+        The first time a new baggage with Sentry items is made,
+        it will be frozen."""
+        if not self._baggage or self._baggage.mutable:
+            self._baggage = Baggage.populate_from_transaction(self)
+
+        return self._baggage
+
+    def _set_initial_sampling_decision(self, sampling_context):
+        # type: (SamplingContext) -> None
+        """
+        Sets the transaction's sampling decision, according to the following
+        precedence rules:
+
+        1. If a sampling decision is passed to `start_transaction`
+        (`start_transaction(name: "my transaction", sampled: True)`), that
+        decision will be used, regardless of anything else
+
+        2. If `traces_sampler` is defined, its decision will be used. It can
+        choose to keep or ignore any parent sampling decision, or use the
+        sampling context data to make its own decision or to choose a sample
+        rate for the transaction.
+
+        3. If `traces_sampler` is not defined, but there's a parent sampling
+        decision, the parent sampling decision will be used.
+
+        4. If `traces_sampler` is not defined and there's no parent sampling
+        decision, `traces_sample_rate` will be used.
+        """
+        client = sentry_sdk.get_client()
+
+        transaction_description = "{op}transaction <{name}>".format(
+            op=("<" + self.op + "> " if self.op else ""), name=self.name
+        )
+
+        # nothing to do if tracing is disabled
+        if not has_tracing_enabled(client.options):
+            self.sampled = False
+            return
+
+        # if the user has forced a sampling decision by passing a `sampled`
+        # value when starting the transaction, go with that
+        if self.sampled is not None:
+            self.sample_rate = float(self.sampled)
+            return
+
+        # we would have bailed already if neither `traces_sampler` nor
+        # `traces_sample_rate` were defined, so one of these should work; prefer
+        # the hook if so
+        sample_rate = (
+            client.options["traces_sampler"](sampling_context)
+            if callable(client.options.get("traces_sampler"))
+            else (
+                # default inheritance behavior
+                sampling_context["parent_sampled"]
+                if sampling_context["parent_sampled"] is not None
+                else client.options["traces_sample_rate"]
+            )
+        )
+
+        # Since this is coming from the user (or from a function provided by the
+        # user), who knows what we might get. (The only valid values are
+        # booleans or numbers between 0 and 1.)
+        if not is_valid_sample_rate(sample_rate, source="Tracing"):
+            logger.warning(
+                "[Tracing] Discarding {transaction_description} because of invalid sample rate.".format(
+                    transaction_description=transaction_description,
+                )
+            )
+            self.sampled = False
+            return
+
+        self.sample_rate = float(sample_rate)
+
+        if client.monitor:
+            self.sample_rate /= 2**client.monitor.downsample_factor
+
+        # if the function returned 0 (or false), or if `traces_sample_rate` is
+        # 0, it's a sign the transaction should be dropped
+        if not self.sample_rate:
+            logger.debug(
+                "[Tracing] Discarding {transaction_description} because {reason}".format(
+                    transaction_description=transaction_description,
+                    reason=(
+                        "traces_sampler returned 0 or False"
+                        if callable(client.options.get("traces_sampler"))
+                        else "traces_sample_rate is set to 0"
+                    ),
+                )
+            )
+            self.sampled = False
+            return
+
+        # Now we roll the dice. self._sample_rand is inclusive of 0, but not of 1,
+        # so strict < is safe here. Note that self.sample_rate was already cast
+        # to a float above (True becomes 1.0 and False becomes 0.0).
+        self.sampled = self._sample_rand < self.sample_rate
+
+        if self.sampled:
+            logger.debug(
+                "[Tracing] Starting {transaction_description}".format(
+                    transaction_description=transaction_description,
+                )
+            )
+        else:
+            logger.debug(
+                "[Tracing] Discarding {transaction_description} because it's not included in the random sample (sampling rate = {sample_rate})".format(
+                    transaction_description=transaction_description,
+                    sample_rate=self.sample_rate,
+                )
+            )
+
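+    # Illustrative configuration sketch for the precedence rules above (the
+    # sampler logic shown is hypothetical):
+    #
+    #     def my_traces_sampler(sampling_context):
+    #         if sampling_context["parent_sampled"] is not None:
+    #             return sampling_context["parent_sampled"]  # inherit the parent decision
+    #         return 0.25  # otherwise sample 25% of transactions
+    #
+    #     sentry_sdk.init(dsn="...", traces_sampler=my_traces_sampler)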
+
+class NoOpSpan(Span):
+    def __repr__(self):
+        # type: () -> str
+        return "<%s>" % self.__class__.__name__
+
+    @property
+    def containing_transaction(self):
+        # type: () -> Optional[Transaction]
+        return None
+
+    def start_child(self, instrumenter=INSTRUMENTER.SENTRY, **kwargs):
+        # type: (str, **Any) -> NoOpSpan
+        return NoOpSpan()
+
+    def to_traceparent(self):
+        # type: () -> str
+        return ""
+
+    def to_baggage(self):
+        # type: () -> Optional[Baggage]
+        return None
+
+    def get_baggage(self):
+        # type: () -> Optional[Baggage]
+        return None
+
+    def iter_headers(self):
+        # type: () -> Iterator[Tuple[str, str]]
+        return iter(())
+
+    def set_tag(self, key, value):
+        # type: (str, Any) -> None
+        pass
+
+    def set_data(self, key, value):
+        # type: (str, Any) -> None
+        pass
+
+    def set_status(self, value):
+        # type: (str) -> None
+        pass
+
+    def set_http_status(self, http_status):
+        # type: (int) -> None
+        pass
+
+    def is_success(self):
+        # type: () -> bool
+        return True
+
+    def to_json(self):
+        # type: () -> Dict[str, Any]
+        return {}
+
+    def get_trace_context(self):
+        # type: () -> Any
+        return {}
+
+    def get_profile_context(self):
+        # type: () -> Any
+        return {}
+
+    def finish(
+        self,
+        scope=None,  # type: Optional[sentry_sdk.Scope]
+        end_timestamp=None,  # type: Optional[Union[float, datetime]]
+        *,
+        hub=None,  # type: Optional[sentry_sdk.Hub]
+    ):
+        # type: (...) -> Optional[str]
+        """
+        The `hub` parameter is deprecated. Please use the `scope` parameter, instead.
+        """
+        pass
+
+    def set_measurement(self, name, value, unit=""):
+        # type: (str, float, MeasurementUnit) -> None
+        pass
+
+    def set_context(self, key, value):
+        # type: (str, dict[str, Any]) -> None
+        pass
+
+    def init_span_recorder(self, maxlen):
+        # type: (int) -> None
+        pass
+
+    def _set_initial_sampling_decision(self, sampling_context):
+        # type: (SamplingContext) -> None
+        pass
+
+
+if TYPE_CHECKING:
+
+    @overload
+    def trace(func=None):
+        # type: (None) -> Callable[[Callable[P, R]], Callable[P, R]]
+        pass
+
+    @overload
+    def trace(func):
+        # type: (Callable[P, R]) -> Callable[P, R]
+        pass
+
+
+def trace(func=None):
+    # type: (Optional[Callable[P, R]]) -> Union[Callable[P, R], Callable[[Callable[P, R]], Callable[P, R]]]
+    """
+    Decorator to start a child span under the existing current transaction.
+    If there is no current transaction, then nothing will be traced.
+
+    .. code-block::
+        :caption: Usage
+
+        import sentry_sdk
+
+        @sentry_sdk.trace
+        def my_function():
+            ...
+
+        @sentry_sdk.trace
+        async def my_async_function():
+            ...
+    """
+    from sentry_sdk.tracing_utils import start_child_span_decorator
+
+    # This pattern allows usage of both @trace and @trace(...)
+    # See https://stackoverflow.com/questions/52126071/decorator-with-arguments-avoid-parenthesis-when-no-arguments/52126278
+    if func:
+        return start_child_span_decorator(func)
+    else:
+        return start_child_span_decorator
+
+
+# Circular imports
+
+from sentry_sdk.tracing_utils import (
+    Baggage,
+    EnvironHeaders,
+    extract_sentrytrace_data,
+    _generate_sample_rand,
+    has_tracing_enabled,
+    maybe_create_breadcrumbs_from_span,
+)
+
+with warnings.catch_warnings():
+    # The code in this file which uses `LocalAggregator` is only called from the deprecated `metrics` module.
+    warnings.simplefilter("ignore", DeprecationWarning)
+    from sentry_sdk.metrics import LocalAggregator
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/tracing_utils.py b/.venv/lib/python3.12/site-packages/sentry_sdk/tracing_utils.py
new file mode 100644
index 00000000..ba566957
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/tracing_utils.py
@@ -0,0 +1,904 @@
+import contextlib
+import inspect
+import os
+import re
+import sys
+from collections.abc import Mapping
+from datetime import timedelta
+from decimal import ROUND_DOWN, Context, Decimal
+from functools import wraps
+from random import Random
+from urllib.parse import quote, unquote
+import uuid
+
+import sentry_sdk
+from sentry_sdk.consts import OP, SPANDATA
+from sentry_sdk.utils import (
+    capture_internal_exceptions,
+    filename_for_module,
+    Dsn,
+    logger,
+    match_regex_list,
+    qualname_from_function,
+    to_string,
+    try_convert,
+    is_sentry_url,
+    _is_external_source,
+    _is_in_project_root,
+    _module_in_list,
+)
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+    from typing import Dict
+    from typing import Generator
+    from typing import Optional
+    from typing import Union
+
+    from types import FrameType
+
+
+SENTRY_TRACE_REGEX = re.compile(
+    "^[ \t]*"  # whitespace
+    "([0-9a-f]{32})?"  # trace_id
+    "-?([0-9a-f]{16})?"  # span_id
+    "-?([01])?"  # sampled
+    "[ \t]*$"  # whitespace
+)
+
+
+# This is a normal base64 regex, modified to reflect that fact that we strip the
+# trailing = or == off
+base64_stripped = (
+    # any of the characters in the base64 "alphabet", in multiples of 4
+    "([a-zA-Z0-9+/]{4})*"
+    # either nothing or 2 or 3 base64-alphabet characters (see
+    # https://en.wikipedia.org/wiki/Base64#Decoding_Base64_without_padding for
+    # why there's never only 1 extra character)
+    "([a-zA-Z0-9+/]{2,3})?"
+)
+
+
+class EnvironHeaders(Mapping):  # type: ignore
+    def __init__(
+        self,
+        environ,  # type: Mapping[str, str]
+        prefix="HTTP_",  # type: str
+    ):
+        # type: (...) -> None
+        self.environ = environ
+        self.prefix = prefix
+
+    def __getitem__(self, key):
+        # type: (str) -> Optional[Any]
+        return self.environ[self.prefix + key.replace("-", "_").upper()]
+
+    def __len__(self):
+        # type: () -> int
+        return sum(1 for _ in iter(self))
+
+    def __iter__(self):
+        # type: () -> Generator[str, None, None]
+        for k in self.environ:
+            if not isinstance(k, str):
+                continue
+
+            k = k.replace("-", "_").upper()
+            if not k.startswith(self.prefix):
+                continue
+
+            yield k[len(self.prefix) :]
+
+
+def has_tracing_enabled(options):
+    # type: (Optional[Dict[str, Any]]) -> bool
+    """
+    Returns True if either traces_sample_rate or traces_sampler is
+    defined, and enable_tracing is not explicitly set to False.
+    """
+    if options is None:
+        return False
+
+    return bool(
+        options.get("enable_tracing") is not False
+        and (
+            options.get("traces_sample_rate") is not None
+            or options.get("traces_sampler") is not None
+        )
+    )
+
+
+@contextlib.contextmanager
+def record_sql_queries(
+    cursor,  # type: Any
+    query,  # type: Any
+    params_list,  # type: Any
+    paramstyle,  # type: Optional[str]
+    executemany,  # type: bool
+    record_cursor_repr=False,  # type: bool
+    span_origin="manual",  # type: str
+):
+    # type: (...) -> Generator[sentry_sdk.tracing.Span, None, None]
+
+    # TODO: Bring back capturing of params by default
+    if sentry_sdk.get_client().options["_experiments"].get("record_sql_params", False):
+        if not params_list or params_list == [None]:
+            params_list = None
+
+        if paramstyle == "pyformat":
+            paramstyle = "format"
+    else:
+        params_list = None
+        paramstyle = None
+
+    query = _format_sql(cursor, query)
+
+    data = {}
+    if params_list is not None:
+        data["db.params"] = params_list
+    if paramstyle is not None:
+        data["db.paramstyle"] = paramstyle
+    if executemany:
+        data["db.executemany"] = True
+    if record_cursor_repr and cursor is not None:
+        data["db.cursor"] = cursor
+
+    with capture_internal_exceptions():
+        sentry_sdk.add_breadcrumb(message=query, category="query", data=data)
+
+    with sentry_sdk.start_span(
+        op=OP.DB,
+        name=query,
+        origin=span_origin,
+    ) as span:
+        for k, v in data.items():
+            span.set_data(k, v)
+        yield span
+
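+# Illustrative sketch (cursor and query are hypothetical): integrations wrap
+# database calls in record_sql_queries to get a span plus a breadcrumb per query.
+#
+#     with record_sql_queries(
+#         cursor, "SELECT * FROM users WHERE id = %s", [(42,)],
+#         paramstyle="format", executemany=False,
+#     ) as span:
+#         cursor.execute("SELECT * FROM users WHERE id = %s", (42,))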
+
+def maybe_create_breadcrumbs_from_span(scope, span):
+    # type: (sentry_sdk.Scope, sentry_sdk.tracing.Span) -> None
+    if span.op == OP.DB_REDIS:
+        scope.add_breadcrumb(
+            message=span.description, type="redis", category="redis", data=span._tags
+        )
+
+    elif span.op == OP.HTTP_CLIENT:
+        level = None
+        status_code = span._data.get(SPANDATA.HTTP_STATUS_CODE)
+        if status_code:
+            if 500 <= status_code <= 599:
+                level = "error"
+            elif 400 <= status_code <= 499:
+                level = "warning"
+
+        if level:
+            scope.add_breadcrumb(
+                type="http", category="httplib", data=span._data, level=level
+            )
+        else:
+            scope.add_breadcrumb(type="http", category="httplib", data=span._data)
+
+    elif span.op == "subprocess":
+        scope.add_breadcrumb(
+            type="subprocess",
+            category="subprocess",
+            message=span.description,
+            data=span._data,
+        )
+
+
+def _get_frame_module_abs_path(frame):
+    # type: (FrameType) -> Optional[str]
+    try:
+        return frame.f_code.co_filename
+    except Exception:
+        return None
+
+
+def _should_be_included(
+    is_sentry_sdk_frame,  # type: bool
+    namespace,  # type: Optional[str]
+    in_app_include,  # type: Optional[list[str]]
+    in_app_exclude,  # type: Optional[list[str]]
+    abs_path,  # type: Optional[str]
+    project_root,  # type: Optional[str]
+):
+    # type: (...) -> bool
+    # in_app_include takes precedence over in_app_exclude
+    should_be_included = _module_in_list(namespace, in_app_include)
+    should_be_excluded = _is_external_source(abs_path) or _module_in_list(
+        namespace, in_app_exclude
+    )
+    return not is_sentry_sdk_frame and (
+        should_be_included
+        or (_is_in_project_root(abs_path, project_root) and not should_be_excluded)
+    )
+
+
+def add_query_source(span):
+    # type: (sentry_sdk.tracing.Span) -> None
+    """
+    Adds OTel compatible source code information to the span
+    """
+    client = sentry_sdk.get_client()
+    if not client.is_active():
+        return
+
+    if span.timestamp is None or span.start_timestamp is None:
+        return
+
+    should_add_query_source = client.options.get("enable_db_query_source", True)
+    if not should_add_query_source:
+        return
+
+    duration = span.timestamp - span.start_timestamp
+    threshold = client.options.get("db_query_source_threshold_ms", 0)
+    slow_query = duration / timedelta(milliseconds=1) > threshold
+
+    if not slow_query:
+        return
+
+    project_root = client.options["project_root"]
+    in_app_include = client.options.get("in_app_include")
+    in_app_exclude = client.options.get("in_app_exclude")
+
+    # Find the correct frame
+    frame = sys._getframe()  # type: Union[FrameType, None]
+    while frame is not None:
+        abs_path = _get_frame_module_abs_path(frame)
+
+        try:
+            namespace = frame.f_globals.get("__name__")  # type: Optional[str]
+        except Exception:
+            namespace = None
+
+        is_sentry_sdk_frame = namespace is not None and namespace.startswith(
+            "sentry_sdk."
+        )
+
+        should_be_included = _should_be_included(
+            is_sentry_sdk_frame=is_sentry_sdk_frame,
+            namespace=namespace,
+            in_app_include=in_app_include,
+            in_app_exclude=in_app_exclude,
+            abs_path=abs_path,
+            project_root=project_root,
+        )
+        if should_be_included:
+            break
+
+        frame = frame.f_back
+    else:
+        frame = None
+
+    # Set the data
+    if frame is not None:
+        try:
+            lineno = frame.f_lineno
+        except Exception:
+            lineno = None
+        if lineno is not None:
+            span.set_data(SPANDATA.CODE_LINENO, frame.f_lineno)
+
+        try:
+            namespace = frame.f_globals.get("__name__")
+        except Exception:
+            namespace = None
+        if namespace is not None:
+            span.set_data(SPANDATA.CODE_NAMESPACE, namespace)
+
+        filepath = _get_frame_module_abs_path(frame)
+        if filepath is not None:
+            if namespace is not None:
+                in_app_path = filename_for_module(namespace, filepath)
+            elif project_root is not None and filepath.startswith(project_root):
+                in_app_path = filepath.replace(project_root, "").lstrip(os.sep)
+            else:
+                in_app_path = filepath
+            span.set_data(SPANDATA.CODE_FILEPATH, in_app_path)
+
+        try:
+            code_function = frame.f_code.co_name
+        except Exception:
+            code_function = None
+
+        if code_function is not None:
+            span.set_data(SPANDATA.CODE_FUNCTION, frame.f_code.co_name)
+
+
+def extract_sentrytrace_data(header):
+    # type: (Optional[str]) -> Optional[Dict[str, Union[str, bool, None]]]
+    """
+    Given a `sentry-trace` header string, return a dictionary of data.
+    """
+    if not header:
+        return None
+
+    if header.startswith("00-") and header.endswith("-00"):
+        header = header[3:-3]
+
+    match = SENTRY_TRACE_REGEX.match(header)
+    if not match:
+        return None
+
+    trace_id, parent_span_id, sampled_str = match.groups()
+    parent_sampled = None
+
+    if trace_id:
+        trace_id = "{:032x}".format(int(trace_id, 16))
+    if parent_span_id:
+        parent_span_id = "{:016x}".format(int(parent_span_id, 16))
+    if sampled_str:
+        parent_sampled = sampled_str != "0"
+
+    return {
+        "trace_id": trace_id,
+        "parent_span_id": parent_span_id,
+        "parent_sampled": parent_sampled,
+    }
+
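+# Illustrative sketch (ids are made up): parsing a sentry-trace header.
+#
+#     extract_sentrytrace_data("771a43a4192642f0b136d5159a501700-1234567890abcdef-1")
+#     # -> {"trace_id": "771a43a4192642f0b136d5159a501700",
+#     #     "parent_span_id": "1234567890abcdef",
+#     #     "parent_sampled": True}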
+
+def _format_sql(cursor, sql):
+    # type: (Any, str) -> Optional[str]
+
+    real_sql = None
+
+    # If we're using psycopg2, it could be that we're
+    # looking at a query that uses Composed objects. Use psycopg2's mogrify
+    # function to format the query. We lose per-parameter trimming but gain
+    # accuracy in formatting.
+    try:
+        if hasattr(cursor, "mogrify"):
+            real_sql = cursor.mogrify(sql)
+            if isinstance(real_sql, bytes):
+                real_sql = real_sql.decode(cursor.connection.encoding)
+    except Exception:
+        real_sql = None
+
+    return real_sql or to_string(sql)
+
+
+class PropagationContext:
+    """
+    The PropagationContext represents the data of a trace in Sentry.
+    """
+
+    __slots__ = (
+        "_trace_id",
+        "_span_id",
+        "parent_span_id",
+        "parent_sampled",
+        "dynamic_sampling_context",
+    )
+
+    def __init__(
+        self,
+        trace_id=None,  # type: Optional[str]
+        span_id=None,  # type: Optional[str]
+        parent_span_id=None,  # type: Optional[str]
+        parent_sampled=None,  # type: Optional[bool]
+        dynamic_sampling_context=None,  # type: Optional[Dict[str, str]]
+    ):
+        # type: (...) -> None
+        self._trace_id = trace_id
+        """The trace id of the Sentry trace."""
+
+        self._span_id = span_id
+        """The span id of the currently executing span."""
+
+        self.parent_span_id = parent_span_id
+        """The id of the parent span that started this span.
+        The parent span could also be a span in an upstream service."""
+
+        self.parent_sampled = parent_sampled
+        """Boolean indicator if the parent span was sampled.
+        Important when the parent span originated in an upstream service,
+        because we want to sample the whole trace, or nothing from the trace."""
+
+        self.dynamic_sampling_context = dynamic_sampling_context
+        """Data that is used for dynamic sampling decisions."""
+
+    @classmethod
+    def from_incoming_data(cls, incoming_data):
+        # type: (Dict[str, Any]) -> Optional[PropagationContext]
+        propagation_context = None
+
+        normalized_data = normalize_incoming_data(incoming_data)
+        baggage_header = normalized_data.get(BAGGAGE_HEADER_NAME)
+        if baggage_header:
+            propagation_context = PropagationContext()
+            propagation_context.dynamic_sampling_context = Baggage.from_incoming_header(
+                baggage_header
+            ).dynamic_sampling_context()
+
+        sentry_trace_header = normalized_data.get(SENTRY_TRACE_HEADER_NAME)
+        if sentry_trace_header:
+            sentrytrace_data = extract_sentrytrace_data(sentry_trace_header)
+            if sentrytrace_data is not None:
+                if propagation_context is None:
+                    propagation_context = PropagationContext()
+                propagation_context.update(sentrytrace_data)
+
+        if propagation_context is not None:
+            propagation_context._fill_sample_rand()
+
+        return propagation_context
+
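+    # Illustrative sketch (ids are made up): building a PropagationContext
+    # from normalized incoming headers.
+    #
+    #     ctx = PropagationContext.from_incoming_data(
+    #         {"sentry-trace": "771a43a4192642f0b136d5159a501700-1234567890abcdef-1"}
+    #     )
+    #     # ctx.trace_id == "771a43a4192642f0b136d5159a501700"
+    #     # ctx.parent_sampled is True
+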
+    @property
+    def trace_id(self):
+        # type: () -> str
+        """The trace id of the Sentry trace."""
+        if not self._trace_id:
+            # New trace, don't fill in sample_rand
+            self._trace_id = uuid.uuid4().hex
+
+        return self._trace_id
+
+    @trace_id.setter
+    def trace_id(self, value):
+        # type: (str) -> None
+        self._trace_id = value
+
+    @property
+    def span_id(self):
+        # type: () -> str
+        """The span id of the currently executed span."""
+        if not self._span_id:
+            self._span_id = uuid.uuid4().hex[16:]
+
+        return self._span_id
+
+    @span_id.setter
+    def span_id(self, value):
+        # type: (str) -> None
+        self._span_id = value
+
+    def update(self, other_dict):
+        # type: (Dict[str, Any]) -> None
+        """
+        Updates the PropagationContext with data from the given dictionary.
+        """
+        for key, value in other_dict.items():
+            try:
+                setattr(self, key, value)
+            except AttributeError:
+                pass
+
+    def __repr__(self):
+        # type: () -> str
+        return "<PropagationContext _trace_id={} _span_id={} parent_span_id={} parent_sampled={} dynamic_sampling_context={}>".format(
+            self._trace_id,
+            self._span_id,
+            self.parent_span_id,
+            self.parent_sampled,
+            self.dynamic_sampling_context,
+        )
+
+    def _fill_sample_rand(self):
+        # type: () -> None
+        """
+        Ensure that there is a valid sample_rand value in the dynamic_sampling_context.
+
+        If there is a valid sample_rand value in the dynamic_sampling_context, we keep it.
+        Otherwise, we generate a sample_rand value according to the following:
+
+          - If we have a parent_sampled value and a sample_rate in the DSC, we compute
+            a sample_rand value randomly in the range:
+                - [0, sample_rate) if parent_sampled is True,
+                - or, in the range [sample_rate, 1) if parent_sampled is False.
+
+          - If either parent_sampled or sample_rate is missing, we generate a random
+            value in the range [0, 1).
+
+        The sample_rand is deterministically generated from the trace_id, if present.
+
+        This function does nothing if there is no dynamic_sampling_context.
+        """
+        if self.dynamic_sampling_context is None:
+            return
+
+        sample_rand = try_convert(
+            Decimal, self.dynamic_sampling_context.get("sample_rand")
+        )
+        if sample_rand is not None and 0 <= sample_rand < 1:
+            # sample_rand is present and valid, so don't overwrite it
+            return
+
+        # Get the sample rate and compute the transformation that will map the random value
+        # to the desired range: [0, 1), [0, sample_rate), or [sample_rate, 1).
+        sample_rate = try_convert(
+            float, self.dynamic_sampling_context.get("sample_rate")
+        )
+        lower, upper = _sample_rand_range(self.parent_sampled, sample_rate)
+
+        try:
+            sample_rand = _generate_sample_rand(self.trace_id, interval=(lower, upper))
+        except ValueError:
+            # ValueError is raised if the interval is invalid, i.e. lower >= upper.
+            # lower >= upper might happen if the incoming trace's sampled flag
+            # and sample_rate are inconsistent, e.g. sample_rate=0.0 but sampled=True.
+            # We cannot generate a sensible sample_rand value in this case.
+            logger.debug(
+                f"Could not backfill sample_rand, since parent_sampled={self.parent_sampled} "
+                f"and sample_rate={sample_rate}."
+            )
+            return
+
+        self.dynamic_sampling_context["sample_rand"] = (
+            f"{sample_rand:.6f}"  # noqa: E231
+        )
+
+    def _sample_rand(self):
+        # type: () -> Optional[str]
+        """Convenience method to get the sample_rand value from the dynamic_sampling_context."""
+        if self.dynamic_sampling_context is None:
+            return None
+
+        return self.dynamic_sampling_context.get("sample_rand")
+
+
+class Baggage:
+    """
+    The W3C Baggage header information (see https://www.w3.org/TR/baggage/).
+
+    Before mutating a `Baggage` object, calling code must check that `mutable` is `True`.
+    Mutating a `Baggage` object that has `mutable` set to `False` is not allowed, but
+    it is the caller's responsibility to enforce this restriction.
+    """
+
+    __slots__ = ("sentry_items", "third_party_items", "mutable")
+
+    SENTRY_PREFIX = "sentry-"
+    SENTRY_PREFIX_REGEX = re.compile("^sentry-")
+
+    def __init__(
+        self,
+        sentry_items,  # type: Dict[str, str]
+        third_party_items="",  # type: str
+        mutable=True,  # type: bool
+    ):
+        self.sentry_items = sentry_items
+        self.third_party_items = third_party_items
+        self.mutable = mutable
+
+    @classmethod
+    def from_incoming_header(
+        cls,
+        header,  # type: Optional[str]
+        *,
+        _sample_rand=None,  # type: Optional[str]
+    ):
+        # type: (...) -> Baggage
+        """
+        Create a ``Baggage`` object from an incoming ``baggage`` header.
+        The baggage is frozen (made immutable) if the header already
+        contains Sentry items or if ``_sample_rand`` is provided.
+        """
+        sentry_items = {}
+        third_party_items = ""
+        mutable = True
+
+        if header:
+            for item in header.split(","):
+                if "=" not in item:
+                    continue
+
+                with capture_internal_exceptions():
+                    item = item.strip()
+                    key, val = item.split("=")
+                    if Baggage.SENTRY_PREFIX_REGEX.match(key):
+                        baggage_key = unquote(key.split("-")[1])
+                        sentry_items[baggage_key] = unquote(val)
+                        mutable = False
+                    else:
+                        third_party_items += ("," if third_party_items else "") + item
+
+        if _sample_rand is not None:
+            sentry_items["sample_rand"] = str(_sample_rand)
+            mutable = False
+
+        return Baggage(sentry_items, third_party_items, mutable)
+
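+    # Illustrative sketch (header values are made up): sentry-prefixed items
+    # populate sentry_items and freeze the baggage; everything else is kept
+    # verbatim as third-party baggage.
+    #
+    #     baggage = Baggage.from_incoming_header(
+    #         "other-vendor=foo,sentry-trace_id=771a43a4192642f0b136d5159a501700"
+    #     )
+    #     # baggage.sentry_items == {"trace_id": "771a43a4192642f0b136d5159a501700"}
+    #     # baggage.third_party_items == "other-vendor=foo"
+    #     # baggage.mutable is False
+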
+    @classmethod
+    def from_options(cls, scope):
+        # type: (sentry_sdk.scope.Scope) -> Optional[Baggage]
+
+        sentry_items = {}  # type: Dict[str, str]
+        third_party_items = ""
+        mutable = False
+
+        client = sentry_sdk.get_client()
+
+        if not client.is_active() or scope._propagation_context is None:
+            return Baggage(sentry_items)
+
+        options = client.options
+        propagation_context = scope._propagation_context
+
+        if propagation_context is not None:
+            sentry_items["trace_id"] = propagation_context.trace_id
+
+        if options.get("environment"):
+            sentry_items["environment"] = options["environment"]
+
+        if options.get("release"):
+            sentry_items["release"] = options["release"]
+
+        if options.get("dsn"):
+            sentry_items["public_key"] = Dsn(options["dsn"]).public_key
+
+        if options.get("traces_sample_rate"):
+            sentry_items["sample_rate"] = str(options["traces_sample_rate"])
+
+        return Baggage(sentry_items, third_party_items, mutable)
+
+    @classmethod
+    def populate_from_transaction(cls, transaction):
+        # type: (sentry_sdk.tracing.Transaction) -> Baggage
+        """
+        Populate a fresh baggage entry with sentry_items and make it
+        immutable if this is the head SDK, i.e. the SDK originating the trace.
+        """
+        client = sentry_sdk.get_client()
+        sentry_items = {}  # type: Dict[str, str]
+
+        if not client.is_active():
+            return Baggage(sentry_items)
+
+        options = client.options or {}
+
+        sentry_items["trace_id"] = transaction.trace_id
+        sentry_items["sample_rand"] = str(transaction._sample_rand)
+
+        if options.get("environment"):
+            sentry_items["environment"] = options["environment"]
+
+        if options.get("release"):
+            sentry_items["release"] = options["release"]
+
+        if options.get("dsn"):
+            sentry_items["public_key"] = Dsn(options["dsn"]).public_key
+
+        if (
+            transaction.name
+            and transaction.source not in LOW_QUALITY_TRANSACTION_SOURCES
+        ):
+            sentry_items["transaction"] = transaction.name
+
+        if transaction.sample_rate is not None:
+            sentry_items["sample_rate"] = str(transaction.sample_rate)
+
+        if transaction.sampled is not None:
+            sentry_items["sampled"] = "true" if transaction.sampled else "false"
+
+        # there's an existing baggage but it was mutable,
+        # which is why we are creating this new baggage.
+        # However, if by chance the user put some sentry items in there, give them precedence.
+        if transaction._baggage and transaction._baggage.sentry_items:
+            sentry_items.update(transaction._baggage.sentry_items)
+
+        return Baggage(sentry_items, mutable=False)
+
+    def freeze(self):
+        # type: () -> None
+        self.mutable = False
+
+    def dynamic_sampling_context(self):
+        # type: () -> Dict[str, str]
+        header = {}
+
+        for key, item in self.sentry_items.items():
+            header[key] = item
+
+        return header
+
+    def serialize(self, include_third_party=False):
+        # type: (bool) -> str
+        items = []
+
+        for key, val in self.sentry_items.items():
+            with capture_internal_exceptions():
+                item = Baggage.SENTRY_PREFIX + quote(key) + "=" + quote(str(val))
+                items.append(item)
+
+        if include_third_party:
+            items.append(self.third_party_items)
+
+        return ",".join(items)
+
+    @staticmethod
+    def strip_sentry_baggage(header):
+        # type: (str) -> str
+        """Remove Sentry baggage from the given header.
+
+        Given a Baggage header, return a new Baggage header with all Sentry baggage items removed.
+        """
+        return ",".join(
+            (
+                item
+                for item in header.split(",")
+                if not Baggage.SENTRY_PREFIX_REGEX.match(item.strip())
+            )
+        )
+
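+    # Illustrative sketch (values are made up):
+    #
+    #     Baggage.strip_sentry_baggage("foo=bar,sentry-trace_id=abc")
+    #     # -> "foo=bar"
+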
+    def _sample_rand(self):
+        # type: () -> Optional[Decimal]
+        """Convenience method to get the sample_rand value from the sentry_items.
+
+        We validate the value and parse it as a Decimal before returning it. The value is considered
+        valid if it is a Decimal in the range [0, 1).
+        """
+        sample_rand = try_convert(Decimal, self.sentry_items.get("sample_rand"))
+
+        if sample_rand is not None and Decimal(0) <= sample_rand < Decimal(1):
+            return sample_rand
+
+        return None
+
+    def __repr__(self):
+        # type: () -> str
+        return f'<Baggage "{self.serialize(include_third_party=True)}", mutable={self.mutable}>'
+
+
+def should_propagate_trace(client, url):
+    # type: (sentry_sdk.client.BaseClient, str) -> bool
+    """
+    Returns True if url matches trace_propagation_targets configured in the given client. Otherwise, returns False.
+    """
+    trace_propagation_targets = client.options["trace_propagation_targets"]
+
+    if is_sentry_url(client, url):
+        return False
+
+    return match_regex_list(url, trace_propagation_targets, substring_matching=True)
+
+
+def normalize_incoming_data(incoming_data):
+    # type: (Dict[str, Any]) -> Dict[str, Any]
+    """
+    Normalizes incoming data so the keys are all lowercase with dashes
+    instead of underscores, and stripped of known prefixes (e.g. ``HTTP_``).
+    """
+    data = {}
+    for key, value in incoming_data.items():
+        if key.startswith("HTTP_"):
+            key = key[5:]
+
+        key = key.replace("_", "-").lower()
+        data[key] = value
+
+    return data
+
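+# Illustrative sketch: WSGI-style keys are normalized to plain header names.
+#
+#     normalize_incoming_data({"HTTP_SENTRY_TRACE": "abc-def-1"})
+#     # -> {"sentry-trace": "abc-def-1"}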
+
+def start_child_span_decorator(func):
+    # type: (Any) -> Any
+    """
+    Decorator to add child spans for functions.
+
+    See also ``sentry_sdk.tracing.trace()``.
+    """
+    # Asynchronous case
+    if inspect.iscoroutinefunction(func):
+
+        @wraps(func)
+        async def func_with_tracing(*args, **kwargs):
+            # type: (*Any, **Any) -> Any
+
+            span = get_current_span()
+
+            if span is None:
+                logger.debug(
+                    "Cannot create a child span for %s. "
+                    "Please start a Sentry transaction before calling this function.",
+                    qualname_from_function(func),
+                )
+                return await func(*args, **kwargs)
+
+            with span.start_child(
+                op=OP.FUNCTION,
+                name=qualname_from_function(func),
+            ):
+                return await func(*args, **kwargs)
+
+        try:
+            func_with_tracing.__signature__ = inspect.signature(func)  # type: ignore[attr-defined]
+        except Exception:
+            pass
+
+    # Synchronous case
+    else:
+
+        @wraps(func)
+        def func_with_tracing(*args, **kwargs):
+            # type: (*Any, **Any) -> Any
+
+            span = get_current_span()
+
+            if span is None:
+                logger.debug(
+                    "Cannot create a child span for %s. "
+                    "Please start a Sentry transaction before calling this function.",
+                    qualname_from_function(func),
+                )
+                return func(*args, **kwargs)
+
+            with span.start_child(
+                op=OP.FUNCTION,
+                name=qualname_from_function(func),
+            ):
+                return func(*args, **kwargs)
+
+        try:
+            func_with_tracing.__signature__ = inspect.signature(func)  # type: ignore[attr-defined]
+        except Exception:
+            pass
+
+    return func_with_tracing
+
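+# Usage sketch (editor's illustration): this decorator typically reaches user
+# code via ``sentry_sdk.tracing.trace``, e.g.
+#
+#     @sentry_sdk.trace
+#     def process(item):
+#         ...
+#
+# When a transaction is active, each call to ``process`` is recorded as a
+# child span; otherwise the function runs untraced and a debug message is
+# logged.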
+
+def get_current_span(scope=None):
+    # type: (Optional[sentry_sdk.Scope]) -> Optional[Span]
+    """
+    Returns the currently active span if there is one running, otherwise `None`
+    """
+    scope = scope or sentry_sdk.get_current_scope()
+    current_span = scope.span
+    return current_span
+
+
+def _generate_sample_rand(
+    trace_id,  # type: Optional[str]
+    *,
+    interval=(0.0, 1.0),  # type: tuple[float, float]
+):
+    # type: (...) -> Decimal
+    """Generate a sample_rand value from a trace ID.
+
+    The generated value will be pseudorandomly chosen from the provided
+    interval. Specifically, given (lower, upper) = interval, the generated
+    value will be in the range [lower, upper). The value has 6-digit precision,
+    so when printing with .6f, the value will never be rounded up.
+
+    The pseudorandom number generator is seeded with the trace ID.
+    """
+    lower, upper = interval
+    if not lower < upper:  # using `if lower >= upper` would handle NaNs incorrectly
+        raise ValueError("Invalid interval: lower must be less than upper")
+
+    rng = Random(trace_id)
+    sample_rand = upper
+    while sample_rand >= upper:
+        sample_rand = rng.uniform(lower, upper)
+
+    # Round down to exactly six decimal-digit precision.
+    # Setting the context is needed to avoid an InvalidOperation exception
+    # in case the user has changed the default precision.
+    return Decimal(sample_rand).quantize(
+        Decimal("0.000001"), rounding=ROUND_DOWN, context=Context(prec=6)
+    )
+
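+# Editor's note (illustrative): because the RNG is seeded with the trace ID,
+# the result is deterministic per trace, e.g. calling
+# _generate_sample_rand("771a43a4192642f0b136d5159a501700") twice yields the
+# same Decimal with six decimal digits, somewhere in [0, 1).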
+
+def _sample_rand_range(parent_sampled, sample_rate):
+    # type: (Optional[bool], Optional[float]) -> tuple[float, float]
+    """
+    Compute the lower (inclusive) and upper (exclusive) bounds of the range of values
+    that a generated sample_rand value must fall into, given the parent_sampled and
+    sample_rate values.
+    """
+    if parent_sampled is None or sample_rate is None:
+        return 0.0, 1.0
+    elif parent_sampled is True:
+        return 0.0, sample_rate
+    else:  # parent_sampled is False
+        return sample_rate, 1.0
+
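+# Editor's note (illustrative): with sample_rate=0.25, a sampled parent
+# (parent_sampled=True) constrains sample_rand to [0.0, 0.25), an unsampled
+# parent to [0.25, 1.0), and a missing parent decision to the full [0.0, 1.0).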
+
+# Circular imports
+from sentry_sdk.tracing import (
+    BAGGAGE_HEADER_NAME,
+    LOW_QUALITY_TRANSACTION_SOURCES,
+    SENTRY_TRACE_HEADER_NAME,
+)
+
+if TYPE_CHECKING:
+    from sentry_sdk.tracing import Span
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/transport.py b/.venv/lib/python3.12/site-packages/sentry_sdk/transport.py
new file mode 100644
index 00000000..efc955ca
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/transport.py
@@ -0,0 +1,910 @@
+from abc import ABC, abstractmethod
+import io
+import os
+import gzip
+import socket
+import ssl
+import time
+import warnings
+from datetime import datetime, timedelta, timezone
+from collections import defaultdict
+from urllib.request import getproxies
+
+try:
+    import brotli  # type: ignore
+except ImportError:
+    brotli = None
+
+import urllib3
+import certifi
+
+import sentry_sdk
+from sentry_sdk.consts import EndpointType
+from sentry_sdk.utils import Dsn, logger, capture_internal_exceptions
+from sentry_sdk.worker import BackgroundWorker
+from sentry_sdk.envelope import Envelope, Item, PayloadRef
+
+from typing import TYPE_CHECKING, cast, List, Dict
+
+if TYPE_CHECKING:
+    from typing import Any
+    from typing import Callable
+    from typing import DefaultDict
+    from typing import Iterable
+    from typing import Mapping
+    from typing import Optional
+    from typing import Self
+    from typing import Tuple
+    from typing import Type
+    from typing import Union
+
+    from urllib3.poolmanager import PoolManager
+    from urllib3.poolmanager import ProxyManager
+
+    from sentry_sdk._types import Event, EventDataCategory
+
+KEEP_ALIVE_SOCKET_OPTIONS = []
+for option in [
+    (socket.SOL_SOCKET, lambda: getattr(socket, "SO_KEEPALIVE"), 1),  # noqa: B009
+    (socket.SOL_TCP, lambda: getattr(socket, "TCP_KEEPIDLE"), 45),  # noqa: B009
+    (socket.SOL_TCP, lambda: getattr(socket, "TCP_KEEPINTVL"), 10),  # noqa: B009
+    (socket.SOL_TCP, lambda: getattr(socket, "TCP_KEEPCNT"), 6),  # noqa: B009
+]:
+    try:
+        KEEP_ALIVE_SOCKET_OPTIONS.append((option[0], option[1](), option[2]))
+    except AttributeError:
+        # a specific option might not be available on specific systems,
+        # e.g. TCP_KEEPIDLE doesn't exist on macOS
+        pass
+
+
+class Transport(ABC):
+    """Baseclass for all transports.
+
+    A transport is used to send an event to sentry.
+    """
+
+    parsed_dsn = None  # type: Optional[Dsn]
+
+    def __init__(self, options=None):
+        # type: (Self, Optional[Dict[str, Any]]) -> None
+        self.options = options
+        if options and options["dsn"] is not None and options["dsn"]:
+            self.parsed_dsn = Dsn(options["dsn"])
+        else:
+            self.parsed_dsn = None
+
+    def capture_event(self, event):
+        # type: (Self, Event) -> None
+        """
+        DEPRECATED: Please use capture_envelope instead.
+
+        This gets invoked with the event dictionary when an event should
+        be sent to Sentry.
+        """
+
+        warnings.warn(
+            "capture_event is deprecated, please use capture_envelope instead!",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+
+        envelope = Envelope()
+        envelope.add_event(event)
+        self.capture_envelope(envelope)
+
+    @abstractmethod
+    def capture_envelope(self, envelope):
+        # type: (Self, Envelope) -> None
+        """
+        Send an envelope to Sentry.
+
+        Envelopes are a data container format that can hold any type of data
+        submitted to Sentry. We use it to send all event data (including errors,
+        transactions, crons check-ins, etc.) to Sentry.
+        """
+        pass
+
+    def flush(
+        self,
+        timeout,
+        callback=None,
+    ):
+        # type: (Self, float, Optional[Any]) -> None
+        """
+        Wait `timeout` seconds for the current events to be sent out.
+
+        The default implementation is a no-op, since this method may only be relevant to some transports.
+        Subclasses should override this method if necessary.
+        """
+        return None
+
+    def kill(self):
+        # type: (Self) -> None
+        """
+        Forcefully kills the transport.
+
+        The default implementation is a no-op, since this method may only be relevant to some transports.
+        Subclasses should override this method if necessary.
+        """
+        return None
+
+    def record_lost_event(
+        self,
+        reason,  # type: str
+        data_category=None,  # type: Optional[EventDataCategory]
+        item=None,  # type: Optional[Item]
+        *,
+        quantity=1,  # type: int
+    ):
+        # type: (...) -> None
+        """This increments a counter for event loss by reason and
+        data category by the given positive-int quantity (default 1).
+
+        If an item is provided, the data category and quantity are
+        extracted from the item, and the values passed for
+        data_category and quantity are ignored.
+
+        When recording a lost transaction via data_category="transaction",
+        the calling code should also record the lost spans via this method.
+        When recording lost spans, `quantity` should be set to the number
+        of contained spans, plus one for the transaction itself. When
+        passing an Item containing a transaction via the `item` parameter,
+        this method automatically records the lost spans.
+        """
+        return None
+
+    def is_healthy(self):
+        # type: (Self) -> bool
+        return True
+
+    def __del__(self):
+        # type: (Self) -> None
+        try:
+            self.kill()
+        except Exception:
+            pass
+
+
+def _parse_rate_limits(header, now=None):
+    # type: (str, Optional[datetime]) -> Iterable[Tuple[Optional[EventDataCategory], datetime]]
+    if now is None:
+        now = datetime.now(timezone.utc)
+
+    for limit in header.split(","):
+        try:
+            parameters = limit.strip().split(":")
+            retry_after_val, categories = parameters[:2]
+
+            retry_after = now + timedelta(seconds=int(retry_after_val))
+            for category in categories and categories.split(";") or (None,):
+                if category == "metric_bucket":
+                    try:
+                        namespaces = parameters[4].split(";")
+                    except IndexError:
+                        namespaces = []
+
+                    if not namespaces or "custom" in namespaces:
+                        yield category, retry_after  # type: ignore
+
+                else:
+                    yield category, retry_after  # type: ignore
+        except (LookupError, ValueError):
+            continue
+
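+# Editor's note (illustrative): each comma-separated limit has the shape
+# "<retry_after>:<categories>:<scope>:<reason>:<namespaces>", e.g.
+#
+#     "60:error;transaction:organization, 120::organization"
+#
+# yields ("error", now+60s), ("transaction", now+60s), and (None, now+120s);
+# an empty category list means the limit applies to all categories.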
+
+class BaseHttpTransport(Transport):
+    """The base HTTP transport."""
+
+    def __init__(self, options):
+        # type: (Self, Dict[str, Any]) -> None
+        from sentry_sdk.consts import VERSION
+
+        Transport.__init__(self, options)
+        assert self.parsed_dsn is not None
+        self.options = options  # type: Dict[str, Any]
+        self._worker = BackgroundWorker(queue_size=options["transport_queue_size"])
+        self._auth = self.parsed_dsn.to_auth("sentry.python/%s" % VERSION)
+        self._disabled_until = {}  # type: Dict[Optional[EventDataCategory], datetime]
+        # We only use this Retry() class for the `get_retry_after` method it exposes
+        self._retry = urllib3.util.Retry()
+        self._discarded_events = defaultdict(
+            int
+        )  # type: DefaultDict[Tuple[EventDataCategory, str], int]
+        self._last_client_report_sent = time.time()
+
+        self._pool = self._make_pool()
+
+        # Backwards compatibility for deprecated `self.hub_class` attribute
+        self._hub_cls = sentry_sdk.Hub
+
+        experiments = options.get("_experiments", {})
+        compression_level = experiments.get(
+            "transport_compression_level",
+            experiments.get("transport_zlib_compression_level"),
+        )
+        compression_algo = experiments.get(
+            "transport_compression_algo",
+            (
+                "gzip"
+                # if only compression level is set, assume gzip for backwards compatibility
+                # if brotli is not available, fall back to gzip
+                if compression_level is not None or brotli is None
+                else "br"
+            ),
+        )
+
+        if compression_algo == "br" and brotli is None:
+            logger.warning(
+                "You asked for brotli compression without the Brotli module, falling back to gzip -9"
+            )
+            compression_algo = "gzip"
+            compression_level = None
+
+        if compression_algo not in ("br", "gzip"):
+            logger.warning(
+                "Unknown compression algo %s, disabling compression", compression_algo
+            )
+            self._compression_level = 0
+            self._compression_algo = None
+        else:
+            self._compression_algo = compression_algo
+
+        if compression_level is not None:
+            self._compression_level = compression_level
+        elif self._compression_algo == "gzip":
+            self._compression_level = 9
+        elif self._compression_algo == "br":
+            self._compression_level = 4
+
+    def record_lost_event(
+        self,
+        reason,  # type: str
+        data_category=None,  # type: Optional[EventDataCategory]
+        item=None,  # type: Optional[Item]
+        *,
+        quantity=1,  # type: int
+    ):
+        # type: (...) -> None
+        if not self.options["send_client_reports"]:
+            return
+
+        if item is not None:
+            data_category = item.data_category
+            quantity = 1  # If an item is provided, we always count it as 1 (except for attachments, handled below).
+
+            if data_category == "transaction":
+                # Also record the lost spans
+                event = item.get_transaction_event() or {}
+
+                # +1 for the transaction itself
+                span_count = (
+                    len(cast(List[Dict[str, object]], event.get("spans") or [])) + 1
+                )
+                self.record_lost_event(reason, "span", quantity=span_count)
+
+            elif data_category == "attachment":
+                # Attachments are counted by size in bytes. An empty attachment
+                # still counts as 1 so that it is not recorded as zero quantity.
+                quantity = len(item.get_bytes()) or 1
+
+        elif data_category is None:
+            raise TypeError("data category not provided")
+
+        self._discarded_events[data_category, reason] += quantity
+
+    def _get_header_value(self, response, header):
+        # type: (Self, Any, str) -> Optional[str]
+        return response.headers.get(header)
+
+    def _update_rate_limits(self, response):
+        # type: (Self, Union[urllib3.BaseHTTPResponse, httpcore.Response]) -> None
+
+        # Newer Sentry servers send the x-sentry-rate-limits header with more
+        # rate limit insights. We honor this header regardless of the status
+        # code and update our internal rate limits accordingly.
+        header = self._get_header_value(response, "x-sentry-rate-limits")
+        if header:
+            logger.warning("Rate-limited via x-sentry-rate-limits")
+            self._disabled_until.update(_parse_rate_limits(header))
+
+        # Older Sentry servers only communicate global rate limit hits via the
+        # Retry-After header on 429 responses. Newer servers can also emit this
+        # header if a proxy in front wants to slow things down globally.
+        elif response.status == 429:
+            logger.warning("Rate-limited via 429")
+            retry_after_value = self._get_header_value(response, "Retry-After")
+            retry_after = (
+                self._retry.parse_retry_after(retry_after_value)
+                if retry_after_value is not None
+                else None
+            ) or 60
+            self._disabled_until[None] = datetime.now(timezone.utc) + timedelta(
+                seconds=retry_after
+            )
+
+    def _send_request(
+        self,
+        body,
+        headers,
+        endpoint_type=EndpointType.ENVELOPE,
+        envelope=None,
+    ):
+        # type: (Self, bytes, Dict[str, str], EndpointType, Optional[Envelope]) -> None
+
+        def record_loss(reason):
+            # type: (str) -> None
+            if envelope is None:
+                self.record_lost_event(reason, data_category="error")
+            else:
+                for item in envelope.items:
+                    self.record_lost_event(reason, item=item)
+
+        headers.update(
+            {
+                "User-Agent": str(self._auth.client),
+                "X-Sentry-Auth": str(self._auth.to_header()),
+            }
+        )
+        try:
+            response = self._request(
+                "POST",
+                endpoint_type,
+                body,
+                headers,
+            )
+        except Exception:
+            self.on_dropped_event("network")
+            record_loss("network_error")
+            raise
+
+        try:
+            self._update_rate_limits(response)
+
+            if response.status == 429:
+                # Something was rate limited, but we already acted on this in
+                # `self._update_rate_limits`. Note that we do not want to
+                # record event loss here, as an outcome will have been recorded
+                # in Relay already.
+                self.on_dropped_event("status_429")
+
+            elif response.status >= 300 or response.status < 200:
+                logger.error(
+                    "Unexpected status code: %s (body: %s)",
+                    response.status,
+                    getattr(response, "data", getattr(response, "content", None)),
+                )
+                self.on_dropped_event("status_{}".format(response.status))
+                record_loss("network_error")
+        finally:
+            response.close()
+
+    def on_dropped_event(self, _reason):
+        # type: (Self, str) -> None
+        return None
+
+    def _fetch_pending_client_report(self, force=False, interval=60):
+        # type: (Self, bool, int) -> Optional[Item]
+        if not self.options["send_client_reports"]:
+            return None
+
+        if not (force or self._last_client_report_sent < time.time() - interval):
+            return None
+
+        discarded_events = self._discarded_events
+        self._discarded_events = defaultdict(int)
+        self._last_client_report_sent = time.time()
+
+        if not discarded_events:
+            return None
+
+        return Item(
+            PayloadRef(
+                json={
+                    "timestamp": time.time(),
+                    "discarded_events": [
+                        {"reason": reason, "category": category, "quantity": quantity}
+                        for (
+                            (category, reason),
+                            quantity,
+                        ) in discarded_events.items()
+                    ],
+                }
+            ),
+            type="client_report",
+        )
+
+    def _flush_client_reports(self, force=False):
+        # type: (Self, bool) -> None
+        client_report = self._fetch_pending_client_report(force=force, interval=60)
+        if client_report is not None:
+            self.capture_envelope(Envelope(items=[client_report]))
+
+    def _check_disabled(self, category):
+        # type: (str) -> bool
+        def _disabled(bucket):
+            # type: (Any) -> bool
+
+            # The envelope item type used for metrics is statsd
+            # whereas the rate limit category is metric_bucket
+            if bucket == "statsd":
+                bucket = "metric_bucket"
+
+            ts = self._disabled_until.get(bucket)
+            return ts is not None and ts > datetime.now(timezone.utc)
+
+        return _disabled(category) or _disabled(None)
+
+    def _is_rate_limited(self):
+        # type: (Self) -> bool
+        return any(
+            ts > datetime.now(timezone.utc) for ts in self._disabled_until.values()
+        )
+
+    def _is_worker_full(self):
+        # type: (Self) -> bool
+        return self._worker.full()
+
+    def is_healthy(self):
+        # type: (Self) -> bool
+        return not (self._is_worker_full() or self._is_rate_limited())
+
+    def _send_envelope(self, envelope):
+        # type: (Self, Envelope) -> None
+
+        # remove all items from the envelope which are over quota
+        new_items = []
+        for item in envelope.items:
+            if self._check_disabled(item.data_category):
+                if item.data_category in ("transaction", "error", "default", "statsd"):
+                    self.on_dropped_event("self_rate_limits")
+                self.record_lost_event("ratelimit_backoff", item=item)
+            else:
+                new_items.append(item)
+
+        # Since we're modifying the envelope here make a copy so that others
+        # that hold references do not see their envelope modified.
+        envelope = Envelope(headers=envelope.headers, items=new_items)
+
+        if not envelope.items:
+            return None
+
+        # Since we are already sending an envelope, check whether a client
+        # report is pending and attach it to the envelope scheduled for
+        # sending. This currently typically attaches the client report to the
+        # most recent session update.
+        client_report_item = self._fetch_pending_client_report(interval=30)
+        if client_report_item is not None:
+            envelope.items.append(client_report_item)
+
+        content_encoding, body = self._serialize_envelope(envelope)
+
+        assert self.parsed_dsn is not None
+        logger.debug(
+            "Sending envelope [%s] project:%s host:%s",
+            envelope.description,
+            self.parsed_dsn.project_id,
+            self.parsed_dsn.host,
+        )
+
+        headers = {
+            "Content-Type": "application/x-sentry-envelope",
+        }
+        if content_encoding:
+            headers["Content-Encoding"] = content_encoding
+
+        self._send_request(
+            body.getvalue(),
+            headers=headers,
+            endpoint_type=EndpointType.ENVELOPE,
+            envelope=envelope,
+        )
+        return None
+
+    def _serialize_envelope(self, envelope):
+        # type: (Self, Envelope) -> tuple[Optional[str], io.BytesIO]
+        content_encoding = None
+        body = io.BytesIO()
+        if self._compression_level == 0 or self._compression_algo is None:
+            envelope.serialize_into(body)
+        else:
+            content_encoding = self._compression_algo
+            if self._compression_algo == "br" and brotli is not None:
+                body.write(
+                    brotli.compress(
+                        envelope.serialize(), quality=self._compression_level
+                    )
+                )
+            else:  # assume gzip as we sanitize the algo value in init
+                with gzip.GzipFile(
+                    fileobj=body, mode="w", compresslevel=self._compression_level
+                ) as f:
+                    envelope.serialize_into(f)
+
+        return content_encoding, body
+
+    def _get_pool_options(self):
+        # type: (Self) -> Dict[str, Any]
+        raise NotImplementedError()
+
+    def _in_no_proxy(self, parsed_dsn):
+        # type: (Self, Dsn) -> bool
+        no_proxy = getproxies().get("no")
+        if not no_proxy:
+            return False
+        for host in no_proxy.split(","):
+            host = host.strip()
+            if parsed_dsn.host.endswith(host) or parsed_dsn.netloc.endswith(host):
+                return True
+        return False
+
+    def _make_pool(self):
+        # type: (Self) -> Union[PoolManager, ProxyManager, httpcore.SOCKSProxy, httpcore.HTTPProxy, httpcore.ConnectionPool]
+        raise NotImplementedError()
+
+    def _request(
+        self,
+        method,
+        endpoint_type,
+        body,
+        headers,
+    ):
+        # type: (Self, str, EndpointType, Any, Mapping[str, str]) -> Union[urllib3.BaseHTTPResponse, httpcore.Response]
+        raise NotImplementedError()
+
+    def capture_envelope(
+        self, envelope  # type: Envelope
+    ):
+        # type: (...) -> None
+        def send_envelope_wrapper():
+            # type: () -> None
+            with capture_internal_exceptions():
+                self._send_envelope(envelope)
+                self._flush_client_reports()
+
+        if not self._worker.submit(send_envelope_wrapper):
+            self.on_dropped_event("full_queue")
+            for item in envelope.items:
+                self.record_lost_event("queue_overflow", item=item)
+
+    def flush(
+        self,
+        timeout,
+        callback=None,
+    ):
+        # type: (Self, float, Optional[Callable[[int, float], None]]) -> None
+        logger.debug("Flushing HTTP transport")
+
+        if timeout > 0:
+            self._worker.submit(lambda: self._flush_client_reports(force=True))
+            self._worker.flush(timeout, callback)
+
+    def kill(self):
+        # type: (Self) -> None
+        logger.debug("Killing HTTP transport")
+        self._worker.kill()
+
+    @staticmethod
+    def _warn_hub_cls():
+        # type: () -> None
+        """Convenience method to warn users about the deprecation of the `hub_cls` attribute."""
+        warnings.warn(
+            "The `hub_cls` attribute is deprecated and will be removed in a future release.",
+            DeprecationWarning,
+            stacklevel=3,
+        )
+
+    @property
+    def hub_cls(self):
+        # type: (Self) -> type[sentry_sdk.Hub]
+        """DEPRECATED: This attribute is deprecated and will be removed in a future release."""
+        HttpTransport._warn_hub_cls()
+        return self._hub_cls
+
+    @hub_cls.setter
+    def hub_cls(self, value):
+        # type: (Self, type[sentry_sdk.Hub]) -> None
+        """DEPRECATED: This attribute is deprecated and will be removed in a future release."""
+        HttpTransport._warn_hub_cls()
+        self._hub_cls = value
+
+
+class HttpTransport(BaseHttpTransport):
+    if TYPE_CHECKING:
+        _pool: Union[PoolManager, ProxyManager]
+
+    def _get_pool_options(self):
+        # type: (Self) -> Dict[str, Any]
+
+        num_pools = self.options.get("_experiments", {}).get("transport_num_pools")
+        options = {
+            "num_pools": 2 if num_pools is None else int(num_pools),
+            "cert_reqs": "CERT_REQUIRED",
+        }
+
+        socket_options = None  # type: Optional[List[Tuple[int, int, int | bytes]]]
+
+        if self.options["socket_options"] is not None:
+            socket_options = self.options["socket_options"]
+
+        if self.options["keep_alive"]:
+            if socket_options is None:
+                socket_options = []
+
+            used_options = {(o[0], o[1]) for o in socket_options}
+            for default_option in KEEP_ALIVE_SOCKET_OPTIONS:
+                if (default_option[0], default_option[1]) not in used_options:
+                    socket_options.append(default_option)
+
+        if socket_options is not None:
+            options["socket_options"] = socket_options
+
+        options["ca_certs"] = (
+            self.options["ca_certs"]  # User-provided bundle from the SDK init
+            or os.environ.get("SSL_CERT_FILE")
+            or os.environ.get("REQUESTS_CA_BUNDLE")
+            or certifi.where()
+        )
+
+        options["cert_file"] = self.options["cert_file"] or os.environ.get(
+            "CLIENT_CERT_FILE"
+        )
+        options["key_file"] = self.options["key_file"] or os.environ.get(
+            "CLIENT_KEY_FILE"
+        )
+
+        return options
+
+    def _make_pool(self):
+        # type: (Self) -> Union[PoolManager, ProxyManager]
+        if self.parsed_dsn is None:
+            raise ValueError("Cannot create HTTP-based transport without valid DSN")
+
+        proxy = None
+        no_proxy = self._in_no_proxy(self.parsed_dsn)
+
+        # try HTTPS first
+        https_proxy = self.options["https_proxy"]
+        if self.parsed_dsn.scheme == "https" and (https_proxy != ""):
+            proxy = https_proxy or (not no_proxy and getproxies().get("https"))
+
+        # maybe fallback to HTTP proxy
+        http_proxy = self.options["http_proxy"]
+        if not proxy and (http_proxy != ""):
+            proxy = http_proxy or (not no_proxy and getproxies().get("http"))
+
+        opts = self._get_pool_options()
+
+        if proxy:
+            proxy_headers = self.options["proxy_headers"]
+            if proxy_headers:
+                opts["proxy_headers"] = proxy_headers
+
+            if proxy.startswith("socks"):
+                use_socks_proxy = True
+                try:
+                    # Check if PySocks dependency is available
+                    from urllib3.contrib.socks import SOCKSProxyManager
+                except ImportError:
+                    use_socks_proxy = False
+                    logger.warning(
+                        "You have configured a SOCKS proxy (%s) but support for SOCKS proxies is not installed. Disabling proxy support. Please add `PySocks` (or `urllib3` with the `[socks]` extra) to your dependencies.",
+                        proxy,
+                    )
+
+                if use_socks_proxy:
+                    return SOCKSProxyManager(proxy, **opts)
+                else:
+                    return urllib3.PoolManager(**opts)
+            else:
+                return urllib3.ProxyManager(proxy, **opts)
+        else:
+            return urllib3.PoolManager(**opts)
+
+    def _request(
+        self,
+        method,
+        endpoint_type,
+        body,
+        headers,
+    ):
+        # type: (Self, str, EndpointType, Any, Mapping[str, str]) -> urllib3.BaseHTTPResponse
+        return self._pool.request(
+            method,
+            self._auth.get_api_url(endpoint_type),
+            body=body,
+            headers=headers,
+        )
+
+
+try:
+    import httpcore
+    import h2  # noqa: F401
+except ImportError:
+    # Sorry, no Http2Transport for you
+    class Http2Transport(HttpTransport):
+        def __init__(self, options):
+            # type: (Self, Dict[str, Any]) -> None
+            super().__init__(options)
+            logger.warning(
+                "You tried to use HTTP2Transport but don't have httpcore[http2] installed. Falling back to HTTPTransport."
+            )
+
+else:
+
+    class Http2Transport(BaseHttpTransport):  # type: ignore
+        """The HTTP2 transport based on httpcore."""
+
+        if TYPE_CHECKING:
+            _pool: Union[
+                httpcore.SOCKSProxy, httpcore.HTTPProxy, httpcore.ConnectionPool
+            ]
+
+        def _get_header_value(self, response, header):
+            # type: (Self, httpcore.Response, str) -> Optional[str]
+            return next(
+                (
+                    val.decode("ascii")
+                    for key, val in response.headers
+                    if key.decode("ascii").lower() == header
+                ),
+                None,
+            )
+
+        def _request(
+            self,
+            method,
+            endpoint_type,
+            body,
+            headers,
+        ):
+            # type: (Self, str, EndpointType, Any, Mapping[str, str]) -> httpcore.Response
+            response = self._pool.request(
+                method,
+                self._auth.get_api_url(endpoint_type),
+                content=body,
+                headers=headers,  # type: ignore
+            )
+            return response
+
+        def _get_pool_options(self):
+            # type: (Self) -> Dict[str, Any]
+            options = {
+                "http2": self.parsed_dsn is not None
+                and self.parsed_dsn.scheme == "https",
+                "retries": 3,
+            }  # type: Dict[str, Any]
+
+            socket_options = (
+                self.options["socket_options"]
+                if self.options["socket_options"] is not None
+                else []
+            )
+
+            used_options = {(o[0], o[1]) for o in socket_options}
+            for default_option in KEEP_ALIVE_SOCKET_OPTIONS:
+                if (default_option[0], default_option[1]) not in used_options:
+                    socket_options.append(default_option)
+
+            options["socket_options"] = socket_options
+
+            ssl_context = ssl.create_default_context()
+            ssl_context.load_verify_locations(
+                self.options["ca_certs"]  # User-provided bundle from the SDK init
+                or os.environ.get("SSL_CERT_FILE")
+                or os.environ.get("REQUESTS_CA_BUNDLE")
+                or certifi.where()
+            )
+            cert_file = self.options["cert_file"] or os.environ.get("CLIENT_CERT_FILE")
+            key_file = self.options["key_file"] or os.environ.get("CLIENT_KEY_FILE")
+            if cert_file is not None:
+                ssl_context.load_cert_chain(cert_file, key_file)
+
+            options["ssl_context"] = ssl_context
+
+            return options
+
+        def _make_pool(self):
+            # type: (Self) -> Union[httpcore.SOCKSProxy, httpcore.HTTPProxy, httpcore.ConnectionPool]
+            if self.parsed_dsn is None:
+                raise ValueError("Cannot create HTTP-based transport without valid DSN")
+            proxy = None
+            no_proxy = self._in_no_proxy(self.parsed_dsn)
+
+            # try HTTPS first
+            https_proxy = self.options["https_proxy"]
+            if self.parsed_dsn.scheme == "https" and (https_proxy != ""):
+                proxy = https_proxy or (not no_proxy and getproxies().get("https"))
+
+            # maybe fallback to HTTP proxy
+            http_proxy = self.options["http_proxy"]
+            if not proxy and (http_proxy != ""):
+                proxy = http_proxy or (not no_proxy and getproxies().get("http"))
+
+            opts = self._get_pool_options()
+
+            if proxy:
+                proxy_headers = self.options["proxy_headers"]
+                if proxy_headers:
+                    opts["proxy_headers"] = proxy_headers
+
+                if proxy.startswith("socks"):
+                    try:
+                        if "socket_options" in opts:
+                            socket_options = opts.pop("socket_options")
+                            if socket_options:
+                                logger.warning(
+                                    "You have defined socket_options but using a SOCKS proxy which doesn't support these. We'll ignore socket_options."
+                                )
+                        return httpcore.SOCKSProxy(proxy_url=proxy, **opts)
+                    except RuntimeError:
+                        logger.warning(
+                            "You have configured a SOCKS proxy (%s) but support for SOCKS proxies is not installed. Disabling proxy support.",
+                            proxy,
+                        )
+                else:
+                    return httpcore.HTTPProxy(proxy_url=proxy, **opts)
+
+            return httpcore.ConnectionPool(**opts)
+
+
+class _FunctionTransport(Transport):
+    """
+    DEPRECATED: Users wishing to provide a custom transport should subclass
+    the Transport class, rather than providing a function.
+    """
+
+    def __init__(
+        self, func  # type: Callable[[Event], None]
+    ):
+        # type: (...) -> None
+        Transport.__init__(self)
+        self._func = func
+
+    def capture_event(
+        self, event  # type: Event
+    ):
+        # type: (...) -> None
+        self._func(event)
+        return None
+
+    def capture_envelope(self, envelope: Envelope) -> None:
+        # Since function transports expect to be called with an event, we
+        # extract the event from the envelope (if it contains one) and forward
+        # it via the deprecated capture_event method.
+        event = envelope.get_event()
+        if event is not None:
+            self.capture_event(event)
+
+
+def make_transport(options):
+    # type: (Dict[str, Any]) -> Optional[Transport]
+    ref_transport = options["transport"]
+
+    use_http2_transport = options.get("_experiments", {}).get("transport_http2", False)
+
+    # By default, we use the http transport class
+    transport_cls = (
+        Http2Transport if use_http2_transport else HttpTransport
+    )  # type: Type[Transport]
+
+    if isinstance(ref_transport, Transport):
+        return ref_transport
+    elif isinstance(ref_transport, type) and issubclass(ref_transport, Transport):
+        transport_cls = ref_transport
+    elif callable(ref_transport):
+        warnings.warn(
+            "Function transports are deprecated and will be removed in a future release."
+            "Please provide a Transport instance or subclass, instead.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        return _FunctionTransport(ref_transport)
+
+    # If a transport class is given, only instantiate it if the DSN is not
+    # empty or None.
+    if options["dsn"]:
+        return transport_cls(options)
+
+    return None
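+
+# Usage sketch (editor's illustration): a custom transport is passed via the
+# `transport` init option as an instance or subclass of Transport, e.g.
+#
+#     class PrintingTransport(Transport):
+#         def capture_envelope(self, envelope):
+#             print(envelope.description)
+#
+#     sentry_sdk.init(dsn="...", transport=PrintingTransport)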
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/types.py b/.venv/lib/python3.12/site-packages/sentry_sdk/types.py
new file mode 100644
index 00000000..a81be8f1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/types.py
@@ -0,0 +1,24 @@
+"""
+This module contains type definitions for the Sentry SDK's public API.
+The types are re-exported from the internal module `sentry_sdk._types`.
+
+Disclaimer: Since types are a form of documentation, type definitions
+may change in minor releases. Removing a type would be considered a
+breaking change, and so we will only remove type definitions in major
+releases.
+"""
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from sentry_sdk._types import Event, EventDataCategory, Hint
+else:
+    from typing import Any
+
+    # The lines below allow the types to be imported from outside `if TYPE_CHECKING`
+    # guards. The types in this module are only intended to be used for type hints.
+    Event = Any
+    EventDataCategory = Any
+    Hint = Any
+
+__all__ = ("Event", "EventDataCategory", "Hint")
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/utils.py b/.venv/lib/python3.12/site-packages/sentry_sdk/utils.py
new file mode 100644
index 00000000..89b2354c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/utils.py
@@ -0,0 +1,1907 @@
+import base64
+import json
+import linecache
+import logging
+import math
+import os
+import random
+import re
+import subprocess
+import sys
+import threading
+import time
+from collections import namedtuple
+from datetime import datetime, timezone
+from decimal import Decimal
+from functools import partial, partialmethod, wraps
+from numbers import Real
+from urllib.parse import parse_qs, unquote, urlencode, urlsplit, urlunsplit
+
+try:
+    # Python 3.11
+    from builtins import BaseExceptionGroup
+except ImportError:
+    # Python 3.10 and below
+    BaseExceptionGroup = None  # type: ignore
+
+import sentry_sdk
+from sentry_sdk._compat import PY37
+from sentry_sdk.consts import (
+    DEFAULT_ADD_FULL_STACK,
+    DEFAULT_MAX_STACK_FRAMES,
+    DEFAULT_MAX_VALUE_LENGTH,
+    EndpointType,
+)
+from sentry_sdk._types import Annotated, AnnotatedValue, SENSITIVE_DATA_SUBSTITUTE
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from types import FrameType, TracebackType
+    from typing import (
+        Any,
+        Callable,
+        cast,
+        ContextManager,
+        Dict,
+        Iterator,
+        List,
+        NoReturn,
+        Optional,
+        overload,
+        ParamSpec,
+        Set,
+        Tuple,
+        Type,
+        TypeVar,
+        Union,
+    )
+
+    from gevent.hub import Hub
+
+    from sentry_sdk._types import Event, ExcInfo
+
+    P = ParamSpec("P")
+    R = TypeVar("R")
+
+
+epoch = datetime(1970, 1, 1)
+
+# The logger is created here but initialized in the debug support module
+logger = logging.getLogger("sentry_sdk.errors")
+
+_installed_modules = None
+
+BASE64_ALPHABET = re.compile(r"^[a-zA-Z0-9/+=]*$")
+
+FALSY_ENV_VALUES = frozenset(("false", "f", "n", "no", "off", "0"))
+TRUTHY_ENV_VALUES = frozenset(("true", "t", "y", "yes", "on", "1"))
+
+
+def env_to_bool(value, *, strict=False):
+    # type: (Any, Optional[bool]) -> bool | None
+    """Casts an ENV variable value to boolean using the constants defined above.
+    In strict mode, it may return None if the value doesn't match any of the predefined values.
+    """
+    normalized = str(value).lower() if value is not None else None
+
+    if normalized in FALSY_ENV_VALUES:
+        return False
+
+    if normalized in TRUTHY_ENV_VALUES:
+        return True
+
+    return None if strict else bool(value)
+
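+# Usage sketch (editor's illustration):
+#
+#     >>> env_to_bool("YES"), env_to_bool("off")
+#     (True, False)
+#     >>> env_to_bool("maybe"), env_to_bool("maybe", strict=True)
+#     (True, None)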
+
+def json_dumps(data):
+    # type: (Any) -> bytes
+    """Serialize data into a compact JSON representation encoded as UTF-8."""
+    return json.dumps(data, allow_nan=False, separators=(",", ":")).encode("utf-8")
+
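+# Editor's note (illustrative): the compact separators drop whitespace,
+# e.g. json_dumps({"a": [1, 2]}) == b'{"a":[1,2]}'.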
+
+def get_git_revision():
+    # type: () -> Optional[str]
+    try:
+        with open(os.path.devnull, "w+") as null:
+            # prevent command prompt windows from popping up on Windows
+            startupinfo = None
+            if sys.platform == "win32" or sys.platform == "cygwin":
+                startupinfo = subprocess.STARTUPINFO()
+                startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
+
+            revision = (
+                subprocess.Popen(
+                    ["git", "rev-parse", "HEAD"],
+                    startupinfo=startupinfo,
+                    stdout=subprocess.PIPE,
+                    stderr=null,
+                    stdin=null,
+                )
+                .communicate()[0]
+                .strip()
+                .decode("utf-8")
+            )
+    except OSError:  # covers IOError and FileNotFoundError on Python 3
+        return None
+
+    return revision
+
+
+def get_default_release():
+    # type: () -> Optional[str]
+    """Try to guess a default release."""
+    release = os.environ.get("SENTRY_RELEASE")
+    if release:
+        return release
+
+    release = get_git_revision()
+    if release:
+        return release
+
+    for var in (
+        "HEROKU_SLUG_COMMIT",
+        "SOURCE_VERSION",
+        "CODEBUILD_RESOLVED_SOURCE_VERSION",
+        "CIRCLE_SHA1",
+        "GAE_DEPLOYMENT_ID",
+    ):
+        release = os.environ.get(var)
+        if release:
+            return release
+    return None
+
+
+def get_sdk_name(installed_integrations):
+    # type: (List[str]) -> str
+    """Return the SDK name including the name of the used web framework."""
+
+    # Note: We cannot use, for example,
+    # sentry_sdk.integrations.django.DjangoIntegration.identifier here, because
+    # if Django is not installed, the integration is not accessible.
+    framework_integrations = [
+        "django",
+        "flask",
+        "fastapi",
+        "bottle",
+        "falcon",
+        "quart",
+        "sanic",
+        "starlette",
+        "litestar",
+        "starlite",
+        "chalice",
+        "serverless",
+        "pyramid",
+        "tornado",
+        "aiohttp",
+        "aws_lambda",
+        "gcp",
+        "beam",
+        "asgi",
+        "wsgi",
+    ]
+
+    for integration in framework_integrations:
+        if integration in installed_integrations:
+            return "sentry.python.{}".format(integration)
+
+    return "sentry.python"
+
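+# Editor's note (illustrative): the first framework found wins, so
+# get_sdk_name(["django", "celery"]) returns "sentry.python.django", while
+# get_sdk_name(["celery"]) falls back to "sentry.python".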
+
+class CaptureInternalException:
+    __slots__ = ()
+
+    def __enter__(self):
+        # type: () -> ContextManager[Any]
+        return self
+
+    def __exit__(self, ty, value, tb):
+        # type: (Optional[Type[BaseException]], Optional[BaseException], Optional[TracebackType]) -> bool
+        if ty is not None and value is not None:
+            capture_internal_exception((ty, value, tb))
+
+        return True
+
+
+_CAPTURE_INTERNAL_EXCEPTION = CaptureInternalException()
+
+
+def capture_internal_exceptions():
+    # type: () -> ContextManager[Any]
+    return _CAPTURE_INTERNAL_EXCEPTION
+
+
+def capture_internal_exception(exc_info):
+    # type: (ExcInfo) -> None
+    """
+    Capture an exception that is likely caused by a bug in the SDK
+    itself.
+
+    These exceptions do not end up in Sentry and are just logged instead.
+    """
+    if sentry_sdk.get_client().is_active():
+        logger.error("Internal error in sentry_sdk", exc_info=exc_info)
+
+
+def to_timestamp(value):
+    # type: (datetime) -> float
+    return (value - epoch).total_seconds()
+
+
+def format_timestamp(value):
+    # type: (datetime) -> str
+    """Formats a timestamp in RFC 3339 format.
+
+    Any datetime objects with a non-UTC timezone are converted to UTC, so that all timestamps are formatted in UTC.
+    """
+    utctime = value.astimezone(timezone.utc)
+
+    # We use this custom formatting rather than isoformat for backwards
+    # compatibility (we have used this format for several years now);
+    # isoformat produces slightly different output.
+    return utctime.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
+
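+# Editor's note (illustrative):
+#
+#     >>> format_timestamp(datetime(2024, 5, 1, 12, 0, 0, 123456, tzinfo=timezone.utc))
+#     '2024-05-01T12:00:00.123456Z'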
+
+ISO_TZ_SEPARATORS = frozenset(("+", "-"))
+
+
+def datetime_from_isoformat(value):
+    # type: (str) -> datetime
+    try:
+        result = datetime.fromisoformat(value)
+    except (AttributeError, ValueError):
+        # py 3.6
+        timestamp_format = (
+            "%Y-%m-%dT%H:%M:%S.%f" if "." in value else "%Y-%m-%dT%H:%M:%S"
+        )
+        if value.endswith("Z"):
+            value = value[:-1] + "+0000"
+
+        if value[-6] in ISO_TZ_SEPARATORS:
+            timestamp_format += "%z"
+            value = value[:-3] + value[-2:]
+        elif value[-5] in ISO_TZ_SEPARATORS:
+            timestamp_format += "%z"
+
+        result = datetime.strptime(value, timestamp_format)
+    return result.astimezone(timezone.utc)
+
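+# Editor's note (illustrative): both "Z" and explicit offsets are accepted,
+# and the result is always converted to UTC, e.g.
+# datetime_from_isoformat("2024-05-01T14:00:00+02:00") is 12:00 UTC.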
+
+def event_hint_with_exc_info(exc_info=None):
+    # type: (Optional[ExcInfo]) -> Dict[str, Optional[ExcInfo]]
+    """Creates a hint with the exc info filled in."""
+    if exc_info is None:
+        exc_info = sys.exc_info()
+    else:
+        exc_info = exc_info_from_error(exc_info)
+    if exc_info[0] is None:
+        exc_info = None
+    return {"exc_info": exc_info}
+
+
+class BadDsn(ValueError):
+    """Raised on invalid DSNs."""
+
+
+class Dsn:
+    """Represents a DSN."""
+
+    def __init__(self, value):
+        # type: (Union[Dsn, str]) -> None
+        if isinstance(value, Dsn):
+            self.__dict__ = dict(value.__dict__)
+            return
+        parts = urlsplit(str(value))
+
+        if parts.scheme not in ("http", "https"):
+            raise BadDsn("Unsupported scheme %r" % parts.scheme)
+        self.scheme = parts.scheme
+
+        if parts.hostname is None:
+            raise BadDsn("Missing hostname")
+
+        self.host = parts.hostname
+
+        if parts.port is None:
+            self.port = self.scheme == "https" and 443 or 80  # type: int
+        else:
+            self.port = parts.port
+
+        if not parts.username:
+            raise BadDsn("Missing public key")
+
+        self.public_key = parts.username
+        self.secret_key = parts.password
+
+        path = parts.path.rsplit("/", 1)
+
+        try:
+            self.project_id = str(int(path.pop()))
+        except (ValueError, TypeError):
+            raise BadDsn("Invalid project in DSN (%r)" % (parts.path or "")[1:])
+
+        self.path = "/".join(path) + "/"
+
+    @property
+    def netloc(self):
+        # type: () -> str
+        """The netloc part of a DSN."""
+        rv = self.host
+        if (self.scheme, self.port) not in (("http", 80), ("https", 443)):
+            rv = "%s:%s" % (rv, self.port)
+        return rv
+
+    def to_auth(self, client=None):
+        # type: (Optional[Any]) -> Auth
+        """Returns the auth info object for this dsn."""
+        return Auth(
+            scheme=self.scheme,
+            host=self.netloc,
+            path=self.path,
+            project_id=self.project_id,
+            public_key=self.public_key,
+            secret_key=self.secret_key,
+            client=client,
+        )
+
+    def __str__(self):
+        # type: () -> str
+        return "%s://%s%s@%s%s%s" % (
+            self.scheme,
+            self.public_key,
+            self.secret_key and "@" + self.secret_key or "",
+            self.netloc,
+            self.path,
+            self.project_id,
+        )
+
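+# Editor's note (illustrative): given "https://publickey@example.com/42",
+# the parsed Dsn has scheme "https", host "example.com", port 443,
+# public_key "publickey", project_id "42", and path "/".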
+
+class Auth:
+    """Helper object that represents the auth info."""
+
+    def __init__(
+        self,
+        scheme,
+        host,
+        project_id,
+        public_key,
+        secret_key=None,
+        version=7,
+        client=None,
+        path="/",
+    ):
+        # type: (str, str, str, str, Optional[str], int, Optional[Any], str) -> None
+        self.scheme = scheme
+        self.host = host
+        self.path = path
+        self.project_id = project_id
+        self.public_key = public_key
+        self.secret_key = secret_key
+        self.version = version
+        self.client = client
+
+    def get_api_url(
+        self, type=EndpointType.ENVELOPE  # type: EndpointType
+    ):
+        # type: (...) -> str
+        """Returns the API url for storing events."""
+        return "%s://%s%sapi/%s/%s/" % (
+            self.scheme,
+            self.host,
+            self.path,
+            self.project_id,
+            type.value,
+        )
+
+    def to_header(self):
+        # type: () -> str
+        """Returns the auth header a string."""
+        rv = [("sentry_key", self.public_key), ("sentry_version", self.version)]
+        if self.client is not None:
+            rv.append(("sentry_client", self.client))
+        if self.secret_key is not None:
+            rv.append(("sentry_secret", self.secret_key))
+        return "Sentry " + ", ".join("%s=%s" % (key, value) for key, value in rv)
+
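+# Editor's note (illustrative): continuing the DSN example above,
+# Dsn("https://publickey@example.com/42").to_auth("sentry.python/2.x").to_header()
+# produces "Sentry sentry_key=publickey, sentry_version=7, sentry_client=sentry.python/2.x".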
+
+def get_type_name(cls):
+    # type: (Optional[type]) -> Optional[str]
+    return getattr(cls, "__qualname__", None) or getattr(cls, "__name__", None)
+
+
+def get_type_module(cls):
+    # type: (Optional[type]) -> Optional[str]
+    mod = getattr(cls, "__module__", None)
+    if mod not in (None, "builtins", "__builtins__"):
+        return mod
+    return None
+
+
+def should_hide_frame(frame):
+    # type: (FrameType) -> bool
+    try:
+        mod = frame.f_globals["__name__"]
+        if mod.startswith("sentry_sdk."):
+            return True
+    except (AttributeError, KeyError):
+        pass
+
+    for flag_name in "__traceback_hide__", "__tracebackhide__":
+        try:
+            if frame.f_locals[flag_name]:
+                return True
+        except Exception:
+            pass
+
+    return False
+
+
+def iter_stacks(tb):
+    # type: (Optional[TracebackType]) -> Iterator[TracebackType]
+    tb_ = tb  # type: Optional[TracebackType]
+    while tb_ is not None:
+        if not should_hide_frame(tb_.tb_frame):
+            yield tb_
+        tb_ = tb_.tb_next
+
+
+def get_lines_from_file(
+    filename,  # type: str
+    lineno,  # type: int
+    max_length=None,  # type: Optional[int]
+    loader=None,  # type: Optional[Any]
+    module=None,  # type: Optional[str]
+):
+    # type: (...) -> Tuple[List[Annotated[str]], Optional[Annotated[str]], List[Annotated[str]]]
+    context_lines = 5
+    source = None
+    if loader is not None and hasattr(loader, "get_source"):
+        try:
+            source_str = loader.get_source(module)  # type: Optional[str]
+        except (ImportError, IOError):
+            source_str = None
+        if source_str is not None:
+            source = source_str.splitlines()
+
+    if source is None:
+        try:
+            source = linecache.getlines(filename)
+        except (OSError, IOError):
+            return [], None, []
+
+    if not source:
+        return [], None, []
+
+    lower_bound = max(0, lineno - context_lines)
+    upper_bound = min(lineno + 1 + context_lines, len(source))
+
+    try:
+        pre_context = [
+            strip_string(line.strip("\r\n"), max_length=max_length)
+            for line in source[lower_bound:lineno]
+        ]
+        context_line = strip_string(source[lineno].strip("\r\n"), max_length=max_length)
+        post_context = [
+            strip_string(line.strip("\r\n"), max_length=max_length)
+            for line in source[(lineno + 1) : upper_bound]
+        ]
+        return pre_context, context_line, post_context
+    except IndexError:
+        # the file may have changed since it was loaded into memory
+        return [], None, []
+
+
+def get_source_context(
+    frame,  # type: FrameType
+    tb_lineno,  # type: Optional[int]
+    max_value_length=None,  # type: Optional[int]
+):
+    # type: (...) -> Tuple[List[Annotated[str]], Optional[Annotated[str]], List[Annotated[str]]]
+    try:
+        abs_path = frame.f_code.co_filename  # type: Optional[str]
+    except Exception:
+        abs_path = None
+    try:
+        module = frame.f_globals["__name__"]
+    except Exception:
+        return [], None, []
+    try:
+        loader = frame.f_globals["__loader__"]
+    except Exception:
+        loader = None
+
+    if tb_lineno is not None and abs_path:
+        lineno = tb_lineno - 1
+        return get_lines_from_file(
+            abs_path, lineno, max_value_length, loader=loader, module=module
+        )
+
+    return [], None, []
+
+
+def safe_str(value):
+    # type: (Any) -> str
+    try:
+        return str(value)
+    except Exception:
+        return safe_repr(value)
+
+
+def safe_repr(value):
+    # type: (Any) -> str
+    try:
+        return repr(value)
+    except Exception:
+        return "<broken repr>"
+
+
+def filename_for_module(module, abs_path):
+    # type: (Optional[str], Optional[str]) -> Optional[str]
+    if not abs_path or not module:
+        return abs_path
+
+    try:
+        if abs_path.endswith(".pyc"):
+            abs_path = abs_path[:-1]
+
+        base_module = module.split(".", 1)[0]
+        if base_module == module:
+            return os.path.basename(abs_path)
+
+        base_module_path = sys.modules[base_module].__file__
+        if not base_module_path:
+            return abs_path
+
+        return abs_path.split(base_module_path.rsplit(os.sep, 2)[0], 1)[-1].lstrip(
+            os.sep
+        )
+    except Exception:
+        return abs_path
+
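+# Editor's note (illustrative): for module "myapp.views" with abs_path
+# "/srv/app/myapp/views.py" (and "myapp" importable from "/srv/app"), this
+# returns the package-relative path "myapp/views.py".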
+
+def serialize_frame(
+    frame,
+    tb_lineno=None,
+    include_local_variables=True,
+    include_source_context=True,
+    max_value_length=None,
+    custom_repr=None,
+):
+    # type: (FrameType, Optional[int], bool, bool, Optional[int], Optional[Callable[..., Optional[str]]]) -> Dict[str, Any]
+    f_code = getattr(frame, "f_code", None)
+    if not f_code:
+        abs_path = None
+        function = None
+    else:
+        abs_path = frame.f_code.co_filename
+        function = frame.f_code.co_name
+    try:
+        module = frame.f_globals["__name__"]
+    except Exception:
+        module = None
+
+    if tb_lineno is None:
+        tb_lineno = frame.f_lineno
+
+    rv = {
+        "filename": filename_for_module(module, abs_path) or None,
+        "abs_path": os.path.abspath(abs_path) if abs_path else None,
+        "function": function or "<unknown>",
+        "module": module,
+        "lineno": tb_lineno,
+    }  # type: Dict[str, Any]
+
+    if include_source_context:
+        rv["pre_context"], rv["context_line"], rv["post_context"] = get_source_context(
+            frame, tb_lineno, max_value_length
+        )
+
+    if include_local_variables:
+        from sentry_sdk.serializer import serialize
+
+        rv["vars"] = serialize(
+            dict(frame.f_locals), is_vars=True, custom_repr=custom_repr
+        )
+
+    return rv
+
+
+def current_stacktrace(
+    include_local_variables=True,  # type: bool
+    include_source_context=True,  # type: bool
+    max_value_length=None,  # type: Optional[int]
+):
+    # type: (...) -> Dict[str, Any]
+    __tracebackhide__ = True
+    frames = []
+
+    f = sys._getframe()  # type: Optional[FrameType]
+    while f is not None:
+        if not should_hide_frame(f):
+            frames.append(
+                serialize_frame(
+                    f,
+                    include_local_variables=include_local_variables,
+                    include_source_context=include_source_context,
+                    max_value_length=max_value_length,
+                )
+            )
+        f = f.f_back
+
+    frames.reverse()
+
+    return {"frames": frames}
+
+
+def get_errno(exc_value):
+    # type: (BaseException) -> Optional[Any]
+    return getattr(exc_value, "errno", None)
+
+
+def get_error_message(exc_value):
+    # type: (Optional[BaseException]) -> str
+    message = (
+        getattr(exc_value, "message", "")
+        or getattr(exc_value, "detail", "")
+        or safe_str(exc_value)
+    )  # type: str
+
+    # __notes__ should be a list of strings when notes are added via
+    # add_note, but can be anything else if __notes__ is set directly.
+    # We only support strings in __notes__, since that is the intended use.
+    notes = getattr(exc_value, "__notes__", None)  # type: object
+    if isinstance(notes, list) and len(notes) > 0:
+        message += "\n" + "\n".join(note for note in notes if isinstance(note, str))
+
+    return message
+
+
+def single_exception_from_error_tuple(
+    exc_type,  # type: Optional[type]
+    exc_value,  # type: Optional[BaseException]
+    tb,  # type: Optional[TracebackType]
+    client_options=None,  # type: Optional[Dict[str, Any]]
+    mechanism=None,  # type: Optional[Dict[str, Any]]
+    exception_id=None,  # type: Optional[int]
+    parent_id=None,  # type: Optional[int]
+    source=None,  # type: Optional[str]
+    full_stack=None,  # type: Optional[list[dict[str, Any]]]
+):
+    # type: (...) -> Dict[str, Any]
+    """
+    Creates a dict that goes into the event's `exception.values` list and is ingestible by Sentry.
+
+    See the Exception Interface documentation for more details:
+    https://develop.sentry.dev/sdk/event-payloads/exception/
+    """
+    exception_value = {}  # type: Dict[str, Any]
+    exception_value["mechanism"] = (
+        mechanism.copy() if mechanism else {"type": "generic", "handled": True}
+    )
+    if exception_id is not None:
+        exception_value["mechanism"]["exception_id"] = exception_id
+
+    if exc_value is not None:
+        errno = get_errno(exc_value)
+    else:
+        errno = None
+
+    if errno is not None:
+        exception_value["mechanism"].setdefault("meta", {}).setdefault(
+            "errno", {}
+        ).setdefault("number", errno)
+
+    if source is not None:
+        exception_value["mechanism"]["source"] = source
+
+    is_root_exception = exception_id == 0
+    if not is_root_exception and parent_id is not None:
+        exception_value["mechanism"]["parent_id"] = parent_id
+        exception_value["mechanism"]["type"] = "chained"
+
+    if is_root_exception and "type" not in exception_value["mechanism"]:
+        exception_value["mechanism"]["type"] = "generic"
+
+    is_exception_group = BaseExceptionGroup is not None and isinstance(
+        exc_value, BaseExceptionGroup
+    )
+    if is_exception_group:
+        exception_value["mechanism"]["is_exception_group"] = True
+
+    exception_value["module"] = get_type_module(exc_type)
+    exception_value["type"] = get_type_name(exc_type)
+    exception_value["value"] = get_error_message(exc_value)
+
+    if client_options is None:
+        include_local_variables = True
+        include_source_context = True
+        max_value_length = DEFAULT_MAX_VALUE_LENGTH  # fallback
+        custom_repr = None
+    else:
+        include_local_variables = client_options["include_local_variables"]
+        include_source_context = client_options["include_source_context"]
+        max_value_length = client_options["max_value_length"]
+        custom_repr = client_options.get("custom_repr")
+
+    frames = [
+        serialize_frame(
+            tb.tb_frame,
+            tb_lineno=tb.tb_lineno,
+            include_local_variables=include_local_variables,
+            include_source_context=include_source_context,
+            max_value_length=max_value_length,
+            custom_repr=custom_repr,
+        )
+        for tb in iter_stacks(tb)
+    ]  # type: List[Dict[str, Any]]
+
+    if frames:
+        if not full_stack:
+            new_frames = frames
+        else:
+            new_frames = merge_stack_frames(frames, full_stack, client_options)
+
+        exception_value["stacktrace"] = {"frames": new_frames}
+
+    return exception_value
+
+
+HAS_CHAINED_EXCEPTIONS = hasattr(Exception, "__suppress_context__")
+
+if HAS_CHAINED_EXCEPTIONS:
+
+    def walk_exception_chain(exc_info):
+        # type: (ExcInfo) -> Iterator[ExcInfo]
+        exc_type, exc_value, tb = exc_info
+
+        seen_exceptions = []
+        seen_exception_ids = set()  # type: Set[int]
+
+        while (
+            exc_type is not None
+            and exc_value is not None
+            and id(exc_value) not in seen_exception_ids
+        ):
+            yield exc_type, exc_value, tb
+
+            # Avoid hashing random types we don't know anything
+            # about. Use the list to keep a reference so that the
+            # `id` is not reused for another object.
+            seen_exceptions.append(exc_value)
+            seen_exception_ids.add(id(exc_value))
+
+            if exc_value.__suppress_context__:
+                cause = exc_value.__cause__
+            else:
+                cause = exc_value.__context__
+            if cause is None:
+                break
+            exc_type = type(cause)
+            exc_value = cause
+            tb = getattr(cause, "__traceback__", None)
+
+else:
+
+    def walk_exception_chain(exc_info):
+        # type: (ExcInfo) -> Iterator[ExcInfo]
+        yield exc_info
+
+
+def exceptions_from_error(
+    exc_type,  # type: Optional[type]
+    exc_value,  # type: Optional[BaseException]
+    tb,  # type: Optional[TracebackType]
+    client_options=None,  # type: Optional[Dict[str, Any]]
+    mechanism=None,  # type: Optional[Dict[str, Any]]
+    exception_id=0,  # type: int
+    parent_id=0,  # type: int
+    source=None,  # type: Optional[str]
+    full_stack=None,  # type: Optional[list[dict[str, Any]]]
+):
+    # type: (...) -> Tuple[int, List[Dict[str, Any]]]
+    """
+    Creates the list of exceptions.
+    This can include chained exceptions and exceptions from an ExceptionGroup.
+
+    See the Exception Interface documentation for more details:
+    https://develop.sentry.dev/sdk/event-payloads/exception/
+    """
+
+    parent = single_exception_from_error_tuple(
+        exc_type=exc_type,
+        exc_value=exc_value,
+        tb=tb,
+        client_options=client_options,
+        mechanism=mechanism,
+        exception_id=exception_id,
+        parent_id=parent_id,
+        source=source,
+        full_stack=full_stack,
+    )
+    exceptions = [parent]
+
+    parent_id = exception_id
+    exception_id += 1
+
+    should_suppress_context = hasattr(exc_value, "__suppress_context__") and exc_value.__suppress_context__  # type: ignore
+    if should_suppress_context:
+        # Add the direct cause.
+        # The field `__cause__` is set when an exception is raised with the `from` keyword.
+        exception_has_cause = (
+            exc_value
+            and hasattr(exc_value, "__cause__")
+            and exc_value.__cause__ is not None
+        )
+        if exception_has_cause:
+            cause = exc_value.__cause__  # type: ignore
+            (exception_id, child_exceptions) = exceptions_from_error(
+                exc_type=type(cause),
+                exc_value=cause,
+                tb=getattr(cause, "__traceback__", None),
+                client_options=client_options,
+                mechanism=mechanism,
+                exception_id=exception_id,
+                source="__cause__",
+                full_stack=full_stack,
+            )
+            exceptions.extend(child_exceptions)
+
+    else:
+        # Add the indirect cause.
+        # The field `__context__` is set when an exception is raised while
+        # another exception is being handled.
+        exception_has_context = (
+            exc_value
+            and hasattr(exc_value, "__context__")
+            and exc_value.__context__ is not None
+        )
+        if exception_has_context:
+            context = exc_value.__context__  # type: ignore
+            (exception_id, child_exceptions) = exceptions_from_error(
+                exc_type=type(context),
+                exc_value=context,
+                tb=getattr(context, "__traceback__", None),
+                client_options=client_options,
+                mechanism=mechanism,
+                exception_id=exception_id,
+                source="__context__",
+                full_stack=full_stack,
+            )
+            exceptions.extend(child_exceptions)
+
+    # Add exceptions from an ExceptionGroup.
+    is_exception_group = exc_value and hasattr(exc_value, "exceptions")
+    if is_exception_group:
+        for idx, e in enumerate(exc_value.exceptions):  # type: ignore
+            (exception_id, child_exceptions) = exceptions_from_error(
+                exc_type=type(e),
+                exc_value=e,
+                tb=getattr(e, "__traceback__", None),
+                client_options=client_options,
+                mechanism=mechanism,
+                exception_id=exception_id,
+                parent_id=parent_id,
+                source="exceptions[%s]" % idx,
+                full_stack=full_stack,
+            )
+            exceptions.extend(child_exceptions)
+
+    return (exception_id, exceptions)
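+
+# Illustrative sketch of the traversal (not part of the module):
+#
+#     try:
+#         try:
+#             raise KeyError("a")
+#         except KeyError as e:
+#             raise ValueError("b") from e
+#     except ValueError as exc:
+#         _, values = exceptions_from_error(type(exc), exc, exc.__traceback__)
+#         # values[0] is the ValueError (exception_id 0); the chained KeyError
+#         # follows with mechanism["source"] == "__cause__".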
+
+
+def exceptions_from_error_tuple(
+    exc_info,  # type: ExcInfo
+    client_options=None,  # type: Optional[Dict[str, Any]]
+    mechanism=None,  # type: Optional[Dict[str, Any]]
+    full_stack=None,  # type: Optional[list[dict[str, Any]]]
+):
+    # type: (...) -> List[Dict[str, Any]]
+    exc_type, exc_value, tb = exc_info
+
+    is_exception_group = BaseExceptionGroup is not None and isinstance(
+        exc_value, BaseExceptionGroup
+    )
+
+    if is_exception_group:
+        (_, exceptions) = exceptions_from_error(
+            exc_type=exc_type,
+            exc_value=exc_value,
+            tb=tb,
+            client_options=client_options,
+            mechanism=mechanism,
+            exception_id=0,
+            parent_id=0,
+            full_stack=full_stack,
+        )
+
+    else:
+        exceptions = []
+        for exc_type, exc_value, tb in walk_exception_chain(exc_info):
+            exceptions.append(
+                single_exception_from_error_tuple(
+                    exc_type=exc_type,
+                    exc_value=exc_value,
+                    tb=tb,
+                    client_options=client_options,
+                    mechanism=mechanism,
+                    full_stack=full_stack,
+                )
+            )
+
+    exceptions.reverse()
+
+    return exceptions
+
+
+def to_string(value):
+    # type: (str) -> str
+    try:
+        return str(value)
+    except UnicodeDecodeError:
+        return repr(value)[1:-1]
+
+
+def iter_event_stacktraces(event):
+    # type: (Event) -> Iterator[Dict[str, Any]]
+    if "stacktrace" in event:
+        yield event["stacktrace"]
+    if "threads" in event:
+        for thread in event["threads"].get("values") or ():
+            if "stacktrace" in thread:
+                yield thread["stacktrace"]
+    if "exception" in event:
+        for exception in event["exception"].get("values") or ():
+            if "stacktrace" in exception:
+                yield exception["stacktrace"]
+
+
+def iter_event_frames(event):
+    # type: (Event) -> Iterator[Dict[str, Any]]
+    for stacktrace in iter_event_stacktraces(event):
+        for frame in stacktrace.get("frames") or ():
+            yield frame
+
+
+def handle_in_app(event, in_app_exclude=None, in_app_include=None, project_root=None):
+    # type: (Event, Optional[List[str]], Optional[List[str]], Optional[str]) -> Event
+    for stacktrace in iter_event_stacktraces(event):
+        set_in_app_in_frames(
+            stacktrace.get("frames"),
+            in_app_exclude=in_app_exclude,
+            in_app_include=in_app_include,
+            project_root=project_root,
+        )
+
+    return event
+
+
+def set_in_app_in_frames(frames, in_app_exclude, in_app_include, project_root=None):
+    # type: (Any, Optional[List[str]], Optional[List[str]], Optional[str]) -> Optional[Any]
+    if not frames:
+        return None
+
+    for frame in frames:
+        # if frame has already been marked as in_app, skip it
+        current_in_app = frame.get("in_app")
+        if current_in_app is not None:
+            continue
+
+        module = frame.get("module")
+
+        # check if module in frame is in the list of modules to include
+        if _module_in_list(module, in_app_include):
+            frame["in_app"] = True
+            continue
+
+        # check if module in frame is in the list of modules to exclude
+        if _module_in_list(module, in_app_exclude):
+            frame["in_app"] = False
+            continue
+
+        # if frame has no abs_path, skip further checks
+        abs_path = frame.get("abs_path")
+        if abs_path is None:
+            continue
+
+        if _is_external_source(abs_path):
+            frame["in_app"] = False
+            continue
+
+        if _is_in_project_root(abs_path, project_root):
+            frame["in_app"] = True
+            continue
+
+    return frames
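+
+# Illustrative sketch (hypothetical frames, not part of the module):
+#
+#     frames = [
+#         {"module": "myapp.views", "abs_path": "/srv/app/views.py"},
+#         {"module": "django.core", "abs_path": "/usr/lib/site-packages/django/core.py"},
+#     ]
+#     set_in_app_in_frames(frames, in_app_exclude=["django"],
+#                          in_app_include=None, project_root="/srv/app")
+#     # frames[0]["in_app"] is True (under the project root);
+#     # frames[1]["in_app"] is False (module listed in in_app_exclude).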
+
+
+def exc_info_from_error(error):
+    # type: (Union[BaseException, ExcInfo]) -> ExcInfo
+    if isinstance(error, tuple) and len(error) == 3:
+        exc_type, exc_value, tb = error
+    elif isinstance(error, BaseException):
+        tb = getattr(error, "__traceback__", None)
+        if tb is not None:
+            exc_type = type(error)
+            exc_value = error
+        else:
+            exc_type, exc_value, tb = sys.exc_info()
+            if exc_value is not error:
+                tb = None
+                exc_value = error
+                exc_type = type(error)
+
+    else:
+        raise ValueError("Expected Exception object to report, got %s!" % type(error))
+
+    exc_info = (exc_type, exc_value, tb)
+
+    if TYPE_CHECKING:
+        # This cast is safe because exc_type and exc_value are either both
+        # None or both not None.
+        exc_info = cast(ExcInfo, exc_info)
+
+    return exc_info
+
+
+def merge_stack_frames(frames, full_stack, client_options):
+    # type: (List[Dict[str, Any]], List[Dict[str, Any]], Optional[Dict[str, Any]]) -> List[Dict[str, Any]]
+    """
+    Add the missing frames from full_stack to frames and return the merged list.
+    """
+    frame_ids = {
+        (
+            frame["abs_path"],
+            frame["context_line"],
+            frame["lineno"],
+            frame["function"],
+        )
+        for frame in frames
+    }
+
+    new_frames = [
+        stackframe
+        for stackframe in full_stack
+        if (
+            stackframe["abs_path"],
+            stackframe["context_line"],
+            stackframe["lineno"],
+            stackframe["function"],
+        )
+        not in frame_ids
+    ]
+    new_frames.extend(frames)
+
+    # Limit the number of frames
+    max_stack_frames = (
+        client_options.get("max_stack_frames", DEFAULT_MAX_STACK_FRAMES)
+        if client_options
+        else None
+    )
+    if max_stack_frames is not None:
+        new_frames = new_frames[len(new_frames) - max_stack_frames :]
+
+    return new_frames
+
+
+def event_from_exception(
+    exc_info,  # type: Union[BaseException, ExcInfo]
+    client_options=None,  # type: Optional[Dict[str, Any]]
+    mechanism=None,  # type: Optional[Dict[str, Any]]
+):
+    # type: (...) -> Tuple[Event, Dict[str, Any]]
+    exc_info = exc_info_from_error(exc_info)
+    hint = event_hint_with_exc_info(exc_info)
+
+    if client_options and client_options.get("add_full_stack", DEFAULT_ADD_FULL_STACK):
+        full_stack = current_stacktrace(
+            include_local_variables=client_options["include_local_variables"],
+            max_value_length=client_options["max_value_length"],
+        )["frames"]
+    else:
+        full_stack = None
+
+    return (
+        {
+            "level": "error",
+            "exception": {
+                "values": exceptions_from_error_tuple(
+                    exc_info, client_options, mechanism, full_stack
+                )
+            },
+        },
+        hint,
+    )
+
+
+def _module_in_list(name, items):
+    # type: (Optional[str], Optional[List[str]]) -> bool
+    if name is None:
+        return False
+
+    if not items:
+        return False
+
+    for item in items:
+        if item == name or name.startswith(item + "."):
+            return True
+
+    return False
+
+
+def _is_external_source(abs_path):
+    # type: (Optional[str]) -> bool
+    # check if frame is in 'site-packages' or 'dist-packages'
+    if abs_path is None:
+        return False
+
+    external_source = (
+        re.search(r"[\\/](?:dist|site)-packages[\\/]", abs_path) is not None
+    )
+    return external_source
+
+
+def _is_in_project_root(abs_path, project_root):
+    # type: (Optional[str], Optional[str]) -> bool
+    if abs_path is None or project_root is None:
+        return False
+
+    # check if path is in the project root
+    if abs_path.startswith(project_root):
+        return True
+
+    return False
+
+
+def _truncate_by_bytes(string, max_bytes):
+    # type: (str, int) -> str
+    """
+    Truncate a UTF-8-encodable string at a codepoint boundary so that it, plus the trailing "...", fits in max_bytes.
+    """
+    truncated = string.encode("utf-8")[: max_bytes - 3].decode("utf-8", errors="ignore")
+
+    return truncated + "..."
+
+
+def _get_size_in_bytes(value):
+    # type: (str) -> Optional[int]
+    try:
+        return len(value.encode("utf-8"))
+    except (UnicodeEncodeError, UnicodeDecodeError):
+        return None
+
+
+def strip_string(value, max_length=None):
+    # type: (str, Optional[int]) -> Union[AnnotatedValue, str]
+    if not value:
+        return value
+
+    if max_length is None:
+        max_length = DEFAULT_MAX_VALUE_LENGTH
+
+    byte_size = _get_size_in_bytes(value)
+    text_size = len(value)
+
+    if byte_size is not None and byte_size > max_length:
+        # truncate to max_length bytes, preserving code points
+        truncated_value = _truncate_by_bytes(value, max_length)
+    elif text_size is not None and text_size > max_length:
+        # fallback to truncating by string length
+        truncated_value = value[: max_length - 3] + "..."
+    else:
+        return value
+
+    return AnnotatedValue(
+        value=truncated_value,
+        metadata={
+            "len": byte_size or text_size,
+            "rem": [["!limit", "x", max_length - 3, max_length]],
+        },
+    )
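+
+# Usage sketch (illustrative; AnnotatedValue is defined earlier in this module):
+#
+#     strip_string("a" * 10, max_length=8)
+#     # -> AnnotatedValue(value="aaaaa...", metadata={"len": 10, ...})
+#     strip_string("short")  # returned unchanged (below the limit)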
+
+
+def parse_version(version):
+    # type: (str) -> Optional[Tuple[int, ...]]
+    """
+    Parses a version string into a tuple of integers.
+    This uses the parsing logic from PEP 440:
+    https://peps.python.org/pep-0440/#appendix-b-parsing-version-strings-with-regular-expressions
+    """
+    VERSION_PATTERN = r"""  # noqa: N806
+        v?
+        (?:
+            (?:(?P<epoch>[0-9]+)!)?                           # epoch
+            (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
+            (?P<pre>                                          # pre-release
+                [-_\.]?
+                (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+                [-_\.]?
+                (?P<pre_n>[0-9]+)?
+            )?
+            (?P<post>                                         # post release
+                (?:-(?P<post_n1>[0-9]+))
+                |
+                (?:
+                    [-_\.]?
+                    (?P<post_l>post|rev|r)
+                    [-_\.]?
+                    (?P<post_n2>[0-9]+)?
+                )
+            )?
+            (?P<dev>                                          # dev release
+                [-_\.]?
+                (?P<dev_l>dev)
+                [-_\.]?
+                (?P<dev_n>[0-9]+)?
+            )?
+        )
+        (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+    """
+
+    pattern = re.compile(
+        r"^\s*" + VERSION_PATTERN + r"\s*$",
+        re.VERBOSE | re.IGNORECASE,
+    )
+
+    try:
+        release = pattern.match(version).groupdict()["release"]  # type: ignore
+        release_tuple = tuple(map(int, release.split(".")[:3]))  # type: Tuple[int, ...]
+    except (TypeError, ValueError, AttributeError):
+        return None
+
+    return release_tuple
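+
+# Usage sketch (illustrative only):
+#
+#     parse_version("2.19.0")         # -> (2, 19, 0)
+#     parse_version("1.0rc1")         # -> (1, 0) (pre-release suffix ignored)
+#     parse_version("not-a-version")  # -> None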
+
+
+def _is_contextvars_broken():
+    # type: () -> bool
+    """
+    Returns whether gevent/eventlet have patched the stdlib in a way where thread locals are now more "correct" than contextvars.
+    """
+    try:
+        import gevent
+        from gevent.monkey import is_object_patched
+
+        # Get the MAJOR and MINOR version numbers of Gevent
+        version_tuple = tuple(
+            [int(part) for part in re.split(r"a|b|rc|\.", gevent.__version__)[:2]]
+        )
+        if is_object_patched("threading", "local"):
+            # Gevent 20.9.0 depends on Greenlet 0.4.17, which natively handles switching
+            # context vars when greenlets are switched, so Gevent 20.9.0+ is fine.
+            # Ref: https://github.com/gevent/gevent/blob/83c9e2ae5b0834b8f84233760aabe82c3ba065b4/src/gevent/monkey.py#L604-L609
+            # Gevent 20.5, which doesn't depend on Greenlet 0.4.17 with native support
+            # for contextvars, can patch both thread locals and contextvars; in
+            # that case, check whether contextvars are effectively patched.
+            if (
+                # Gevent 20.9.0+
+                (sys.version_info >= (3, 7) and version_tuple >= (20, 9))
+                # Gevent 20.5.0+ or Python < 3.7
+                or (is_object_patched("contextvars", "ContextVar"))
+            ):
+                return False
+
+            return True
+    except ImportError:
+        pass
+
+    try:
+        import greenlet
+        from eventlet.patcher import is_monkey_patched  # type: ignore
+
+        greenlet_version = parse_version(greenlet.__version__)
+
+        if greenlet_version is None:
+            logger.error(
+                "Internal error in Sentry SDK: Could not parse Greenlet version from greenlet.__version__."
+            )
+            return False
+
+        if is_monkey_patched("thread") and greenlet_version < (0, 5):
+            return True
+    except ImportError:
+        pass
+
+    return False
+
+
+def _make_threadlocal_contextvars(local):
+    # type: (type) -> type
+    class ContextVar:
+        # Super-limited impl of ContextVar
+
+        def __init__(self, name, default=None):
+            # type: (str, Any) -> None
+            self._name = name
+            self._default = default
+            self._local = local()
+            self._original_local = local()
+
+        def get(self, default=None):
+            # type: (Any) -> Any
+            return getattr(self._local, "value", default or self._default)
+
+        def set(self, value):
+            # type: (Any) -> Any
+            token = str(random.getrandbits(64))
+            original_value = self.get()
+            setattr(self._original_local, token, original_value)
+            self._local.value = value
+            return token
+
+        def reset(self, token):
+            # type: (Any) -> None
+            self._local.value = getattr(self._original_local, token)
+            # delete the original value (this way it works in Python 3.6+)
+            del self._original_local.__dict__[token]
+
+    return ContextVar
+
+
+def _get_contextvars():
+    # type: () -> Tuple[bool, type]
+    """
+    Figure out the "right" contextvars installation to use. Returns a
+    `contextvars.ContextVar`-like class with a limited API.
+
+    See https://docs.sentry.io/platforms/python/contextvars/ for more information.
+    """
+    if not _is_contextvars_broken():
+        # aiocontextvars is a PyPI package that ensures that the contextvars
+        # backport (also a PyPI package) works with asyncio under Python 3.6
+        #
+        # Import it if available.
+        if sys.version_info < (3, 7):
+            # `aiocontextvars` is absolutely required for functional
+            # contextvars on Python 3.6.
+            try:
+                from aiocontextvars import ContextVar
+
+                return True, ContextVar
+            except ImportError:
+                pass
+        else:
+            # On Python 3.7 contextvars are functional.
+            try:
+                from contextvars import ContextVar
+
+                return True, ContextVar
+            except ImportError:
+                pass
+
+    # Fall back to basic thread-local usage.
+
+    from threading import local
+
+    return False, _make_threadlocal_contextvars(local)
+
+
+HAS_REAL_CONTEXTVARS, ContextVar = _get_contextvars()
+
+CONTEXTVARS_ERROR_MESSAGE = """
+
+With asyncio/ASGI applications, the Sentry SDK requires a functional
+installation of `contextvars` to avoid leaking scope/context data across
+requests.
+
+Please refer to https://docs.sentry.io/platforms/python/contextvars/ for more information.
+"""
+
+
+def qualname_from_function(func):
+    # type: (Callable[..., Any]) -> Optional[str]
+    """Return the qualified name of func. Works with regular function, lambda, partial and partialmethod."""
+    func_qualname = None  # type: Optional[str]
+
+    # Python 2
+    try:
+        return "%s.%s.%s" % (
+            func.im_class.__module__,  # type: ignore
+            func.im_class.__name__,  # type: ignore
+            func.__name__,
+        )
+    except Exception:
+        pass
+
+    prefix, suffix = "", ""
+
+    if isinstance(func, partial) and hasattr(func.func, "__name__"):
+        prefix, suffix = "partial(<function ", ">)"
+        func = func.func
+    else:
+        # The _partialmethod attribute of methods wrapped with partialmethod() was renamed to __partialmethod__ in CPython 3.13:
+        # https://github.com/python/cpython/pull/16600
+        partial_method = getattr(func, "_partialmethod", None) or getattr(
+            func, "__partialmethod__", None
+        )
+        if isinstance(partial_method, partialmethod):
+            prefix, suffix = "partialmethod(<function ", ">)"
+            func = partial_method.func
+
+    if hasattr(func, "__qualname__"):
+        func_qualname = func.__qualname__
+    elif hasattr(func, "__name__"):  # Python 2.7 has no __qualname__
+        func_qualname = func.__name__
+
+    # Python 3: methods, functions, classes
+    if func_qualname is not None:
+        if hasattr(func, "__module__") and isinstance(func.__module__, str):
+            func_qualname = func.__module__ + "." + func_qualname
+        func_qualname = prefix + func_qualname + suffix
+
+    return func_qualname
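+
+# Usage sketch (illustrative only):
+#
+#     from functools import partial
+#     import json
+#
+#     qualname_from_function(json.dumps)           # -> "json.dumps"
+#     qualname_from_function(partial(json.dumps))  # -> "partial(<function json.dumps>)"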
+
+
+def transaction_from_function(func):
+    # type: (Callable[..., Any]) -> Optional[str]
+    return qualname_from_function(func)
+
+
+disable_capture_event = ContextVar("disable_capture_event")
+
+
+class ServerlessTimeoutWarning(Exception):  # noqa: N818
+    """Raised when a serverless method is about to reach its timeout."""
+
+    pass
+
+
+class TimeoutThread(threading.Thread):
+    """Creates a Thread which runs (sleeps) for a time duration equal to
+    waiting_time and then raises a ServerlessTimeoutWarning exception.
+    """
+
+    def __init__(self, waiting_time, configured_timeout):
+        # type: (float, int) -> None
+        threading.Thread.__init__(self)
+        self.waiting_time = waiting_time
+        self.configured_timeout = configured_timeout
+        self._stop_event = threading.Event()
+
+    def stop(self):
+        # type: () -> None
+        self._stop_event.set()
+
+    def run(self):
+        # type: () -> None
+
+        self._stop_event.wait(self.waiting_time)
+
+        if self._stop_event.is_set():
+            return
+
+        integer_configured_timeout = int(self.configured_timeout)
+
+        # Round the configured timeout (in seconds) up to the next whole second
+        if integer_configured_timeout < self.configured_timeout:
+            integer_configured_timeout = integer_configured_timeout + 1
+
+        # Raise the warning once the waiting time has elapsed
+        raise ServerlessTimeoutWarning(
+            "WARNING: Function is expected to time out. Configured timeout duration = {} seconds.".format(
+                integer_configured_timeout
+            )
+        )
+
+
+def to_base64(original):
+    # type: (str) -> Optional[str]
+    """
+    Convert a string to base64, via UTF-8. Returns None on invalid input.
+    """
+    base64_string = None
+
+    try:
+        utf8_bytes = original.encode("UTF-8")
+        base64_bytes = base64.b64encode(utf8_bytes)
+        base64_string = base64_bytes.decode("UTF-8")
+    except Exception as err:
+        logger.warning("Unable to encode {orig} to base64:".format(orig=original), err)
+
+    return base64_string
+
+
+def from_base64(base64_string):
+    # type: (str) -> Optional[str]
+    """
+    Convert a string from base64, via UTF-8. Returns None on invalid input.
+    """
+    utf8_string = None
+
+    try:
+        only_valid_chars = BASE64_ALPHABET.match(base64_string)
+        assert only_valid_chars
+
+        base64_bytes = base64_string.encode("UTF-8")
+        utf8_bytes = base64.b64decode(base64_bytes)
+        utf8_string = utf8_bytes.decode("UTF-8")
+    except Exception as err:
+        # Use lazy %-formatting so `err` is actually rendered into the message.
+        logger.warning("Unable to decode %s from base64: %s", base64_string, err)
+
+    return utf8_string
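+
+# Round-trip sketch (illustrative only):
+#
+#     to_base64("hi")      # -> "aGk="
+#     from_base64("aGk=")  # -> "hi"
+#     from_base64("%%%")   # -> None (and logs a warning)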
+
+
+Components = namedtuple("Components", ["scheme", "netloc", "path", "query", "fragment"])
+
+
+def sanitize_url(url, remove_authority=True, remove_query_values=True, split=False):
+    # type: (str, bool, bool, bool) -> Union[str, Components]
+    """
+    Replaces the authority (user:password) and the query parameter values of a given URL
+    with a placeholder, controlled by remove_authority and remove_query_values.
+    """
+    parsed_url = urlsplit(url)
+    query_params = parse_qs(parsed_url.query, keep_blank_values=True)
+
+    # strip username:password (netloc can be usr:pwd@example.com)
+    if remove_authority:
+        netloc_parts = parsed_url.netloc.split("@")
+        if len(netloc_parts) > 1:
+            netloc = "%s:%s@%s" % (
+                SENSITIVE_DATA_SUBSTITUTE,
+                SENSITIVE_DATA_SUBSTITUTE,
+                netloc_parts[-1],
+            )
+        else:
+            netloc = parsed_url.netloc
+    else:
+        netloc = parsed_url.netloc
+
+    # strip values from query string
+    if remove_query_values:
+        query_string = unquote(
+            urlencode({key: SENSITIVE_DATA_SUBSTITUTE for key in query_params})
+        )
+    else:
+        query_string = parsed_url.query
+
+    components = Components(
+        scheme=parsed_url.scheme,
+        netloc=netloc,
+        query=query_string,
+        path=parsed_url.path,
+        fragment=parsed_url.fragment,
+    )
+
+    if split:
+        return components
+    else:
+        return urlunsplit(components)
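+
+# Usage sketch (illustrative; assumes SENSITIVE_DATA_SUBSTITUTE == "[Filtered]"):
+#
+#     sanitize_url("https://user:pass@example.com/search?q=secret")
+#     # -> "https://[Filtered]:[Filtered]@example.com/search?q=[Filtered]"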
+
+
+ParsedUrl = namedtuple("ParsedUrl", ["url", "query", "fragment"])
+
+
+def parse_url(url, sanitize=True):
+    # type: (str, bool) -> ParsedUrl
+    """
+    Splits a URL into a base URL (including the path), a query string and a fragment. If sanitize
+    is True, the query parameter values are replaced to remove sensitive data. The authority
+    (username and password) in the URL is always removed.
+    """
+    parsed_url = sanitize_url(
+        url, remove_authority=True, remove_query_values=sanitize, split=True
+    )
+
+    base_url = urlunsplit(
+        Components(
+            scheme=parsed_url.scheme,  # type: ignore
+            netloc=parsed_url.netloc,  # type: ignore
+            query="",
+            path=parsed_url.path,  # type: ignore
+            fragment="",
+        )
+    )
+
+    return ParsedUrl(
+        url=base_url,
+        query=parsed_url.query,  # type: ignore
+        fragment=parsed_url.fragment,  # type: ignore
+    )
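+
+# Usage sketch (illustrative; assumes SENSITIVE_DATA_SUBSTITUTE == "[Filtered]"):
+#
+#     parse_url("https://example.com/search?q=secret#results")
+#     # -> ParsedUrl(url="https://example.com/search",
+#     #              query="q=[Filtered]", fragment="results")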
+
+
+def is_valid_sample_rate(rate, source):
+    # type: (Any, str) -> bool
+    """
+    Checks the given sample rate to make sure it is a valid type and value (a
+    boolean or a number between 0 and 1, inclusive).
+    """
+
+    # Both booleans and NaN are instances of Real, so a) checking for Real also
+    # covers booleans, and b) we still have to check for NaN separately. Decimal
+    # does not derive from Real, so it needs a check of its own.
+    if not isinstance(rate, (Real, Decimal)) or math.isnan(rate):
+        logger.warning(
+            "{source} Given sample rate is invalid. Sample rate must be a boolean or a number between 0 and 1. Got {rate} of type {type}.".format(
+                source=source, rate=rate, type=type(rate)
+            )
+        )
+        return False
+
+    # in case rate is a boolean, it will get cast to 1 if it's True and 0 if it's False
+    rate = float(rate)
+    if rate < 0 or rate > 1:
+        logger.warning(
+            "{source} Given sample rate is invalid. Sample rate must be between 0 and 1. Got {rate}.".format(
+                source=source, rate=rate
+            )
+        )
+        return False
+
+    return True
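+
+# Usage sketch (illustrative only):
+#
+#     is_valid_sample_rate(0.25, "[Tracing]")    # -> True
+#     is_valid_sample_rate(True, "[Tracing]")    # -> True (bools are cast to 1/0)
+#     is_valid_sample_rate("0.25", "[Tracing]")  # -> False (and logs a warning)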
+
+
+def match_regex_list(item, regex_list=None, substring_matching=False):
+    # type: (str, Optional[List[str]], bool) -> bool
+    if regex_list is None:
+        return False
+
+    for item_matcher in regex_list:
+        if not substring_matching and item_matcher[-1] != "$":
+            item_matcher += "$"
+
+        matched = re.search(item_matcher, item)
+        if matched:
+            return True
+
+    return False
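+
+# Usage sketch (illustrative only):
+#
+#     match_regex_list("myapp.views", [r"myapp\.views"])  # -> True (full match)
+#     match_regex_list("myapp.views", ["myapp"])          # -> False ("$" is appended)
+#     match_regex_list("myapp.views", ["myapp"], True)    # -> True (substring match)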
+
+
+def is_sentry_url(client, url):
+    # type: (sentry_sdk.client.BaseClient, str) -> bool
+    """
+    Determines whether the given URL matches the Sentry DSN.
+    """
+    return (
+        client is not None
+        and client.transport is not None
+        and client.transport.parsed_dsn is not None
+        and client.transport.parsed_dsn.netloc in url
+    )
+
+
+def _generate_installed_modules():
+    # type: () -> Iterator[Tuple[str, str]]
+    try:
+        from importlib import metadata
+
+        yielded = set()
+        for dist in metadata.distributions():
+            name = dist.metadata.get("Name", None)  # type: ignore[attr-defined]
+            # `metadata` values may be `None`, see:
+            # https://github.com/python/cpython/issues/91216
+            # and
+            # https://github.com/python/importlib_metadata/issues/371
+            if name is not None:
+                normalized_name = _normalize_module_name(name)
+                if dist.version is not None and normalized_name not in yielded:
+                    yield normalized_name, dist.version
+                    yielded.add(normalized_name)
+
+    except ImportError:
+        # < py3.8
+        try:
+            import pkg_resources
+        except ImportError:
+            return
+
+        for info in pkg_resources.working_set:
+            yield _normalize_module_name(info.key), info.version
+
+
+def _normalize_module_name(name):
+    # type: (str) -> str
+    return name.lower()
+
+
+def _get_installed_modules():
+    # type: () -> Dict[str, str]
+    global _installed_modules
+    if _installed_modules is None:
+        _installed_modules = dict(_generate_installed_modules())
+    return _installed_modules
+
+
+def package_version(package):
+    # type: (str) -> Optional[Tuple[int, ...]]
+    installed_packages = _get_installed_modules()
+    version = installed_packages.get(package)
+    if version is None:
+        return None
+
+    return parse_version(version)
+
+
+def reraise(tp, value, tb=None):
+    # type: (Optional[Type[BaseException]], Optional[BaseException], Optional[Any]) -> NoReturn
+    assert value is not None
+    if value.__traceback__ is not tb:
+        raise value.with_traceback(tb)
+    raise value
+
+
+def _no_op(*_a, **_k):
+    # type: (*Any, **Any) -> None
+    """No-op function for ensure_integration_enabled."""
+    pass
+
+
+if TYPE_CHECKING:
+
+    @overload
+    def ensure_integration_enabled(
+        integration,  # type: type[sentry_sdk.integrations.Integration]
+        original_function,  # type: Callable[P, R]
+    ):
+        # type: (...) -> Callable[[Callable[P, R]], Callable[P, R]]
+        ...
+
+    @overload
+    def ensure_integration_enabled(
+        integration,  # type: type[sentry_sdk.integrations.Integration]
+    ):
+        # type: (...) -> Callable[[Callable[P, None]], Callable[P, None]]
+        ...
+
+
+def ensure_integration_enabled(
+    integration,  # type: type[sentry_sdk.integrations.Integration]
+    original_function=_no_op,  # type: Union[Callable[P, R], Callable[P, None]]
+):
+    # type: (...) -> Callable[[Callable[P, R]], Callable[P, R]]
+    """
+    Ensures a given integration is enabled prior to calling a Sentry-patched function.
+
+    The function takes as its parameters the integration that must be enabled and the original
+    function that the SDK is patching. The function returns a function that takes the
+    decorated (Sentry-patched) function as its parameter, and returns a function that, when
+    called, checks whether the given integration is enabled. If the integration is enabled, the
+    function calls the decorated, Sentry-patched function. If the integration is not enabled,
+    the original function is called.
+
+    The function also takes care of preserving the original function's signature and docstring.
+
+    Example usage:
+
+    ```python
+    @ensure_integration_enabled(MyIntegration, my_function)
+    def patch_my_function():
+        with sentry_sdk.start_transaction(...):
+            return my_function()
+    ```
+    """
+    if TYPE_CHECKING:
+        # Type hint to ensure the default function has the right typing. The overloads
+        # ensure the default _no_op function is only used when R is None.
+        original_function = cast(Callable[P, R], original_function)
+
+    def patcher(sentry_patched_function):
+        # type: (Callable[P, R]) -> Callable[P, R]
+        def runner(*args: "P.args", **kwargs: "P.kwargs"):
+            # type: (...) -> R
+            if sentry_sdk.get_client().get_integration(integration) is None:
+                return original_function(*args, **kwargs)
+
+            return sentry_patched_function(*args, **kwargs)
+
+        if original_function is _no_op:
+            return wraps(sentry_patched_function)(runner)
+
+        return wraps(original_function)(runner)
+
+    return patcher
+
+
+if PY37:
+
+    def nanosecond_time():
+        # type: () -> int
+        return time.perf_counter_ns()
+
+else:
+
+    def nanosecond_time():
+        # type: () -> int
+        return int(time.perf_counter() * 1e9)
+
+
+def now():
+    # type: () -> float
+    return time.perf_counter()
+
+
+try:
+    from gevent import get_hub as get_gevent_hub
+    from gevent.monkey import is_module_patched
+except ImportError:
+
+    # It's not great that the signatures differ: gevent's get_hub can't return None.
+    # Consider adding an `if TYPE_CHECKING` branch to change the signature to Optional[Hub].
+    def get_gevent_hub():  # type: ignore[misc]
+        # type: () -> Optional[Hub]
+        return None
+
+    def is_module_patched(mod_name):
+        # type: (str) -> bool
+        # unable to import from gevent means no modules have been patched
+        return False
+
+
+def is_gevent():
+    # type: () -> bool
+    return is_module_patched("threading") or is_module_patched("_thread")
+
+
+def get_current_thread_meta(thread=None):
+    # type: (Optional[threading.Thread]) -> Tuple[Optional[int], Optional[str]]
+    """
+    Try to get the id of the current thread, with various fallbacks.
+    """
+
+    # if a thread is specified, that takes priority
+    if thread is not None:
+        try:
+            thread_id = thread.ident
+            thread_name = thread.name
+            if thread_id is not None:
+                return thread_id, thread_name
+        except AttributeError:
+            pass
+
+    # if the app is using gevent, we should look at the gevent hub first
+    # as the id there differs from what the threading module reports
+    if is_gevent():
+        gevent_hub = get_gevent_hub()
+        if gevent_hub is not None:
+            try:
+                # this is undocumented, so wrap it in try/except to be safe
+                return gevent_hub.thread_ident, None
+            except AttributeError:
+                pass
+
+    # use the current thread's id if possible
+    try:
+        thread = threading.current_thread()
+        thread_id = thread.ident
+        thread_name = thread.name
+        if thread_id is not None:
+            return thread_id, thread_name
+    except AttributeError:
+        pass
+
+    # if we can't get the current thread id, fall back to the main thread id
+    try:
+        thread = threading.main_thread()
+        thread_id = thread.ident
+        thread_name = thread.name
+        if thread_id is not None:
+            return thread_id, thread_name
+    except AttributeError:
+        pass
+
+    # we've tried everything, time to give up
+    return None, None
+
+
+def should_be_treated_as_error(ty, value):
+    # type: (Any, Any) -> bool
+    if ty == SystemExit and hasattr(value, "code") and value.code in (0, None):
+        # https://docs.python.org/3/library/exceptions.html#SystemExit
+        return False
+
+    return True
+
+
+if TYPE_CHECKING:
+    T = TypeVar("T")
+
+
+def try_convert(convert_func, value):
+    # type: (Callable[[Any], T], Any) -> Optional[T]
+    """
+    Attempt to convert from an unknown type to a specific type, using the
+    given function. Return None if the conversion fails, i.e. if the function
+    raises an exception.
+    """
+    try:
+        return convert_func(value)
+    except Exception:
+        return None
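+
+# Usage sketch (illustrative only):
+#
+#     try_convert(int, "42")    # -> 42
+#     try_convert(int, "nope")  # -> None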
diff --git a/.venv/lib/python3.12/site-packages/sentry_sdk/worker.py b/.venv/lib/python3.12/site-packages/sentry_sdk/worker.py
new file mode 100644
index 00000000..b04ea582
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/sentry_sdk/worker.py
@@ -0,0 +1,141 @@
+import os
+import threading
+
+from time import sleep, time
+from sentry_sdk._queue import Queue, FullError
+from sentry_sdk.utils import logger
+from sentry_sdk.consts import DEFAULT_QUEUE_SIZE
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+    from typing import Optional
+    from typing import Callable
+
+
+_TERMINATOR = object()
+
+
+class BackgroundWorker:
+    def __init__(self, queue_size=DEFAULT_QUEUE_SIZE):
+        # type: (int) -> None
+        self._queue = Queue(queue_size)  # type: Queue
+        self._lock = threading.Lock()
+        self._thread = None  # type: Optional[threading.Thread]
+        self._thread_for_pid = None  # type: Optional[int]
+
+    @property
+    def is_alive(self):
+        # type: () -> bool
+        if self._thread_for_pid != os.getpid():
+            return False
+        if not self._thread:
+            return False
+        return self._thread.is_alive()
+
+    def _ensure_thread(self):
+        # type: () -> None
+        if not self.is_alive:
+            self.start()
+
+    def _timed_queue_join(self, timeout):
+        # type: (float) -> bool
+        deadline = time() + timeout
+        queue = self._queue
+
+        queue.all_tasks_done.acquire()
+
+        try:
+            while queue.unfinished_tasks:
+                delay = deadline - time()
+                if delay <= 0:
+                    return False
+                queue.all_tasks_done.wait(timeout=delay)
+
+            return True
+        finally:
+            queue.all_tasks_done.release()
+
+    def start(self):
+        # type: () -> None
+        with self._lock:
+            if not self.is_alive:
+                self._thread = threading.Thread(
+                    target=self._target, name="sentry-sdk.BackgroundWorker"
+                )
+                self._thread.daemon = True
+                try:
+                    self._thread.start()
+                    self._thread_for_pid = os.getpid()
+                except RuntimeError:
+                    # At this point we can no longer start because the interpreter
+                    # is already shutting down. Sadly that also means we can no
+                    # longer send out events.
+                    self._thread = None
+
+    def kill(self):
+        # type: () -> None
+        """
+        Kill the worker thread. Returns immediately. Not useful for
+        waiting on shutdown of in-flight events; use `flush` for that.
+        """
+        logger.debug("background worker got kill request")
+        with self._lock:
+            if self._thread:
+                try:
+                    self._queue.put_nowait(_TERMINATOR)
+                except FullError:
+                    logger.debug("background worker queue full, kill failed")
+
+                self._thread = None
+                self._thread_for_pid = None
+
+    def flush(self, timeout, callback=None):
+        # type: (float, Optional[Any]) -> None
+        logger.debug("background worker got flush request")
+        with self._lock:
+            if self.is_alive and timeout > 0.0:
+                self._wait_flush(timeout, callback)
+        logger.debug("background worker flushed")
+
+    def full(self):
+        # type: () -> bool
+        return self._queue.full()
+
+    def _wait_flush(self, timeout, callback):
+        # type: (float, Optional[Any]) -> None
+        initial_timeout = min(0.1, timeout)
+        if not self._timed_queue_join(initial_timeout):
+            pending = self._queue.qsize() + 1
+            logger.debug("%d event(s) pending on flush", pending)
+            if callback is not None:
+                callback(pending, timeout)
+
+            if not self._timed_queue_join(timeout - initial_timeout):
+                pending = self._queue.qsize() + 1
+                logger.error("flush timed out, dropped %s events", pending)
+
+    def submit(self, callback):
+        # type: (Callable[[], None]) -> bool
+        self._ensure_thread()
+        try:
+            self._queue.put_nowait(callback)
+            return True
+        except FullError:
+            return False
+
+    def _target(self):
+        # type: () -> None
+        while True:
+            callback = self._queue.get()
+            try:
+                if callback is _TERMINATOR:
+                    break
+                try:
+                    callback()
+                except Exception:
+                    logger.error("Failed processing job", exc_info=True)
+            finally:
+                self._queue.task_done()
+            sleep(0)
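+
+
+# Usage sketch (illustrative; BackgroundWorker is an internal helper whose API
+# may change):
+#
+#     worker = BackgroundWorker(queue_size=100)
+#     worker.submit(lambda: print("job ran"))  # queued; runs on the worker thread
+#     worker.flush(timeout=2.0)                # wait up to 2s for queued jobs
+#     worker.kill()                            # request shutdown without waiting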