Diffstat (limited to '.venv/lib/python3.12/site-packages/opentelemetry/sdk')
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/__init__.pyi  18
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/_configuration/__init__.py  460
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/_events/__init__.py  89
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/_logs/__init__.py  36
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/_logs/_internal/__init__.py  712
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/_logs/_internal/export/__init__.py  462
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/_logs/_internal/export/in_memory_log_exporter.py  51
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/_logs/export/__init__.py  35
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/environment_variables/__init__.py  782
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/error_handler/__init__.py  143
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/__init__.py  57
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/__init__.py  582
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py  153
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/aggregation.py  1475
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exceptions.py  17
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exemplar/__init__.py  39
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exemplar/exemplar.py  50
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_filter.py  134
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py  332
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exponential_histogram/__init__.py  0
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exponential_histogram/buckets.py  190
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/__init__.py  98
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/errors.py  26
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/exponent_mapping.py  141
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/ieee_754.md  175
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/ieee_754.py  117
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/logarithm_mapping.py  138
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/export/__init__.py  576
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/instrument.py  334
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/measurement.py  45
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/measurement_consumer.py  145
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/metric_reader_storage.py  315
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/point.py  277
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/sdk_configuration.py  30
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/view.py  195
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/export/__init__.py  66
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/view/__init__.py  35
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/py.typed  0
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/resources/__init__.py  541
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/trace/__init__.py  1305
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/trace/export/__init__.py  517
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/trace/export/in_memory_span_exporter.py  61
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/trace/id_generator.py  60
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/trace/sampling.py  453
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/util/__init__.py  152
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/util/__init__.pyi  74
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/util/instrumentation.py  167
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/sdk/version/__init__.py  15
48 files changed, 11875 insertions(+), 0 deletions(-)
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/__init__.pyi b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/__init__.pyi
new file mode 100644
index 00000000..e57edc0f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/__init__.pyi
@@ -0,0 +1,18 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+The OpenTelemetry SDK package is an implementation of the OpenTelemetry
+API.
+"""
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/_configuration/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/_configuration/__init__.py
new file mode 100644
index 00000000..c1852edd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/_configuration/__init__.py
@@ -0,0 +1,460 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+OpenTelemetry SDK Configurator for Easy Instrumentation with Distros
+"""
+
+from __future__ import annotations
+
+import logging
+import os
+from abc import ABC, abstractmethod
+from os import environ
+from typing import Callable, Sequence, Type, Union
+
+from typing_extensions import Literal
+
+from opentelemetry._events import set_event_logger_provider
+from opentelemetry._logs import set_logger_provider
+from opentelemetry.environment_variables import (
+    OTEL_LOGS_EXPORTER,
+    OTEL_METRICS_EXPORTER,
+    OTEL_PYTHON_ID_GENERATOR,
+    OTEL_TRACES_EXPORTER,
+)
+from opentelemetry.metrics import set_meter_provider
+from opentelemetry.sdk._events import EventLoggerProvider
+from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
+from opentelemetry.sdk._logs.export import BatchLogRecordProcessor, LogExporter
+from opentelemetry.sdk.environment_variables import (
+    _OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED,
+    OTEL_EXPORTER_OTLP_LOGS_PROTOCOL,
+    OTEL_EXPORTER_OTLP_METRICS_PROTOCOL,
+    OTEL_EXPORTER_OTLP_PROTOCOL,
+    OTEL_EXPORTER_OTLP_TRACES_PROTOCOL,
+    OTEL_TRACES_SAMPLER,
+    OTEL_TRACES_SAMPLER_ARG,
+)
+from opentelemetry.sdk.metrics import MeterProvider
+from opentelemetry.sdk.metrics.export import (
+    MetricExporter,
+    MetricReader,
+    PeriodicExportingMetricReader,
+)
+from opentelemetry.sdk.resources import Attributes, Resource
+from opentelemetry.sdk.trace import TracerProvider
+from opentelemetry.sdk.trace.export import BatchSpanProcessor, SpanExporter
+from opentelemetry.sdk.trace.id_generator import IdGenerator
+from opentelemetry.sdk.trace.sampling import Sampler
+from opentelemetry.semconv.resource import ResourceAttributes
+from opentelemetry.trace import set_tracer_provider
+from opentelemetry.util._importlib_metadata import entry_points
+
+_EXPORTER_OTLP = "otlp"
+_EXPORTER_OTLP_PROTO_GRPC = "otlp_proto_grpc"
+_EXPORTER_OTLP_PROTO_HTTP = "otlp_proto_http"
+
+_EXPORTER_BY_OTLP_PROTOCOL = {
+    "grpc": _EXPORTER_OTLP_PROTO_GRPC,
+    "http/protobuf": _EXPORTER_OTLP_PROTO_HTTP,
+}
+
+_EXPORTER_ENV_BY_SIGNAL_TYPE = {
+    "traces": OTEL_TRACES_EXPORTER,
+    "metrics": OTEL_METRICS_EXPORTER,
+    "logs": OTEL_LOGS_EXPORTER,
+}
+
+_PROTOCOL_ENV_BY_SIGNAL_TYPE = {
+    "traces": OTEL_EXPORTER_OTLP_TRACES_PROTOCOL,
+    "metrics": OTEL_EXPORTER_OTLP_METRICS_PROTOCOL,
+    "logs": OTEL_EXPORTER_OTLP_LOGS_PROTOCOL,
+}
+
+_RANDOM_ID_GENERATOR = "random"
+_DEFAULT_ID_GENERATOR = _RANDOM_ID_GENERATOR
+
+_OTEL_SAMPLER_ENTRY_POINT_GROUP = "opentelemetry_traces_sampler"
+
+_logger = logging.getLogger(__name__)
+
+
+def _import_config_components(
+    selected_components: list[str], entry_point_name: str
+) -> Sequence[tuple[str, object]]:
+    component_implementations = []
+
+    for selected_component in selected_components:
+        try:
+            component_implementations.append(
+                (
+                    selected_component,
+                    next(
+                        iter(
+                            entry_points(
+                                group=entry_point_name, name=selected_component
+                            )
+                        )
+                    ).load(),
+                )
+            )
+        except KeyError:
+            raise RuntimeError(
+                f"Requested entry point '{entry_point_name}' not found"
+            )
+
+        except StopIteration:
+            raise RuntimeError(
+                f"Requested component '{selected_component}' not found in "
+                f"entry point '{entry_point_name}'"
+            )
+
+    return component_implementations
+
+
+def _get_sampler() -> str | None:
+    return environ.get(OTEL_TRACES_SAMPLER, None)
+
+
+def _get_id_generator() -> str:
+    return environ.get(OTEL_PYTHON_ID_GENERATOR, _DEFAULT_ID_GENERATOR)
+
+
+def _get_exporter_entry_point(
+    exporter_name: str, signal_type: Literal["traces", "metrics", "logs"]
+):
+    if exporter_name not in (
+        _EXPORTER_OTLP,
+        _EXPORTER_OTLP_PROTO_GRPC,
+        _EXPORTER_OTLP_PROTO_HTTP,
+    ):
+        return exporter_name
+
+    # Checking env vars for OTLP protocol (grpc/http).
+    otlp_protocol = environ.get(
+        _PROTOCOL_ENV_BY_SIGNAL_TYPE[signal_type]
+    ) or environ.get(OTEL_EXPORTER_OTLP_PROTOCOL)
+
+    if not otlp_protocol:
+        if exporter_name == _EXPORTER_OTLP:
+            return _EXPORTER_OTLP_PROTO_GRPC
+        return exporter_name
+
+    otlp_protocol = otlp_protocol.strip()
+
+    if exporter_name == _EXPORTER_OTLP:
+        if otlp_protocol not in _EXPORTER_BY_OTLP_PROTOCOL:
+            # Invalid value was set by the env var
+            raise RuntimeError(
+                f"Unsupported OTLP protocol '{otlp_protocol}' is configured"
+            )
+
+        return _EXPORTER_BY_OTLP_PROTOCOL[otlp_protocol]
+
+    # grpc/http already specified by exporter_name, only add a warning in case
+    # of a conflict.
+    exporter_name_by_env = _EXPORTER_BY_OTLP_PROTOCOL.get(otlp_protocol)
+    if exporter_name_by_env and exporter_name != exporter_name_by_env:
+        _logger.warning(
+            "Conflicting values for %s OTLP exporter protocol, using '%s'",
+            signal_type,
+            exporter_name,
+        )
+
+    return exporter_name
+
+
+def _get_exporter_names(
+    signal_type: Literal["traces", "metrics", "logs"],
+) -> Sequence[str]:
+    names = environ.get(_EXPORTER_ENV_BY_SIGNAL_TYPE.get(signal_type, ""))
+
+    if not names or names.lower().strip() == "none":
+        return []
+
+    return [
+        _get_exporter_entry_point(_exporter.strip(), signal_type)
+        for _exporter in names.split(",")
+    ]
+
+
+def _init_tracing(
+    exporters: dict[str, Type[SpanExporter]],
+    id_generator: IdGenerator | None = None,
+    sampler: Sampler | None = None,
+    resource: Resource | None = None,
+):
+    provider = TracerProvider(
+        id_generator=id_generator,
+        sampler=sampler,
+        resource=resource,
+    )
+    set_tracer_provider(provider)
+
+    for _, exporter_class in exporters.items():
+        exporter_args = {}
+        provider.add_span_processor(
+            BatchSpanProcessor(exporter_class(**exporter_args))
+        )
+
+
+def _init_metrics(
+    exporters_or_readers: dict[
+        str, Union[Type[MetricExporter], Type[MetricReader]]
+    ],
+    resource: Resource | None = None,
+):
+    metric_readers = []
+
+    for _, exporter_or_reader_class in exporters_or_readers.items():
+        exporter_args = {}
+
+        if issubclass(exporter_or_reader_class, MetricReader):
+            metric_readers.append(exporter_or_reader_class(**exporter_args))
+        else:
+            metric_readers.append(
+                PeriodicExportingMetricReader(
+                    exporter_or_reader_class(**exporter_args)
+                )
+            )
+
+    provider = MeterProvider(resource=resource, metric_readers=metric_readers)
+    set_meter_provider(provider)
+
+
+def _init_logging(
+    exporters: dict[str, Type[LogExporter]],
+    resource: Resource | None = None,
+    setup_logging_handler: bool = True,
+):
+    provider = LoggerProvider(resource=resource)
+    set_logger_provider(provider)
+
+    for _, exporter_class in exporters.items():
+        exporter_args = {}
+        provider.add_log_record_processor(
+            BatchLogRecordProcessor(exporter_class(**exporter_args))
+        )
+
+    event_logger_provider = EventLoggerProvider(logger_provider=provider)
+    set_event_logger_provider(event_logger_provider)
+
+    if setup_logging_handler:
+        handler = LoggingHandler(
+            level=logging.NOTSET, logger_provider=provider
+        )
+        logging.getLogger().addHandler(handler)
+
+
+def _import_exporters(
+    trace_exporter_names: Sequence[str],
+    metric_exporter_names: Sequence[str],
+    log_exporter_names: Sequence[str],
+) -> tuple[
+    dict[str, Type[SpanExporter]],
+    dict[str, Union[Type[MetricExporter], Type[MetricReader]]],
+    dict[str, Type[LogExporter]],
+]:
+    trace_exporters = {}
+    metric_exporters = {}
+    log_exporters = {}
+
+    for (
+        exporter_name,
+        exporter_impl,
+    ) in _import_config_components(
+        trace_exporter_names, "opentelemetry_traces_exporter"
+    ):
+        if issubclass(exporter_impl, SpanExporter):
+            trace_exporters[exporter_name] = exporter_impl
+        else:
+            raise RuntimeError(f"{exporter_name} is not a trace exporter")
+
+    for (
+        exporter_name,
+        exporter_impl,
+    ) in _import_config_components(
+        metric_exporter_names, "opentelemetry_metrics_exporter"
+    ):
+        # The metric exporter components may be push MetricExporters or pull
+        # exporters that subclass MetricReader directly.
+        if issubclass(exporter_impl, (MetricExporter, MetricReader)):
+            metric_exporters[exporter_name] = exporter_impl
+        else:
+            raise RuntimeError(f"{exporter_name} is not a metric exporter")
+
+    for (
+        exporter_name,
+        exporter_impl,
+    ) in _import_config_components(
+        log_exporter_names, "opentelemetry_logs_exporter"
+    ):
+        if issubclass(exporter_impl, LogExporter):
+            log_exporters[exporter_name] = exporter_impl
+        else:
+            raise RuntimeError(f"{exporter_name} is not a log exporter")
+
+    return trace_exporters, metric_exporters, log_exporters
+
+
+def _import_sampler_factory(sampler_name: str) -> Callable[[str], Sampler]:
+    _, sampler_impl = _import_config_components(
+        [sampler_name.strip()], _OTEL_SAMPLER_ENTRY_POINT_GROUP
+    )[0]
+    return sampler_impl
+
+
+def _import_sampler(sampler_name: str) -> Sampler | None:
+    if not sampler_name:
+        return None
+    try:
+        sampler_factory = _import_sampler_factory(sampler_name)
+        arg = None
+        if sampler_name in ("traceidratio", "parentbased_traceidratio"):
+            try:
+                rate = float(os.getenv(OTEL_TRACES_SAMPLER_ARG))
+            except (ValueError, TypeError):
+                _logger.warning(
+                    "Could not convert OTEL_TRACES_SAMPLER_ARG to float. Using default value 1.0."
+                )
+                rate = 1.0
+            arg = rate
+        else:
+            arg = os.getenv(OTEL_TRACES_SAMPLER_ARG)
+
+        sampler = sampler_factory(arg)
+        if not isinstance(sampler, Sampler):
+            message = f"Sampler factory, {sampler_factory}, produced output, {sampler}, which is not a Sampler."
+            _logger.warning(message)
+            raise ValueError(message)
+        return sampler
+    except Exception as exc:  # pylint: disable=broad-exception-caught
+        _logger.warning(
+            "Using default sampler. Failed to initialize sampler, %s: %s",
+            sampler_name,
+            exc,
+        )
+        return None
+
+
+def _import_id_generator(id_generator_name: str) -> IdGenerator:
+    id_generator_name, id_generator_impl = _import_config_components(
+        [id_generator_name.strip()], "opentelemetry_id_generator"
+    )[0]
+
+    if issubclass(id_generator_impl, IdGenerator):
+        return id_generator_impl()
+
+    raise RuntimeError(f"{id_generator_name} is not an IdGenerator")
+
+
+def _initialize_components(
+    auto_instrumentation_version: str | None = None,
+    trace_exporter_names: list[str] | None = None,
+    metric_exporter_names: list[str] | None = None,
+    log_exporter_names: list[str] | None = None,
+    sampler: Sampler | None = None,
+    resource_attributes: Attributes | None = None,
+    id_generator: IdGenerator | None = None,
+    setup_logging_handler: bool | None = None,
+):
+    if trace_exporter_names is None:
+        trace_exporter_names = []
+    if metric_exporter_names is None:
+        metric_exporter_names = []
+    if log_exporter_names is None:
+        log_exporter_names = []
+    span_exporters, metric_exporters, log_exporters = _import_exporters(
+        trace_exporter_names + _get_exporter_names("traces"),
+        metric_exporter_names + _get_exporter_names("metrics"),
+        log_exporter_names + _get_exporter_names("logs"),
+    )
+    if sampler is None:
+        sampler_name = _get_sampler()
+        sampler = _import_sampler(sampler_name)
+    if id_generator is None:
+        id_generator_name = _get_id_generator()
+        id_generator = _import_id_generator(id_generator_name)
+    if resource_attributes is None:
+        resource_attributes = {}
+    # populate version if using auto-instrumentation
+    if auto_instrumentation_version:
+        resource_attributes[ResourceAttributes.TELEMETRY_AUTO_VERSION] = (
+            auto_instrumentation_version
+        )
+    # Resource.create() also reads OTEL_RESOURCE_ATTRIBUTES and
+    # OTEL_SERVICE_NAME from the environment; service.name defaults to
+    # "unknown_service" when neither is provided.
+    resource = Resource.create(resource_attributes)
+
+    _init_tracing(
+        exporters=span_exporters,
+        id_generator=id_generator,
+        sampler=sampler,
+        resource=resource,
+    )
+    _init_metrics(metric_exporters, resource)
+    if setup_logging_handler is None:
+        setup_logging_handler = (
+            os.getenv(
+                _OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED, "false"
+            )
+            .strip()
+            .lower()
+            == "true"
+        )
+    _init_logging(log_exporters, resource, setup_logging_handler)
+
+
+class _BaseConfigurator(ABC):
+    """An ABC for configurators
+
+    Configurators are used to configure SDK components (e.g. TracerProvider,
+    MeterProvider, processors) to reduce the amount of manual configuration
+    required.
+    """
+
+    _instance = None
+    _is_instrumented = False
+
+    def __new__(cls, *args, **kwargs):
+        if cls._instance is None:
+            cls._instance = object.__new__(cls)
+
+        return cls._instance
+
+    @abstractmethod
+    def _configure(self, **kwargs):
+        """Configure the SDK"""
+
+    def configure(self, **kwargs):
+        """Configure the SDK"""
+        self._configure(**kwargs)
+
+
+class _OTelSDKConfigurator(_BaseConfigurator):
+    """A basic Configurator by OTel Python for initializing OTel SDK components
+
+    Initializes several crucial OTel SDK components (e.g. TracerProvider,
+    MeterProvider, Processors...) according to a default implementation. Other
+    Configurators can subclass and slightly alter this initialization.
+
+    NOTE: This class should not be instantiated nor should it become an entry
+    point on the `opentelemetry-sdk` package. Instead, distros should subclass
+    this Configurator and enhance it as needed.
+    """
+
+    def _configure(self, **kwargs):
+        _initialize_components(**kwargs)
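
The configurator above is normally driven through environment variables and entry points rather than called by hand. As a minimal sketch of how a distro might wire it up (the subclass name and the "console" exporter selections below are illustrative assumptions, not part of this diff):

import os

from opentelemetry.sdk._configuration import _OTelSDKConfigurator

# Select the console exporters that ship with opentelemetry-sdk via the
# standard signal-specific environment variables.
os.environ.setdefault("OTEL_TRACES_EXPORTER", "console")
os.environ.setdefault("OTEL_METRICS_EXPORTER", "console")
os.environ.setdefault("OTEL_LOGS_EXPORTER", "console")


class MyDistroConfigurator(_OTelSDKConfigurator):
    """Hypothetical distro subclass; inherits the default _configure()."""


# Sets up the global tracer, meter, logger and event logger providers.
MyDistroConfigurator().configure(
    resource_attributes={"service.name": "example-service"},
)
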
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/_events/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/_events/__init__.py
new file mode 100644
index 00000000..ae163025
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/_events/__init__.py
@@ -0,0 +1,89 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+from time import time_ns
+from typing import Optional
+
+from opentelemetry import trace
+from opentelemetry._events import Event
+from opentelemetry._events import EventLogger as APIEventLogger
+from opentelemetry._events import EventLoggerProvider as APIEventLoggerProvider
+from opentelemetry._logs import NoOpLogger, SeverityNumber, get_logger_provider
+from opentelemetry.sdk._logs import Logger, LoggerProvider, LogRecord
+from opentelemetry.util.types import Attributes
+
+_logger = logging.getLogger(__name__)
+
+
+class EventLogger(APIEventLogger):
+    def __init__(
+        self,
+        logger_provider: LoggerProvider,
+        name: str,
+        version: Optional[str] = None,
+        schema_url: Optional[str] = None,
+        attributes: Optional[Attributes] = None,
+    ):
+        super().__init__(
+            name=name,
+            version=version,
+            schema_url=schema_url,
+            attributes=attributes,
+        )
+        self._logger: Logger = logger_provider.get_logger(
+            name, version, schema_url, attributes
+        )
+
+    def emit(self, event: Event) -> None:
+        if isinstance(self._logger, NoOpLogger):
+            # Do nothing if SDK is disabled
+            return
+        span_context = trace.get_current_span().get_span_context()
+        log_record = LogRecord(
+            timestamp=event.timestamp or time_ns(),
+            observed_timestamp=None,
+            trace_id=event.trace_id or span_context.trace_id,
+            span_id=event.span_id or span_context.span_id,
+            trace_flags=event.trace_flags or span_context.trace_flags,
+            severity_text=None,
+            severity_number=event.severity_number or SeverityNumber.INFO,
+            body=event.body,
+            resource=getattr(self._logger, "resource", None),
+            attributes=event.attributes,
+        )
+        self._logger.emit(log_record)
+
+
+class EventLoggerProvider(APIEventLoggerProvider):
+    def __init__(self, logger_provider: Optional[LoggerProvider] = None):
+        self._logger_provider = logger_provider or get_logger_provider()
+
+    def get_event_logger(
+        self,
+        name: str,
+        version: Optional[str] = None,
+        schema_url: Optional[str] = None,
+        attributes: Optional[Attributes] = None,
+    ) -> EventLogger:
+        if not name:
+            _logger.warning("EventLogger created with invalid name: %s", name)
+        return EventLogger(
+            self._logger_provider, name, version, schema_url, attributes
+        )
+
+    def shutdown(self):
+        self._logger_provider.shutdown()
+
+    def force_flush(self, timeout_millis: int = 30000) -> bool:
+        return self._logger_provider.force_flush(timeout_millis)
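
A minimal usage sketch for the event classes above (assumes an SDK LoggerProvider is available; the scope and event names are illustrative):

from opentelemetry._events import Event
from opentelemetry.sdk._events import EventLoggerProvider
from opentelemetry.sdk._logs import LoggerProvider

event_logger_provider = EventLoggerProvider(logger_provider=LoggerProvider())
event_logger = event_logger_provider.get_event_logger("example.scope")

# emit() converts the Event into a LogRecord, filling in the current
# span context and an INFO severity by default.
event_logger.emit(Event(name="example.event", body={"detail": "hello"}))
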
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/_logs/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/_logs/__init__.py
new file mode 100644
index 00000000..0254c135
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/_logs/__init__.py
@@ -0,0 +1,36 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from opentelemetry.sdk._logs._internal import (
+    LogData,
+    LogDroppedAttributesWarning,
+    Logger,
+    LoggerProvider,
+    LoggingHandler,
+    LogLimits,
+    LogRecord,
+    LogRecordProcessor,
+)
+
+__all__ = [
+    "LogData",
+    "Logger",
+    "LoggerProvider",
+    "LoggingHandler",
+    "LogLimits",
+    "LogRecord",
+    "LogRecordProcessor",
+    "LogDroppedAttributesWarning",
+]
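
This module is the intended import surface for the internal implementations above; for example:

from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
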
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/_logs/_internal/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/_logs/_internal/__init__.py
new file mode 100644
index 00000000..302ca1ed
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/_logs/_internal/__init__.py
@@ -0,0 +1,712 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import annotations
+
+import abc
+import atexit
+import concurrent.futures
+import json
+import logging
+import threading
+import traceback
+import warnings
+from os import environ
+from threading import Lock
+from time import time_ns
+from typing import Any, Callable, Tuple, Union  # noqa
+
+from opentelemetry._logs import Logger as APILogger
+from opentelemetry._logs import LoggerProvider as APILoggerProvider
+from opentelemetry._logs import LogRecord as APILogRecord
+from opentelemetry._logs import (
+    NoOpLogger,
+    SeverityNumber,
+    get_logger,
+    get_logger_provider,
+    std_to_otel,
+)
+from opentelemetry.attributes import BoundedAttributes
+from opentelemetry.sdk.environment_variables import (
+    OTEL_ATTRIBUTE_COUNT_LIMIT,
+    OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT,
+    OTEL_SDK_DISABLED,
+)
+from opentelemetry.sdk.resources import Resource
+from opentelemetry.sdk.util import ns_to_iso_str
+from opentelemetry.sdk.util.instrumentation import InstrumentationScope
+from opentelemetry.semconv.trace import SpanAttributes
+from opentelemetry.trace import (
+    format_span_id,
+    format_trace_id,
+    get_current_span,
+)
+from opentelemetry.trace.span import TraceFlags
+from opentelemetry.util.types import AnyValue, Attributes
+
+_logger = logging.getLogger(__name__)
+
+_DEFAULT_OTEL_ATTRIBUTE_COUNT_LIMIT = 128
+_ENV_VALUE_UNSET = ""
+
+
+class LogDroppedAttributesWarning(UserWarning):
+    """Custom warning to indicate dropped log attributes due to limits.
+
+    This class is used to filter and handle these specific warnings separately
+    from other warnings, ensuring that they are only shown once without
+    interfering with default user warnings.
+    """
+
+
+warnings.simplefilter("once", LogDroppedAttributesWarning)
+
+
+class LogLimits:
+    """This class is based on a SpanLimits class in the Tracing module.
+
+    This class represents the limits that should be enforced on recorded
+    log data, such as attributes.
+
+    This class does not enforce any limits itself. It only provides a way to read limits from env,
+    default values and from user provided arguments.
+
+    All limit arguments must be either a non-negative integer, ``None`` or ``LogLimits.UNSET``.
+
+    - All limit arguments are optional.
+    - If a limit argument is not set, the class will try to read its value from the corresponding
+      environment variable.
+    - If the environment variable is not set, the default value, if any, will be used.
+
+    Limit precedence:
+
+    - If a model specific limit is set, it will be used.
+    - Else if the corresponding global limit is set, it will be used.
+    - Else if the model specific limit has a default value, the default value will be used.
+    - Else if the global limit has a default value, the default value will be used.
+
+    Args:
+        max_attributes: Maximum number of attributes that can be added to a log record.
+            Environment variable: ``OTEL_ATTRIBUTE_COUNT_LIMIT``
+            Default: 128
+        max_attribute_length: Maximum length an attribute value can have. Values longer than
+            the specified length will be truncated.
+    """
+
+    UNSET = -1
+
+    def __init__(
+        self,
+        max_attributes: int | None = None,
+        max_attribute_length: int | None = None,
+    ):
+        # attribute count
+        global_max_attributes = self._from_env_if_absent(
+            max_attributes, OTEL_ATTRIBUTE_COUNT_LIMIT
+        )
+        self.max_attributes = (
+            global_max_attributes
+            if global_max_attributes is not None
+            else _DEFAULT_OTEL_ATTRIBUTE_COUNT_LIMIT
+        )
+
+        # attribute length
+        self.max_attribute_length = self._from_env_if_absent(
+            max_attribute_length,
+            OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT,
+        )
+
+    def __repr__(self):
+        return f"{type(self).__name__}(max_attributes={self.max_attributes}, max_attribute_length={self.max_attribute_length})"
+
+    @classmethod
+    def _from_env_if_absent(
+        cls, value: int | None, env_var: str, default: int | None = None
+    ) -> int | None:
+        if value == cls.UNSET:
+            return None
+
+        err_msg = "{} must be a non-negative integer but got {}"
+
+        # if no value is provided for the limit, try to load it from env
+        if value is None:
+            # return default value if env var is not set
+            if env_var not in environ:
+                return default
+
+            str_value = environ.get(env_var, "").strip().lower()
+            if str_value == _ENV_VALUE_UNSET:
+                return None
+
+            try:
+                value = int(str_value)
+            except ValueError:
+                raise ValueError(err_msg.format(env_var, str_value))
+
+        if value < 0:
+            raise ValueError(err_msg.format(env_var, value))
+        return value
+
+
+_UnsetLogLimits = LogLimits(
+    max_attributes=LogLimits.UNSET,
+    max_attribute_length=LogLimits.UNSET,
+)
+
+
+class LogRecord(APILogRecord):
+    """A LogRecord instance represents an event being logged.
+
+    LogRecord instances are created and emitted via `Logger`
+    every time something is logged. They contain all the information
+    pertinent to the event being logged.
+    """
+
+    def __init__(
+        self,
+        timestamp: int | None = None,
+        observed_timestamp: int | None = None,
+        trace_id: int | None = None,
+        span_id: int | None = None,
+        trace_flags: TraceFlags | None = None,
+        severity_text: str | None = None,
+        severity_number: SeverityNumber | None = None,
+        body: AnyValue | None = None,
+        resource: Resource | None = None,
+        attributes: Attributes | None = None,
+        limits: LogLimits | None = _UnsetLogLimits,
+    ):
+        super().__init__(
+            **{
+                "timestamp": timestamp,
+                "observed_timestamp": observed_timestamp,
+                "trace_id": trace_id,
+                "span_id": span_id,
+                "trace_flags": trace_flags,
+                "severity_text": severity_text,
+                "severity_number": severity_number,
+                "body": body,
+                "attributes": BoundedAttributes(
+                    maxlen=limits.max_attributes,
+                    attributes=attributes if bool(attributes) else None,
+                    immutable=False,
+                    max_value_len=limits.max_attribute_length,
+                ),
+            }
+        )
+        self.resource = (
+            resource if isinstance(resource, Resource) else Resource.create({})
+        )
+        if self.dropped_attributes > 0:
+            warnings.warn(
+                "Log record attributes were dropped due to limits",
+                LogDroppedAttributesWarning,
+                stacklevel=2,
+            )
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, LogRecord):
+            return NotImplemented
+        return self.__dict__ == other.__dict__
+
+    def to_json(self, indent: int | None = 4) -> str:
+        return json.dumps(
+            {
+                "body": self.body,
+                "severity_number": self.severity_number.value
+                if self.severity_number is not None
+                else None,
+                "severity_text": self.severity_text,
+                "attributes": (
+                    dict(self.attributes) if bool(self.attributes) else None
+                ),
+                "dropped_attributes": self.dropped_attributes,
+                "timestamp": ns_to_iso_str(self.timestamp),
+                "observed_timestamp": ns_to_iso_str(self.observed_timestamp),
+                "trace_id": (
+                    f"0x{format_trace_id(self.trace_id)}"
+                    if self.trace_id is not None
+                    else ""
+                ),
+                "span_id": (
+                    f"0x{format_span_id(self.span_id)}"
+                    if self.span_id is not None
+                    else ""
+                ),
+                "trace_flags": self.trace_flags,
+                "resource": json.loads(self.resource.to_json()),
+            },
+            indent=indent,
+        )
+
+    @property
+    def dropped_attributes(self) -> int:
+        if self.attributes:
+            return self.attributes.dropped
+        return 0
+
+
+class LogData:
+    """Readable LogRecord data plus associated InstrumentationScope."""
+
+    def __init__(
+        self,
+        log_record: LogRecord,
+        instrumentation_scope: InstrumentationScope,
+    ):
+        self.log_record = log_record
+        self.instrumentation_scope = instrumentation_scope
+
+
+class LogRecordProcessor(abc.ABC):
+    """Interface to hook into the log record emitting action.
+
+    Log processors can be registered directly using
+    :func:`LoggerProvider.add_log_record_processor` and they are invoked
+    in the same order as they were registered.
+    """
+
+    @abc.abstractmethod
+    def emit(self, log_data: LogData):
+        """Emits the `LogData`"""
+
+    @abc.abstractmethod
+    def shutdown(self):
+        """Called when a :class:`opentelemetry.sdk._logs.Logger` is shutdown"""
+
+    @abc.abstractmethod
+    def force_flush(self, timeout_millis: int = 30000):
+        """Export all received logs that have not yet been exported to the
+        configured Exporter.
+
+        Args:
+            timeout_millis: The maximum amount of time to wait for logs to be
+                exported.
+
+        Returns:
+            False if the timeout is exceeded, True otherwise.
+        """
+
+
+# Temporary fix until https://github.com/PyCQA/pylint/issues/4098 is resolved
+# pylint:disable=no-member
+class SynchronousMultiLogRecordProcessor(LogRecordProcessor):
+    """Implementation of :class:`LogRecordProcessor` that forwards all received
+    events to a list of log processors sequentially.
+
+    The underlying log processors are called in sequential order as they were
+    added.
+    """
+
+    def __init__(self):
+        # Use a tuple to avoid race conditions between adding a new log
+        # record processor and iterating over the tuple in "emit".
+        self._log_record_processors = ()  # type: Tuple[LogRecordProcessor, ...]
+        self._lock = threading.Lock()
+
+    def add_log_record_processor(
+        self, log_record_processor: LogRecordProcessor
+    ) -> None:
+        """Adds a LogRecordProcessor to the list of log processors handled by this instance."""
+        with self._lock:
+            self._log_record_processors += (log_record_processor,)
+
+    def emit(self, log_data: LogData) -> None:
+        for lp in self._log_record_processors:
+            lp.emit(log_data)
+
+    def shutdown(self) -> None:
+        """Shutdown the log processors one by one"""
+        for lp in self._log_record_processors:
+            lp.shutdown()
+
+    def force_flush(self, timeout_millis: int = 30000) -> bool:
+        """Force flush the log processors one by one.
+
+        Args:
+            timeout_millis: The maximum amount of time to wait for logs to be
+                exported. If the first n log processors exceed the timeout,
+                the remaining log processors will not be flushed.
+
+        Returns:
+            True if all the log processors flush the logs within the timeout,
+            False otherwise.
+        """
+        deadline_ns = time_ns() + timeout_millis * 1000000
+        for lp in self._log_record_processors:
+            current_ts = time_ns()
+            if current_ts >= deadline_ns:
+                return False
+
+            if not lp.force_flush((deadline_ns - current_ts) // 1000000):
+                return False
+
+        return True
+
+
+class ConcurrentMultiLogRecordProcessor(LogRecordProcessor):
+    """Implementation of :class:`LogRecordProcessor` that forwards all received
+    events to a list of log processors in parallel.
+
+    Calls to the underlying log processors are forwarded in parallel by
+    submitting them to a thread pool executor and waiting until each log
+    processor has finished its work.
+
+    Args:
+        max_workers: The number of threads managed by the thread pool executor,
+            which determines how many log processors can work in parallel.
+    """
+
+    def __init__(self, max_workers: int = 2):
+        # Use a tuple to avoid race conditions between adding a new log
+        # record processor and iterating over the tuple in "emit".
+        self._log_record_processors = ()  # type: Tuple[LogRecordProcessor, ...]
+        self._lock = threading.Lock()
+        self._executor = concurrent.futures.ThreadPoolExecutor(
+            max_workers=max_workers
+        )
+
+    def add_log_record_processor(
+        self, log_record_processor: LogRecordProcessor
+    ):
+        with self._lock:
+            self._log_record_processors += (log_record_processor,)
+
+    def _submit_and_wait(
+        self,
+        func: Callable[[LogRecordProcessor], Callable[..., None]],
+        *args: Any,
+        **kwargs: Any,
+    ):
+        futures = []
+        for lp in self._log_record_processors:
+            future = self._executor.submit(func(lp), *args, **kwargs)
+            futures.append(future)
+        for future in futures:
+            future.result()
+
+    def emit(self, log_data: LogData):
+        self._submit_and_wait(lambda lp: lp.emit, log_data)
+
+    def shutdown(self):
+        self._submit_and_wait(lambda lp: lp.shutdown)
+
+    def force_flush(self, timeout_millis: int = 30000) -> bool:
+        """Force flush the log processors in parallel.
+
+        Args:
+            timeout_millis: The maximum amount of time to wait for logs to be
+                exported.
+
+        Returns:
+            True if all the log processors flush the logs within the timeout,
+            False otherwise.
+        """
+        futures = []
+        for lp in self._log_record_processors:
+            future = self._executor.submit(lp.force_flush, timeout_millis)
+            futures.append(future)
+
+        done_futures, not_done_futures = concurrent.futures.wait(
+            futures, timeout_millis / 1e3
+        )
+
+        if not_done_futures:
+            return False
+
+        for future in done_futures:
+            if not future.result():
+                return False
+
+        return True
+
+
+# skip natural LogRecord attributes
+# http://docs.python.org/library/logging.html#logrecord-attributes
+_RESERVED_ATTRS = frozenset(
+    (
+        "asctime",
+        "args",
+        "created",
+        "exc_info",
+        "exc_text",
+        "filename",
+        "funcName",
+        "getMessage",
+        "message",
+        "levelname",
+        "levelno",
+        "lineno",
+        "module",
+        "msecs",
+        "msg",
+        "name",
+        "pathname",
+        "process",
+        "processName",
+        "relativeCreated",
+        "stack_info",
+        "thread",
+        "threadName",
+        "taskName",
+    )
+)
+
+
+class LoggingHandler(logging.Handler):
+    """A handler class that translates records from the standard `logging`
+    module into OTel log records and emits them through the SDK log pipeline.
+    https://docs.python.org/3/library/logging.html
+    """
+
+    def __init__(
+        self,
+        level=logging.NOTSET,
+        logger_provider=None,
+    ) -> None:
+        super().__init__(level=level)
+        self._logger_provider = logger_provider or get_logger_provider()
+
+    @staticmethod
+    def _get_attributes(record: logging.LogRecord) -> Attributes:
+        attributes = {
+            k: v for k, v in vars(record).items() if k not in _RESERVED_ATTRS
+        }
+
+        # Add standard code attributes for logs.
+        attributes[SpanAttributes.CODE_FILEPATH] = record.pathname
+        attributes[SpanAttributes.CODE_FUNCTION] = record.funcName
+        attributes[SpanAttributes.CODE_LINENO] = record.lineno
+
+        if record.exc_info:
+            exctype, value, tb = record.exc_info
+            if exctype is not None:
+                attributes[SpanAttributes.EXCEPTION_TYPE] = exctype.__name__
+            if value is not None and value.args:
+                attributes[SpanAttributes.EXCEPTION_MESSAGE] = str(
+                    value.args[0]
+                )
+            if tb is not None:
+                # https://github.com/open-telemetry/opentelemetry-specification/blob/9fa7c656b26647b27e485a6af7e38dc716eba98a/specification/trace/semantic_conventions/exceptions.md#stacktrace-representation
+                attributes[SpanAttributes.EXCEPTION_STACKTRACE] = "".join(
+                    traceback.format_exception(*record.exc_info)
+                )
+        return attributes
+
+    def _translate(self, record: logging.LogRecord) -> LogRecord:
+        timestamp = int(record.created * 1e9)
+        observed_timestamp = time_ns()
+        span_context = get_current_span().get_span_context()
+        attributes = self._get_attributes(record)
+        severity_number = std_to_otel(record.levelno)
+        if self.formatter:
+            body = self.format(record)
+        else:
+            # `record.getMessage()` uses `record.msg` as a template to format
+            # `record.args` into. There is a special case in `record.getMessage()`
+            # where it will only attempt formatting if args are provided,
+            # otherwise, it just stringifies `record.msg`.
+            #
+            # Since the OTLP body field has a type of 'any' and the logging module
+            # is sometimes used in such a way that objects incorrectly end up
+            # set as record.msg, in those cases we would like to bypass
+            # `record.getMessage()` completely and set the body to the object
+            # itself instead of its string representation.
+            # For more background, see: https://github.com/open-telemetry/opentelemetry-python/pull/4216
+            if not record.args and not isinstance(record.msg, str):
+                # no args are provided so it's *mostly* safe to use the message template as the body
+                body = record.msg
+            else:
+                body = record.getMessage()
+
+        # related to https://github.com/open-telemetry/opentelemetry-python/issues/3548
+        # Severity Text = WARN as defined in https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md#displaying-severity.
+        level_name = (
+            "WARN" if record.levelname == "WARNING" else record.levelname
+        )
+
+        logger = get_logger(record.name, logger_provider=self._logger_provider)
+        return LogRecord(
+            timestamp=timestamp,
+            observed_timestamp=observed_timestamp,
+            trace_id=span_context.trace_id,
+            span_id=span_context.span_id,
+            trace_flags=span_context.trace_flags,
+            severity_text=level_name,
+            severity_number=severity_number,
+            body=body,
+            resource=logger.resource,
+            attributes=attributes,
+        )
+
+    def emit(self, record: logging.LogRecord) -> None:
+        """
+        Emit a record. Skip emitting if logger is NoOp.
+
+        The record is translated to OTel format, and then sent across the pipeline.
+        """
+        logger = get_logger(record.name, logger_provider=self._logger_provider)
+        if not isinstance(logger, NoOpLogger):
+            logger.emit(self._translate(record))
+
+    def flush(self) -> None:
+        """
+        Flushes the logging output. Skip flushing if the logger_provider has no force_flush method.
+        """
+        if hasattr(self._logger_provider, "force_flush") and callable(
+            self._logger_provider.force_flush
+        ):
+            self._logger_provider.force_flush()
+
+
+class Logger(APILogger):
+    def __init__(
+        self,
+        resource: Resource,
+        multi_log_record_processor: Union[
+            SynchronousMultiLogRecordProcessor,
+            ConcurrentMultiLogRecordProcessor,
+        ],
+        instrumentation_scope: InstrumentationScope,
+    ):
+        super().__init__(
+            instrumentation_scope.name,
+            instrumentation_scope.version,
+            instrumentation_scope.schema_url,
+            instrumentation_scope.attributes,
+        )
+        self._resource = resource
+        self._multi_log_record_processor = multi_log_record_processor
+        self._instrumentation_scope = instrumentation_scope
+
+    @property
+    def resource(self):
+        return self._resource
+
+    def emit(self, record: LogRecord):
+        """Emits the :class:`LogData` by associating :class:`LogRecord`
+        and instrumentation info.
+        """
+        log_data = LogData(record, self._instrumentation_scope)
+        self._multi_log_record_processor.emit(log_data)
+
+
+class LoggerProvider(APILoggerProvider):
+    def __init__(
+        self,
+        resource: Resource | None = None,
+        shutdown_on_exit: bool = True,
+        multi_log_record_processor: SynchronousMultiLogRecordProcessor
+        | ConcurrentMultiLogRecordProcessor
+        | None = None,
+    ):
+        if resource is None:
+            self._resource = Resource.create({})
+        else:
+            self._resource = resource
+        self._multi_log_record_processor = (
+            multi_log_record_processor or SynchronousMultiLogRecordProcessor()
+        )
+        disabled = environ.get(OTEL_SDK_DISABLED, "")
+        self._disabled = disabled.lower().strip() == "true"
+        self._at_exit_handler = None
+        if shutdown_on_exit:
+            self._at_exit_handler = atexit.register(self.shutdown)
+        self._logger_cache = {}
+        self._logger_cache_lock = Lock()
+
+    @property
+    def resource(self):
+        return self._resource
+
+    def _get_logger_no_cache(
+        self,
+        name: str,
+        version: str | None = None,
+        schema_url: str | None = None,
+        attributes: Attributes | None = None,
+    ) -> Logger:
+        return Logger(
+            self._resource,
+            self._multi_log_record_processor,
+            InstrumentationScope(
+                name,
+                version,
+                schema_url,
+                attributes,
+            ),
+        )
+
+    def _get_logger_cached(
+        self,
+        name: str,
+        version: str | None = None,
+        schema_url: str | None = None,
+    ) -> Logger:
+        with self._logger_cache_lock:
+            key = (name, version, schema_url)
+            if key in self._logger_cache:
+                return self._logger_cache[key]
+
+            self._logger_cache[key] = self._get_logger_no_cache(
+                name, version, schema_url
+            )
+            return self._logger_cache[key]
+
+    def get_logger(
+        self,
+        name: str,
+        version: str | None = None,
+        schema_url: str | None = None,
+        attributes: Attributes | None = None,
+    ) -> Logger:
+        if self._disabled:
+            return NoOpLogger(
+                name,
+                version=version,
+                schema_url=schema_url,
+                attributes=attributes,
+            )
+        if attributes is None:
+            return self._get_logger_cached(name, version, schema_url)
+        return self._get_logger_no_cache(name, version, schema_url, attributes)
+
+    def add_log_record_processor(
+        self, log_record_processor: LogRecordProcessor
+    ):
+        """Registers a new :class:`LogRecordProcessor` for this `LoggerProvider` instance.
+
+        The log processors are invoked in the same order they are registered.
+        """
+        self._multi_log_record_processor.add_log_record_processor(
+            log_record_processor
+        )
+
+    def shutdown(self):
+        """Shuts down the log processors."""
+        self._multi_log_record_processor.shutdown()
+        if self._at_exit_handler is not None:
+            atexit.unregister(self._at_exit_handler)
+            self._at_exit_handler = None
+
+    def force_flush(self, timeout_millis: int = 30000) -> bool:
+        """Force flush the log processors.
+
+        Args:
+            timeout_millis: The maximum amount of time to wait for logs to be
+                exported.
+
+        Returns:
+            True if all the log processors flush the logs within the timeout,
+            False otherwise.
+        """
+        return self._multi_log_record_processor.force_flush(timeout_millis)
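
Putting the pieces of this module together, a minimal end-to-end sketch (ConsoleLogExporter and SimpleLogRecordProcessor come from the export modules added later in this diff):

import logging

from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
from opentelemetry.sdk._logs.export import (
    ConsoleLogExporter,
    SimpleLogRecordProcessor,
)

provider = LoggerProvider()
provider.add_log_record_processor(
    SimpleLogRecordProcessor(ConsoleLogExporter())
)

# Bridge the standard logging module into the OTel log pipeline.
logging.getLogger().addHandler(LoggingHandler(logger_provider=provider))
logging.getLogger(__name__).warning("hello from stdlib logging")

provider.shutdown()
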
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/_logs/_internal/export/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/_logs/_internal/export/__init__.py
new file mode 100644
index 00000000..434dc745
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/_logs/_internal/export/__init__.py
@@ -0,0 +1,462 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import annotations
+
+import abc
+import collections
+import enum
+import logging
+import os
+import sys
+import threading
+from os import environ, linesep
+from time import time_ns
+from typing import IO, Callable, Deque, List, Optional, Sequence
+
+from opentelemetry.context import (
+    _SUPPRESS_INSTRUMENTATION_KEY,
+    attach,
+    detach,
+    set_value,
+)
+from opentelemetry.sdk._logs import LogData, LogRecord, LogRecordProcessor
+from opentelemetry.sdk.environment_variables import (
+    OTEL_BLRP_EXPORT_TIMEOUT,
+    OTEL_BLRP_MAX_EXPORT_BATCH_SIZE,
+    OTEL_BLRP_MAX_QUEUE_SIZE,
+    OTEL_BLRP_SCHEDULE_DELAY,
+)
+from opentelemetry.util._once import Once
+
+_DEFAULT_SCHEDULE_DELAY_MILLIS = 5000
+_DEFAULT_MAX_EXPORT_BATCH_SIZE = 512
+_DEFAULT_EXPORT_TIMEOUT_MILLIS = 30000
+_DEFAULT_MAX_QUEUE_SIZE = 2048
+_ENV_VAR_INT_VALUE_ERROR_MESSAGE = (
+    "Unable to parse value for %s as integer. Defaulting to %s."
+)
+
+_logger = logging.getLogger(__name__)
+
+
+class LogExportResult(enum.Enum):
+    SUCCESS = 0
+    FAILURE = 1
+
+
+class LogExporter(abc.ABC):
+    """Interface for exporting logs.
+
+    Interface to be implemented by services that want to export logs received
+    in their own format.
+
+    To export data this MUST be registered to the :class:`opentelemetry.sdk._logs.Logger` using a
+    log processor.
+    """
+
+    @abc.abstractmethod
+    def export(self, batch: Sequence[LogData]):
+        """Exports a batch of logs.
+
+        Args:
+            batch: The list of `LogData` objects to be exported
+
+        Returns:
+            The result of the export
+        """
+
+    @abc.abstractmethod
+    def shutdown(self):
+        """Shuts down the exporter.
+
+        Called when the SDK is shut down.
+        """
+
+
+class ConsoleLogExporter(LogExporter):
+    """Implementation of :class:`LogExporter` that prints log records to the
+    console.
+
+    This class can be used for diagnostic purposes. It prints the exported
+    log records to STDOUT.
+    """
+
+    def __init__(
+        self,
+        out: IO = sys.stdout,
+        formatter: Callable[[LogRecord], str] = lambda record: record.to_json()
+        + linesep,
+    ):
+        self.out = out
+        self.formatter = formatter
+
+    def export(self, batch: Sequence[LogData]):
+        for data in batch:
+            self.out.write(self.formatter(data.log_record))
+        self.out.flush()
+        return LogExportResult.SUCCESS
+
+    def shutdown(self):
+        pass
+
+
+class SimpleLogRecordProcessor(LogRecordProcessor):
+    """This is an implementation of LogRecordProcessor that passes received
+    logs, in the export-friendly LogData representation, to the configured
+    LogExporter as soon as they are emitted.
+    """
+
+    def __init__(self, exporter: LogExporter):
+        self._exporter = exporter
+        self._shutdown = False
+
+    def emit(self, log_data: LogData):
+        if self._shutdown:
+            _logger.warning("Processor is already shutdown, ignoring call")
+            return
+        token = attach(set_value(_SUPPRESS_INSTRUMENTATION_KEY, True))
+        try:
+            self._exporter.export((log_data,))
+        except Exception:  # pylint: disable=broad-exception-caught
+            _logger.exception("Exception while exporting logs.")
+        detach(token)
+
+    def shutdown(self):
+        self._shutdown = True
+        self._exporter.shutdown()
+
+    def force_flush(self, timeout_millis: int = 30000) -> bool:  # pylint: disable=no-self-use
+        return True
+
+
+class _FlushRequest:
+    __slots__ = ["event", "num_log_records"]
+
+    def __init__(self):
+        self.event = threading.Event()
+        self.num_log_records = 0
+
+
+_BSP_RESET_ONCE = Once()
+
+
+class BatchLogRecordProcessor(LogRecordProcessor):
+    """This is an implementation of LogRecordProcessor which creates batches of
+    received logs in the export-friendly LogData representation and
+    send to the configured LogExporter, as soon as they are emitted.
+
+    `BatchLogRecordProcessor` is configurable with the following environment
+    variables which correspond to constructor parameters:
+
+    - :envvar:`OTEL_BLRP_SCHEDULE_DELAY`
+    - :envvar:`OTEL_BLRP_MAX_QUEUE_SIZE`
+    - :envvar:`OTEL_BLRP_MAX_EXPORT_BATCH_SIZE`
+    - :envvar:`OTEL_BLRP_EXPORT_TIMEOUT`
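+
+    A typical wiring, sketched here for illustration (any `LogExporter`
+    implementation can stand in for `ConsoleLogExporter`):
+
+    .. code:: python
+
+        from opentelemetry.sdk._logs import LoggerProvider
+        from opentelemetry.sdk._logs.export import (
+            BatchLogRecordProcessor,
+            ConsoleLogExporter,
+        )
+
+        provider = LoggerProvider()
+        provider.add_log_record_processor(
+            BatchLogRecordProcessor(ConsoleLogExporter(), max_export_batch_size=64)
+        )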
+    """
+
+    _queue: Deque[LogData]
+    _flush_request: _FlushRequest | None
+    _log_records: List[LogData | None]
+
+    def __init__(
+        self,
+        exporter: LogExporter,
+        schedule_delay_millis: float | None = None,
+        max_export_batch_size: int | None = None,
+        export_timeout_millis: float | None = None,
+        max_queue_size: int | None = None,
+    ):
+        if max_queue_size is None:
+            max_queue_size = BatchLogRecordProcessor._default_max_queue_size()
+
+        if schedule_delay_millis is None:
+            schedule_delay_millis = (
+                BatchLogRecordProcessor._default_schedule_delay_millis()
+            )
+
+        if max_export_batch_size is None:
+            max_export_batch_size = (
+                BatchLogRecordProcessor._default_max_export_batch_size()
+            )
+
+        if export_timeout_millis is None:
+            export_timeout_millis = (
+                BatchLogRecordProcessor._default_export_timeout_millis()
+            )
+
+        BatchLogRecordProcessor._validate_arguments(
+            max_queue_size, schedule_delay_millis, max_export_batch_size
+        )
+
+        self._exporter = exporter
+        self._max_queue_size = max_queue_size
+        self._schedule_delay_millis = schedule_delay_millis
+        self._max_export_batch_size = max_export_batch_size
+        self._export_timeout_millis = export_timeout_millis
+        self._queue = collections.deque([], max_queue_size)
+        self._worker_thread = threading.Thread(
+            name="OtelBatchLogRecordProcessor",
+            target=self.worker,
+            daemon=True,
+        )
+        self._condition = threading.Condition(threading.Lock())
+        self._shutdown = False
+        self._flush_request = None
+        self._log_records = [None] * self._max_export_batch_size
+        self._worker_thread.start()
+        if hasattr(os, "register_at_fork"):
+            os.register_at_fork(after_in_child=self._at_fork_reinit)  # pylint: disable=protected-access
+        self._pid = os.getpid()
+
+    def _at_fork_reinit(self):
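+        # Runs in the child process after a fork: locks and threads do not
+        # survive fork, so recreate them; any records queued in the parent
+        # are dropped.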
+        self._condition = threading.Condition(threading.Lock())
+        self._queue.clear()
+        self._worker_thread = threading.Thread(
+            name="OtelBatchLogRecordProcessor",
+            target=self.worker,
+            daemon=True,
+        )
+        self._worker_thread.start()
+        self._pid = os.getpid()
+
+    def worker(self):
+        timeout = self._schedule_delay_millis / 1e3
+        flush_request: Optional[_FlushRequest] = None
+        while not self._shutdown:
+            with self._condition:
+                if self._shutdown:
+                    # shutdown may have been called, avoid further processing
+                    break
+                flush_request = self._get_and_unset_flush_request()
+                if (
+                    len(self._queue) < self._max_export_batch_size
+                    and flush_request is None
+                ):
+                    self._condition.wait(timeout)
+
+                    flush_request = self._get_and_unset_flush_request()
+                    if not self._queue:
+                        timeout = self._schedule_delay_millis / 1e3
+                        self._notify_flush_request_finished(flush_request)
+                        flush_request = None
+                        continue
+                    if self._shutdown:
+                        break
+
+            start_ns = time_ns()
+            self._export(flush_request)
+            end_ns = time_ns()
+            # subtract the duration of this export call from the next timeout
+            timeout = self._schedule_delay_millis / 1e3 - (
+                (end_ns - start_ns) / 1e9
+            )
+
+            self._notify_flush_request_finished(flush_request)
+            flush_request = None
+
+        # there might have been a new flush request while export was running
+        # and before the done flag switched to true
+        with self._condition:
+            shutdown_flush_request = self._get_and_unset_flush_request()
+
+        # flush the remaining logs
+        self._drain_queue()
+        self._notify_flush_request_finished(flush_request)
+        self._notify_flush_request_finished(shutdown_flush_request)
+
+    def _export(self, flush_request: Optional[_FlushRequest] = None):
+        """Exports logs considering the given flush_request.
+
+        If flush_request is not None then logs are exported in batches
+        until the number of exported logs reached or exceeded the num of logs in
+        flush_request, otherwise exports at max max_export_batch_size logs.
+        """
+        if flush_request is None:
+            self._export_batch()
+            return
+
+        num_log_records = flush_request.num_log_records
+        while self._queue:
+            exported = self._export_batch()
+            num_log_records -= exported
+
+            if num_log_records <= 0:
+                break
+
+    def _export_batch(self) -> int:
+        """Exports at most max_export_batch_size logs and returns the number of
+        exported logs.
+        """
+        idx = 0
+        while idx < self._max_export_batch_size and self._queue:
+            record = self._queue.pop()
+            self._log_records[idx] = record
+            idx += 1
+        token = attach(set_value(_SUPPRESS_INSTRUMENTATION_KEY, True))
+        try:
+            self._exporter.export(self._log_records[:idx])  # type: ignore
+        except Exception:  # pylint: disable=broad-exception-caught
+            _logger.exception("Exception while exporting logs.")
+        detach(token)
+
+        for index in range(idx):
+            self._log_records[index] = None
+        return idx
+
+    def _drain_queue(self):
+        """Export all elements until queue is empty.
+
+        Can only be called from the worker thread context because it invokes
+        `export`, which is not thread-safe.
+        """
+        while self._queue:
+            self._export_batch()
+
+    def _get_and_unset_flush_request(self) -> Optional[_FlushRequest]:
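+        # Must be called while holding self._condition; atomically takes the
+        # pending flush request and records the current queue size on it.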
+        flush_request = self._flush_request
+        self._flush_request = None
+        if flush_request is not None:
+            flush_request.num_log_records = len(self._queue)
+        return flush_request
+
+    @staticmethod
+    def _notify_flush_request_finished(
+        flush_request: Optional[_FlushRequest] = None,
+    ):
+        if flush_request is not None:
+            flush_request.event.set()
+
+    def _get_or_create_flush_request(self) -> _FlushRequest:
+        if self._flush_request is None:
+            self._flush_request = _FlushRequest()
+        return self._flush_request
+
+    def emit(self, log_data: LogData) -> None:
+        """Adds the `LogData` to queue and notifies the waiting threads
+        when size of queue reaches max_export_batch_size.
+        """
+        if self._shutdown:
+            return
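+        # A changed pid means this process was forked and the worker thread
+        # is not running here; reinitialize once (guarded by Once) before
+        # enqueueing.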
+        if self._pid != os.getpid():
+            _BSP_RESET_ONCE.do_once(self._at_fork_reinit)
+
+        self._queue.appendleft(log_data)
+        if len(self._queue) >= self._max_export_batch_size:
+            with self._condition:
+                self._condition.notify()
+
+    def shutdown(self):
+        self._shutdown = True
+        with self._condition:
+            self._condition.notify_all()
+        self._worker_thread.join()
+        self._exporter.shutdown()
+
+    def force_flush(self, timeout_millis: Optional[int] = None) -> bool:
+        if timeout_millis is None:
+            timeout_millis = self._export_timeout_millis
+        if self._shutdown:
+            return True
+
+        with self._condition:
+            flush_request = self._get_or_create_flush_request()
+            self._condition.notify_all()
+
+        ret = flush_request.event.wait(timeout_millis / 1e3)
+        if not ret:
+            _logger.warning("Timeout was exceeded in force_flush().")
+        return ret
+
+    @staticmethod
+    def _default_max_queue_size():
+        try:
+            return int(
+                environ.get(OTEL_BLRP_MAX_QUEUE_SIZE, _DEFAULT_MAX_QUEUE_SIZE)
+            )
+        except ValueError:
+            _logger.exception(
+                _ENV_VAR_INT_VALUE_ERROR_MESSAGE,
+                OTEL_BLRP_MAX_QUEUE_SIZE,
+                _DEFAULT_MAX_QUEUE_SIZE,
+            )
+            return _DEFAULT_MAX_QUEUE_SIZE
+
+    @staticmethod
+    def _default_schedule_delay_millis():
+        try:
+            return int(
+                environ.get(
+                    OTEL_BLRP_SCHEDULE_DELAY, _DEFAULT_SCHEDULE_DELAY_MILLIS
+                )
+            )
+        except ValueError:
+            _logger.exception(
+                _ENV_VAR_INT_VALUE_ERROR_MESSAGE,
+                OTEL_BLRP_SCHEDULE_DELAY,
+                _DEFAULT_SCHEDULE_DELAY_MILLIS,
+            )
+            return _DEFAULT_SCHEDULE_DELAY_MILLIS
+
+    @staticmethod
+    def _default_max_export_batch_size():
+        try:
+            return int(
+                environ.get(
+                    OTEL_BLRP_MAX_EXPORT_BATCH_SIZE,
+                    _DEFAULT_MAX_EXPORT_BATCH_SIZE,
+                )
+            )
+        except ValueError:
+            _logger.exception(
+                _ENV_VAR_INT_VALUE_ERROR_MESSAGE,
+                OTEL_BLRP_MAX_EXPORT_BATCH_SIZE,
+                _DEFAULT_MAX_EXPORT_BATCH_SIZE,
+            )
+            return _DEFAULT_MAX_EXPORT_BATCH_SIZE
+
+    @staticmethod
+    def _default_export_timeout_millis():
+        try:
+            return int(
+                environ.get(
+                    OTEL_BLRP_EXPORT_TIMEOUT, _DEFAULT_EXPORT_TIMEOUT_MILLIS
+                )
+            )
+        except ValueError:
+            _logger.exception(
+                _ENV_VAR_INT_VALUE_ERROR_MESSAGE,
+                OTEL_BLRP_EXPORT_TIMEOUT,
+                _DEFAULT_EXPORT_TIMEOUT_MILLIS,
+            )
+            return _DEFAULT_EXPORT_TIMEOUT_MILLIS
+
+    @staticmethod
+    def _validate_arguments(
+        max_queue_size, schedule_delay_millis, max_export_batch_size
+    ):
+        if max_queue_size <= 0:
+            raise ValueError("max_queue_size must be a positive integer.")
+
+        if schedule_delay_millis <= 0:
+            raise ValueError("schedule_delay_millis must be positive.")
+
+        if max_export_batch_size <= 0:
+            raise ValueError(
+                "max_export_batch_size must be a positive integer."
+            )
+
+        if max_export_batch_size > max_queue_size:
+            raise ValueError(
+                "max_export_batch_size must be less than or equal to max_queue_size."
+            )
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/_logs/_internal/export/in_memory_log_exporter.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/_logs/_internal/export/in_memory_log_exporter.py
new file mode 100644
index 00000000..68cb6b73
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/_logs/_internal/export/in_memory_log_exporter.py
@@ -0,0 +1,51 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import threading
+import typing
+
+from opentelemetry.sdk._logs import LogData
+from opentelemetry.sdk._logs.export import LogExporter, LogExportResult
+
+
+class InMemoryLogExporter(LogExporter):
+    """Implementation of :class:`.LogExporter` that stores logs in memory.
+
+    This class can be used for testing purposes. It stores the exported logs
+    in a list in memory that can be retrieved using the
+    :func:`.get_finished_logs` method.
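+
+    A test-time usage sketch (the provider wiring is illustrative):
+
+    .. code:: python
+
+        from opentelemetry.sdk._logs import LoggerProvider
+        from opentelemetry.sdk._logs.export import (
+            InMemoryLogExporter,
+            SimpleLogRecordProcessor,
+        )
+
+        exporter = InMemoryLogExporter()
+        provider = LoggerProvider()
+        provider.add_log_record_processor(SimpleLogRecordProcessor(exporter))
+
+        # ... exercise the code under test, emitting logs via the provider ...
+
+        finished_logs = exporter.get_finished_logs()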
+    """
+
+    def __init__(self):
+        self._logs = []
+        self._lock = threading.Lock()
+        self._stopped = False
+
+    def clear(self) -> None:
+        with self._lock:
+            self._logs.clear()
+
+    def get_finished_logs(self) -> typing.Tuple[LogData, ...]:
+        with self._lock:
+            return tuple(self._logs)
+
+    def export(self, batch: typing.Sequence[LogData]) -> LogExportResult:
+        if self._stopped:
+            return LogExportResult.FAILURE
+        with self._lock:
+            self._logs.extend(batch)
+        return LogExportResult.SUCCESS
+
+    def shutdown(self) -> None:
+        self._stopped = True
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/_logs/export/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/_logs/export/__init__.py
new file mode 100644
index 00000000..37a9eca7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/_logs/export/__init__.py
@@ -0,0 +1,35 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from opentelemetry.sdk._logs._internal.export import (
+    BatchLogRecordProcessor,
+    ConsoleLogExporter,
+    LogExporter,
+    LogExportResult,
+    SimpleLogRecordProcessor,
+)
+
+# The in_memory_log_exporter module is kept separate to avoid a circular import.
+from opentelemetry.sdk._logs._internal.export.in_memory_log_exporter import (
+    InMemoryLogExporter,
+)
+
+__all__ = [
+    "BatchLogRecordProcessor",
+    "ConsoleLogExporter",
+    "LogExporter",
+    "LogExportResult",
+    "SimpleLogRecordProcessor",
+    "InMemoryLogExporter",
+]
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/environment_variables/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/environment_variables/__init__.py
new file mode 100644
index 00000000..f0980754
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/environment_variables/__init__.py
@@ -0,0 +1,782 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+OTEL_SDK_DISABLED = "OTEL_SDK_DISABLED"
+"""
+.. envvar:: OTEL_SDK_DISABLED
+
+The :envvar:`OTEL_SDK_DISABLED` environment variable disables the SDK for all signals.
+Default: "false"
+"""
+
+OTEL_RESOURCE_ATTRIBUTES = "OTEL_RESOURCE_ATTRIBUTES"
+"""
+.. envvar:: OTEL_RESOURCE_ATTRIBUTES
+
+The :envvar:`OTEL_RESOURCE_ATTRIBUTES` environment variable allows resource
+attributes to be passed to the SDK at process invocation. The attributes from
+:envvar:`OTEL_RESOURCE_ATTRIBUTES` are merged with those passed to
+`Resource.create`, meaning :envvar:`OTEL_RESOURCE_ATTRIBUTES` takes *lower*
+priority. Attributes should be in the format ``key1=value1,key2=value2``.
+Additional details are available `in the specification
+<https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/sdk.md#specifying-resource-information-via-an-environment-variable>`__.
+
+.. code-block:: console
+
+    $ OTEL_RESOURCE_ATTRIBUTES="service.name=shoppingcard,will_be_overridden=foo" python - <<EOF
+    import pprint
+    from opentelemetry.sdk.resources import Resource
+    pprint.pprint(Resource.create({"will_be_overridden": "bar"}).attributes)
+    EOF
+    {'service.name': 'shoppingcart',
+    'telemetry.sdk.language': 'python',
+    'telemetry.sdk.name': 'opentelemetry',
+    'telemetry.sdk.version': '0.13.dev0',
+    'will_be_overridden': 'bar'}
+"""
+
+OTEL_LOG_LEVEL = "OTEL_LOG_LEVEL"
+"""
+.. envvar:: OTEL_LOG_LEVEL
+
+The :envvar:`OTEL_LOG_LEVEL` environment variable sets the log level used by the SDK logger.
+Default: "info"
+"""
+
+OTEL_TRACES_SAMPLER = "OTEL_TRACES_SAMPLER"
+"""
+.. envvar:: OTEL_TRACES_SAMPLER
+
+The :envvar:`OTEL_TRACES_SAMPLER` environment variable sets the sampler to be used for traces.
+Sampling is a mechanism to control the noise introduced by OpenTelemetry by reducing the number
+of traces collected and sent to the backend.
+Default: "parentbased_always_on"
+"""
+
+OTEL_TRACES_SAMPLER_ARG = "OTEL_TRACES_SAMPLER_ARG"
+"""
+.. envvar:: OTEL_TRACES_SAMPLER_ARG
+
+The :envvar:`OTEL_TRACES_SAMPLER_ARG` environment variable will only be used if OTEL_TRACES_SAMPLER is set.
+Each Sampler type defines its own expected input, if any.
+Invalid or unrecognized input is ignored,
+i.e. the SDK behaves as if OTEL_TRACES_SAMPLER_ARG is not set.
+"""
+
+OTEL_BLRP_SCHEDULE_DELAY = "OTEL_BLRP_SCHEDULE_DELAY"
+"""
+.. envvar:: OTEL_BLRP_SCHEDULE_DELAY
+
+The :envvar:`OTEL_BLRP_SCHEDULE_DELAY` represents the delay interval, in milliseconds, between two consecutive exports of the BatchLogRecordProcessor.
+Default: 5000
+"""
+
+OTEL_BLRP_EXPORT_TIMEOUT = "OTEL_BLRP_EXPORT_TIMEOUT"
+"""
+.. envvar:: OTEL_BLRP_EXPORT_TIMEOUT
+
+The :envvar:`OTEL_BLRP_EXPORT_TIMEOUT` represents the maximum allowed time, in milliseconds, to export data from the BatchLogRecordProcessor.
+Default: 30000
+"""
+
+OTEL_BLRP_MAX_QUEUE_SIZE = "OTEL_BLRP_MAX_QUEUE_SIZE"
+"""
+.. envvar:: OTEL_BLRP_MAX_QUEUE_SIZE
+
+The :envvar:`OTEL_BLRP_MAX_QUEUE_SIZE` represents the maximum queue size for the data export of the BatchLogRecordProcessor.
+Default: 2048
+"""
+
+OTEL_BLRP_MAX_EXPORT_BATCH_SIZE = "OTEL_BLRP_MAX_EXPORT_BATCH_SIZE"
+"""
+.. envvar:: OTEL_BLRP_MAX_EXPORT_BATCH_SIZE
+
+The :envvar:`OTEL_BLRP_MAX_EXPORT_BATCH_SIZE` represents the maximum batch size for the data export of the BatchLogRecordProcessor.
+Default: 512
+"""
+
+OTEL_BSP_SCHEDULE_DELAY = "OTEL_BSP_SCHEDULE_DELAY"
+"""
+.. envvar:: OTEL_BSP_SCHEDULE_DELAY
+
+The :envvar:`OTEL_BSP_SCHEDULE_DELAY` represents the delay interval, in milliseconds, between two consecutive exports of the BatchSpanProcessor.
+Default: 5000
+"""
+
+OTEL_BSP_EXPORT_TIMEOUT = "OTEL_BSP_EXPORT_TIMEOUT"
+"""
+.. envvar:: OTEL_BSP_EXPORT_TIMEOUT
+
+The :envvar:`OTEL_BSP_EXPORT_TIMEOUT` represents the maximum allowed time, in milliseconds, to export data from the BatchSpanProcessor.
+Default: 30000
+"""
+
+OTEL_BSP_MAX_QUEUE_SIZE = "OTEL_BSP_MAX_QUEUE_SIZE"
+"""
+.. envvar:: OTEL_BSP_MAX_QUEUE_SIZE
+
+The :envvar:`OTEL_BSP_MAX_QUEUE_SIZE` represents the maximum queue size for the data export of the BatchSpanProcessor.
+Default: 2048
+"""
+
+OTEL_BSP_MAX_EXPORT_BATCH_SIZE = "OTEL_BSP_MAX_EXPORT_BATCH_SIZE"
+"""
+.. envvar:: OTEL_BSP_MAX_EXPORT_BATCH_SIZE
+
+The :envvar:`OTEL_BSP_MAX_EXPORT_BATCH_SIZE` represents the maximum batch size for the data export of the BatchSpanProcessor.
+Default: 512
+"""
+
+OTEL_ATTRIBUTE_COUNT_LIMIT = "OTEL_ATTRIBUTE_COUNT_LIMIT"
+"""
+.. envvar:: OTEL_ATTRIBUTE_COUNT_LIMIT
+
+The :envvar:`OTEL_ATTRIBUTE_COUNT_LIMIT` represents the maximum allowed attribute count for spans, events and links.
+This limit is overridden by more specific limits such as OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT.
+Default: 128
+"""
+
+OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT = "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT"
+"""
+.. envvar:: OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT
+
+The :envvar:`OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT` represents the maximum allowed attribute length.
+"""
+
+OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT = "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"
+"""
+.. envvar:: OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT
+
+The :envvar:`OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT` represents the maximum allowed event attribute count.
+Default: 128
+"""
+
+OTEL_LINK_ATTRIBUTE_COUNT_LIMIT = "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"
+"""
+.. envvar:: OTEL_LINK_ATTRIBUTE_COUNT_LIMIT
+
+The :envvar:`OTEL_LINK_ATTRIBUTE_COUNT_LIMIT` represents the maximum allowed link attribute count.
+Default: 128
+"""
+
+OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT = "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT"
+"""
+.. envvar:: OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT
+
+The :envvar:`OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT` represents the maximum allowed span attribute count.
+Default: 128
+"""
+
+OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT = (
+    "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT"
+)
+"""
+.. envvar:: OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT
+
+The :envvar:`OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT` represents the maximum allowed length
+span attribute values can have. This takes precedence over :envvar:`OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT`.
+"""
+
+OTEL_SPAN_EVENT_COUNT_LIMIT = "OTEL_SPAN_EVENT_COUNT_LIMIT"
+"""
+.. envvar:: OTEL_SPAN_EVENT_COUNT_LIMIT
+
+The :envvar:`OTEL_SPAN_EVENT_COUNT_LIMIT` represents the maximum allowed span event count.
+Default: 128
+"""
+
+OTEL_SPAN_LINK_COUNT_LIMIT = "OTEL_SPAN_LINK_COUNT_LIMIT"
+"""
+.. envvar:: OTEL_SPAN_LINK_COUNT_LIMIT
+
+The :envvar:`OTEL_SPAN_LINK_COUNT_LIMIT` represents the maximum allowed span link count.
+Default: 128
+"""
+
+OTEL_EXPORTER_JAEGER_AGENT_HOST = "OTEL_EXPORTER_JAEGER_AGENT_HOST"
+"""
+.. envvar:: OTEL_EXPORTER_JAEGER_AGENT_HOST
+
+The :envvar:`OTEL_EXPORTER_JAEGER_AGENT_HOST` represents the hostname for the Jaeger agent.
+Default: "localhost"
+"""
+
+OTEL_EXPORTER_JAEGER_AGENT_PORT = "OTEL_EXPORTER_JAEGER_AGENT_PORT"
+"""
+.. envvar:: OTEL_EXPORTER_JAEGER_AGENT_PORT
+
+The :envvar:`OTEL_EXPORTER_JAEGER_AGENT_PORT` represents the port for the Jaeger agent.
+Default: 6831
+"""
+
+OTEL_EXPORTER_JAEGER_ENDPOINT = "OTEL_EXPORTER_JAEGER_ENDPOINT"
+"""
+.. envvar:: OTEL_EXPORTER_JAEGER_ENDPOINT
+
+The :envvar:`OTEL_EXPORTER_JAEGER_ENDPOINT` represents the HTTP endpoint for Jaeger traces.
+Default: "http://localhost:14250"
+"""
+
+OTEL_EXPORTER_JAEGER_USER = "OTEL_EXPORTER_JAEGER_USER"
+"""
+.. envvar:: OTEL_EXPORTER_JAEGER_USER
+
+The :envvar:`OTEL_EXPORTER_JAEGER_USER` represents the username to be used for HTTP basic authentication.
+"""
+
+OTEL_EXPORTER_JAEGER_PASSWORD = "OTEL_EXPORTER_JAEGER_PASSWORD"
+"""
+.. envvar:: OTEL_EXPORTER_JAEGER_PASSWORD
+
+The :envvar:`OTEL_EXPORTER_JAEGER_PASSWORD` represents the password to be used for HTTP basic authentication.
+"""
+
+OTEL_EXPORTER_JAEGER_TIMEOUT = "OTEL_EXPORTER_JAEGER_TIMEOUT"
+"""
+.. envvar:: OTEL_EXPORTER_JAEGER_TIMEOUT
+
+Maximum time (in seconds) the Jaeger exporter will wait for each batch export.
+Default: 10
+"""
+
+OTEL_EXPORTER_ZIPKIN_ENDPOINT = "OTEL_EXPORTER_ZIPKIN_ENDPOINT"
+"""
+.. envvar:: OTEL_EXPORTER_ZIPKIN_ENDPOINT
+
+Zipkin collector endpoint to which the exporter will send data. This may
+include a path (e.g. ``http://example.com:9411/api/v2/spans``).
+"""
+
+OTEL_EXPORTER_ZIPKIN_TIMEOUT = "OTEL_EXPORTER_ZIPKIN_TIMEOUT"
+"""
+.. envvar:: OTEL_EXPORTER_ZIPKIN_TIMEOUT
+
+Maximum time (in seconds) the Zipkin exporter will wait for each batch export.
+Default: 10
+"""
+
+OTEL_EXPORTER_OTLP_PROTOCOL = "OTEL_EXPORTER_OTLP_PROTOCOL"
+"""
+.. envvar:: OTEL_EXPORTER_OTLP_PROTOCOL
+
+The :envvar:`OTEL_EXPORTER_OTLP_PROTOCOL` represents the transport protocol for the
+OTLP exporter.
+"""
+
+OTEL_EXPORTER_OTLP_TRACES_PROTOCOL = "OTEL_EXPORTER_OTLP_TRACES_PROTOCOL"
+"""
+.. envvar:: OTEL_EXPORTER_OTLP_TRACES_PROTOCOL
+
+The :envvar:`OTEL_EXPORTER_OTLP_TRACES_PROTOCOL` represents the transport protocol for spans.
+"""
+
+OTEL_EXPORTER_OTLP_METRICS_PROTOCOL = "OTEL_EXPORTER_OTLP_METRICS_PROTOCOL"
+"""
+.. envvar:: OTEL_EXPORTER_OTLP_METRICS_PROTOCOL
+
+The :envvar:`OTEL_EXPORTER_OTLP_METRICS_PROTOCOL` represents the transport protocol for metrics.
+"""
+
+OTEL_EXPORTER_OTLP_LOGS_PROTOCOL = "OTEL_EXPORTER_OTLP_LOGS_PROTOCOL"
+"""
+.. envvar:: OTEL_EXPORTER_OTLP_LOGS_PROTOCOL
+
+The :envvar:`OTEL_EXPORTER_OTLP_LOGS_PROTOCOL` represents the transport protocol for logs.
+"""
+
+OTEL_EXPORTER_OTLP_CERTIFICATE = "OTEL_EXPORTER_OTLP_CERTIFICATE"
+"""
+.. envvar:: OTEL_EXPORTER_OTLP_CERTIFICATE
+
+The :envvar:`OTEL_EXPORTER_OTLP_CERTIFICATE` stores the path to the certificate file for
+TLS credentials of gRPC client. Should only be used for a secure connection.
+"""
+
+OTEL_EXPORTER_OTLP_HEADERS = "OTEL_EXPORTER_OTLP_HEADERS"
+"""
+.. envvar:: OTEL_EXPORTER_OTLP_HEADERS
+
+The :envvar:`OTEL_EXPORTER_OTLP_HEADERS` contains the key-value pairs to be used as headers
+associated with gRPC or HTTP requests.
+"""
+
+
+OTEL_EXPORTER_OTLP_COMPRESSION = "OTEL_EXPORTER_OTLP_COMPRESSION"
+"""
+.. envvar:: OTEL_EXPORTER_OTLP_COMPRESSION
+
+Specifies a gRPC compression method to be used in the OTLP exporters.
+Possible values are:
+
+- ``gzip`` corresponding to `grpc.Compression.Gzip`.
+- ``deflate`` corresponding to `grpc.Compression.Deflate`.
+
+If no ``OTEL_EXPORTER_OTLP_*COMPRESSION`` environment variable is present and
+no ``compression`` argument is passed to the exporter, the default
+`grpc.Compression.NoCompression` will be used. Additional details are
+available `in the specification
+<https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md#opentelemetry-protocol-exporter>`__.
+"""
+
+OTEL_EXPORTER_OTLP_TIMEOUT = "OTEL_EXPORTER_OTLP_TIMEOUT"
+"""
+.. envvar:: OTEL_EXPORTER_OTLP_TIMEOUT
+
+The :envvar:`OTEL_EXPORTER_OTLP_TIMEOUT` is the maximum time (in seconds) the OTLP exporter will wait for each batch export.
+Default: 10
+"""
+
+OTEL_EXPORTER_OTLP_ENDPOINT = "OTEL_EXPORTER_OTLP_ENDPOINT"
+"""
+.. envvar:: OTEL_EXPORTER_OTLP_ENDPOINT
+
+The :envvar:`OTEL_EXPORTER_OTLP_ENDPOINT` is the target to which the exporter is going to send spans or metrics.
+The endpoint MUST be a valid URL host, and MAY contain a scheme (http or https), port and path.
+A scheme of https indicates a secure connection and takes precedence over the insecure configuration setting.
+Default: "http://localhost:4317"
+"""
+
+OTEL_EXPORTER_OTLP_INSECURE = "OTEL_EXPORTER_OTLP_INSECURE"
+"""
+.. envvar:: OTEL_EXPORTER_OTLP_INSECURE
+
+The :envvar:`OTEL_EXPORTER_OTLP_INSECURE` represents whether to enable client transport security for gRPC requests.
+A scheme of https takes precedence over this configuration setting.
+Default: False
+"""
+
+OTEL_EXPORTER_OTLP_TRACES_INSECURE = "OTEL_EXPORTER_OTLP_TRACES_INSECURE"
+"""
+.. envvar:: OTEL_EXPORTER_OTLP_TRACES_INSECURE
+
+The :envvar:`OTEL_EXPORTER_OTLP_TRACES_INSECURE` represents whether to enable client transport security
+for gRPC requests for spans. A scheme of https takes precedence over this configuration setting.
+Default: False
+"""
+
+
+OTEL_EXPORTER_OTLP_TRACES_ENDPOINT = "OTEL_EXPORTER_OTLP_TRACES_ENDPOINT"
+"""
+.. envvar:: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+
+The :envvar:`OTEL_EXPORTER_OTLP_TRACES_ENDPOINT` is the target to which the span exporter is going to send spans.
+The endpoint MUST be a valid URL host, and MAY contain a scheme (http or https), port and path.
+A scheme of https indicates a secure connection and takes precedence over this configuration setting.
+"""
+
+OTEL_EXPORTER_OTLP_METRICS_ENDPOINT = "OTEL_EXPORTER_OTLP_METRICS_ENDPOINT"
+"""
+.. envvar:: OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+
+The :envvar:`OTEL_EXPORTER_OTLP_METRICS_ENDPOINT` is the target to which the metrics exporter is going to send metrics.
+The endpoint MUST be a valid URL host, and MAY contain a scheme (http or https), port and path.
+A scheme of https indicates a secure connection and takes precedence over this configuration setting.
+"""
+
+OTEL_EXPORTER_OTLP_LOGS_ENDPOINT = "OTEL_EXPORTER_OTLP_LOGS_ENDPOINT"
+"""
+.. envvar:: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT
+
+The :envvar:`OTEL_EXPORTER_OTLP_LOGS_ENDPOINT` is the target to which the log exporter is going to send logs.
+The endpoint MUST be a valid URL host, and MAY contain a scheme (http or https), port and path.
+A scheme of https indicates a secure connection and takes precedence over this configuration setting.
+"""
+
+OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE = "OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE"
+"""
+.. envvar:: OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE
+
+The :envvar:`OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE` stores the path to the certificate file for
+TLS credentials of gRPC client for traces. Should only be used for a secure connection for tracing.
+"""
+
+OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE = (
+    "OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE"
+)
+"""
+.. envvar:: OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE
+
+The :envvar:`OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE` stores the path to the certificate file for
+TLS credentials of gRPC client for metrics. Should only be used for a secure connection for exporting metrics.
+"""
+
+OTEL_EXPORTER_OTLP_CLIENT_KEY = "OTEL_EXPORTER_OTLP_CLIENT_KEY"
+"""
+.. envvar:: OTEL_EXPORTER_OTLP_CLIENT_KEY
+
+The :envvar:`OTEL_EXPORTER_OTLP_CLIENT_KEY` stores the path to the client private key to use
+in mTLS communication in PEM format.
+"""
+
+OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY = "OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY"
+"""
+.. envvar:: OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY
+
+The :envvar:`OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY` stores the path to the client private key to use
+in mTLS communication in PEM format for traces.
+"""
+
+OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY = "OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY"
+"""
+.. envvar:: OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY
+
+The :envvar:`OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY` stores the path to the client private key to use
+in mTLS communication in PEM format for metrics.
+"""
+
+OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY = "OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY"
+"""
+.. envvar:: OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY
+
+The :envvar:`OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY` stores the path to the client private key to use
+in mTLS communication in PEM format for logs.
+"""
+
+OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE = "OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE"
+"""
+.. envvar:: OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE
+
+The :envvar:`OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE` stores the path to the client certificate/chain of trust for
+the client's private key to use in mTLS communication in PEM format.
+"""
+
+OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE = (
+    "OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE"
+)
+"""
+.. envvar:: OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE
+
+The :envvar:`OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE` stores the path to the client certificate/chain of trust for
+the client's private key to use in mTLS communication in PEM format for traces.
+"""
+
+OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE = (
+    "OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE"
+)
+"""
+.. envvar:: OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE
+
+The :envvar:`OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE` stores the path to the client certificate/chain of trust for
+the client's private key to use in mTLS communication in PEM format for metrics.
+"""
+
+OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE = (
+    "OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE"
+)
+"""
+.. envvar:: OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE
+
+The :envvar:`OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE` stores the path to the client certificate/chain of trust for
+the client's private key to use in mTLS communication in PEM format for logs.
+"""
+
+OTEL_EXPORTER_OTLP_TRACES_HEADERS = "OTEL_EXPORTER_OTLP_TRACES_HEADERS"
+"""
+.. envvar:: OTEL_EXPORTER_OTLP_TRACES_HEADERS
+
+The :envvar:`OTEL_EXPORTER_OTLP_TRACES_HEADERS` contains the key-value pairs to be used as headers for spans
+associated with gRPC or HTTP requests.
+"""
+
+OTEL_EXPORTER_OTLP_METRICS_HEADERS = "OTEL_EXPORTER_OTLP_METRICS_HEADERS"
+"""
+.. envvar:: OTEL_EXPORTER_OTLP_METRICS_HEADERS
+
+The :envvar:`OTEL_EXPORTER_OTLP_METRICS_HEADERS` contains the key-value pairs to be used as headers for metrics
+associated with gRPC or HTTP requests.
+"""
+
+OTEL_EXPORTER_OTLP_LOGS_HEADERS = "OTEL_EXPORTER_OTLP_LOGS_HEADERS"
+"""
+.. envvar:: OTEL_EXPORTER_OTLP_LOGS_HEADERS
+
+The :envvar:`OTEL_EXPORTER_OTLP_LOGS_HEADERS` contains the key-value pairs to be used as headers for logs
+associated with gRPC or HTTP requests.
+"""
+
+OTEL_EXPORTER_OTLP_TRACES_COMPRESSION = "OTEL_EXPORTER_OTLP_TRACES_COMPRESSION"
+"""
+.. envvar:: OTEL_EXPORTER_OTLP_TRACES_COMPRESSION
+
+Same as :envvar:`OTEL_EXPORTER_OTLP_COMPRESSION` but only for the span
+exporter. If both are present, this takes higher precedence.
+"""
+
+OTEL_EXPORTER_OTLP_METRICS_COMPRESSION = (
+    "OTEL_EXPORTER_OTLP_METRICS_COMPRESSION"
+)
+"""
+.. envvar:: OTEL_EXPORTER_OTLP_METRICS_COMPRESSION
+
+Same as :envvar:`OTEL_EXPORTER_OTLP_COMPRESSION` but only for the metric
+exporter. If both are present, this takes higher precedence.
+"""
+
+OTEL_EXPORTER_OTLP_LOGS_COMPRESSION = "OTEL_EXPORTER_OTLP_LOGS_COMPRESSION"
+"""
+.. envvar:: OTEL_EXPORTER_OTLP_LOGS_COMPRESSION
+
+Same as :envvar:`OTEL_EXPORTER_OTLP_COMPRESSION` but only for the log
+exporter. If both are present, this takes higher precedence.
+"""
+
+OTEL_EXPORTER_OTLP_TRACES_TIMEOUT = "OTEL_EXPORTER_OTLP_TRACES_TIMEOUT"
+"""
+.. envvar:: OTEL_EXPORTER_OTLP_TRACES_TIMEOUT
+
+The :envvar:`OTEL_EXPORTER_OTLP_TRACES_TIMEOUT` is the maximum time the OTLP exporter will
+wait for each batch export for spans.
+"""
+
+OTEL_EXPORTER_OTLP_METRICS_TIMEOUT = "OTEL_EXPORTER_OTLP_METRICS_TIMEOUT"
+"""
+.. envvar:: OTEL_EXPORTER_OTLP_METRICS_TIMEOUT
+
+The :envvar:`OTEL_EXPORTER_OTLP_METRICS_TIMEOUT` is the maximum time the OTLP exporter will
+wait for each batch export for metrics.
+"""
+
+OTEL_EXPORTER_OTLP_METRICS_INSECURE = "OTEL_EXPORTER_OTLP_METRICS_INSECURE"
+"""
+.. envvar:: OTEL_EXPORTER_OTLP_METRICS_INSECURE
+
+The :envvar:`OTEL_EXPORTER_OTLP_METRICS_INSECURE` represents whether to enable client transport security
+for gRPC requests for metrics. A scheme of https takes precedence over this configuration setting.
+Default: False
+"""
+
+OTEL_EXPORTER_OTLP_LOGS_INSECURE = "OTEL_EXPORTER_OTLP_LOGS_INSECURE"
+"""
+.. envvar:: OTEL_EXPORTER_OTLP_LOGS_INSECURE
+
+The :envvar:`OTEL_EXPORTER_OTLP_LOGS_INSECURE` represents whether to enable client transport security
+for gRPC requests for logs. A scheme of https takes precedence over this configuration setting.
+Default: False
+"""
+
+OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE = "OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE"
+"""
+.. envvar:: OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE
+
+The :envvar:`OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE` stores the path to the certificate file for
+TLS credentials of gRPC client for logs. Should only be used for a secure connection for logs.
+"""
+
+OTEL_EXPORTER_OTLP_LOGS_TIMEOUT = "OTEL_EXPORTER_OTLP_LOGS_TIMEOUT"
+"""
+.. envvar:: OTEL_EXPORTER_OTLP_LOGS_TIMEOUT
+
+The :envvar:`OTEL_EXPORTER_OTLP_LOGS_TIMEOUT` is the maximum time the OTLP exporter will
+wait for each batch export for logs.
+"""
+
+OTEL_EXPORTER_JAEGER_CERTIFICATE = "OTEL_EXPORTER_JAEGER_CERTIFICATE"
+"""
+.. envvar:: OTEL_EXPORTER_JAEGER_CERTIFICATE
+
+The :envvar:`OTEL_EXPORTER_JAEGER_CERTIFICATE` stores the path to the certificate file for
+TLS credentials of gRPC client for Jaeger. Should only be used for a secure connection with Jaeger.
+"""
+
+OTEL_EXPORTER_JAEGER_AGENT_SPLIT_OVERSIZED_BATCHES = (
+    "OTEL_EXPORTER_JAEGER_AGENT_SPLIT_OVERSIZED_BATCHES"
+)
+"""
+.. envvar:: OTEL_EXPORTER_JAEGER_AGENT_SPLIT_OVERSIZED_BATCHES
+
+The :envvar:`OTEL_EXPORTER_JAEGER_AGENT_SPLIT_OVERSIZED_BATCHES` is a boolean flag to determine whether
+to split a large span batch to adhere to the UDP packet size limit.
+"""
+
+OTEL_SERVICE_NAME = "OTEL_SERVICE_NAME"
+"""
+.. envvar:: OTEL_SERVICE_NAME
+
+Convenience environment variable for setting the service name resource attribute.
+The following two environment variables have the same effect:
+
+.. code-block:: console
+
+    OTEL_SERVICE_NAME=my-python-service
+
+    OTEL_RESOURCE_ATTRIBUTES=service.name=my-python-service
+
+
+If both are set, :envvar:`OTEL_SERVICE_NAME` takes precedence.
+"""
+
+
+_OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED = (
+    "OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED"
+)
+"""
+.. envvar:: OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED
+
+The :envvar:`OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED` environment variable allows users to
+enable/disable the auto instrumentation for the Python logging module.
+Default: False
+
+Note: Logs SDK and its related settings are experimental.
+"""
+
+
+OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE = (
+    "OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE"
+)
+"""
+.. envvar:: OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE
+
+The :envvar:`OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE` environment
+variable allows users to set the default aggregation temporality policy to use
+on the basis of instrument kind. The valid (case-insensitive) values are:
+
+- ``CUMULATIVE``: Use ``CUMULATIVE`` aggregation temporality for all instrument kinds.
+- ``DELTA``: Use ``DELTA`` aggregation temporality for ``Counter``, ``Asynchronous Counter``
+  and ``Histogram``; use ``CUMULATIVE`` aggregation temporality for ``UpDownCounter`` and
+  ``Asynchronous UpDownCounter``.
+- ``LOWMEMORY``: Use ``DELTA`` aggregation temporality for ``Counter`` and ``Histogram``;
+  use ``CUMULATIVE`` aggregation temporality for ``UpDownCounter``, ``Asynchronous Counter``
+  and ``Asynchronous UpDownCounter``.
+"""
+
+OTEL_EXPORTER_JAEGER_GRPC_INSECURE = "OTEL_EXPORTER_JAEGER_GRPC_INSECURE"
+"""
+.. envvar:: OTEL_EXPORTER_JAEGER_GRPC_INSECURE
+
+The :envvar:`OTEL_EXPORTER_JAEGER_GRPC_INSECURE` is a boolean flag that should be set to True if the collector has no encryption or authentication.
+"""
+
+OTEL_METRIC_EXPORT_INTERVAL = "OTEL_METRIC_EXPORT_INTERVAL"
+"""
+.. envvar:: OTEL_METRIC_EXPORT_INTERVAL
+
+The :envvar:`OTEL_METRIC_EXPORT_INTERVAL` is the time interval (in milliseconds) between the start of two export attempts.
+"""
+
+OTEL_METRIC_EXPORT_TIMEOUT = "OTEL_METRIC_EXPORT_TIMEOUT"
+"""
+.. envvar:: OTEL_METRIC_EXPORT_TIMEOUT
+
+The :envvar:`OTEL_METRIC_EXPORT_TIMEOUT` is the maximum allowed time (in milliseconds) to export data.
+"""
+
+OTEL_METRICS_EXEMPLAR_FILTER = "OTEL_METRICS_EXEMPLAR_FILTER"
+"""
+.. envvar:: OTEL_METRICS_EXEMPLAR_FILTER
+
+The :envvar:`OTEL_METRICS_EXEMPLAR_FILTER` selects which measurements are eligible to become Exemplars.
+"""
+
+OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION = (
+    "OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION"
+)
+"""
+.. envvar:: OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION
+
+The :envvar:`OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION` is the default aggregation to use for histogram instruments.
+"""
+
+OTEL_EXPERIMENTAL_RESOURCE_DETECTORS = "OTEL_EXPERIMENTAL_RESOURCE_DETECTORS"
+"""
+.. envvar:: OTEL_EXPERIMENTAL_RESOURCE_DETECTORS
+
+The :envvar:`OTEL_EXPERIMENTAL_RESOURCE_DETECTORS` is a comma-separated string
+of resource detector names. These names must match the names of entry points
+registered under the ``opentelemetry_resource_detector`` entry point group.
+This is an experimental feature, and the name of this variable and its
+behavior can change in a non-backwards compatible way.
+"""
+
+OTEL_EXPORTER_PROMETHEUS_HOST = "OTEL_EXPORTER_PROMETHEUS_HOST"
+"""
+.. envvar:: OTEL_EXPORTER_PROMETHEUS_HOST
+
+The :envvar:`OTEL_EXPORTER_PROMETHEUS_HOST` environment variable configures the host used by
+the Prometheus exporter.
+Default: "localhost"
+
+This is an experimental environment variable and the name of this variable and its behavior can
+change in a non-backwards compatible way.
+"""
+
+OTEL_EXPORTER_PROMETHEUS_PORT = "OTEL_EXPORTER_PROMETHEUS_PORT"
+"""
+.. envvar:: OTEL_EXPORTER_PROMETHEUS_PORT
+
+The :envvar:`OTEL_EXPORTER_PROMETHEUS_PORT` environment variable configures the port used by
+the Prometheus exporter.
+Default: 9464
+
+This is an experimental environment variable and the name of this variable and its behavior can
+change in a non-backwards compatible way.
+"""
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/error_handler/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/error_handler/__init__.py
new file mode 100644
index 00000000..3e0e778f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/error_handler/__init__.py
@@ -0,0 +1,143 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Global Error Handler
+
+This module provides a global error handler and an interface that allows
+error handlers to be registered with the global error handler via entry points.
+A default error handler is also provided.
+
+To use this feature, users can create an error handler that is registered
+using the ``opentelemetry_error_handler`` entry point. The class registered
+in this entry point must inherit from the
+``opentelemetry.sdk.error_handler.ErrorHandler`` class and implement the
+corresponding ``_handle`` method, which receives the exception object to be
+handled. The error handler class should also inherit from the exception
+classes it wants to handle. For example, this would be an error handler
+that handles ``ZeroDivisionError``:
+
+.. code:: python
+
+    from opentelemetry.sdk.error_handler import ErrorHandler
+    from logging import getLogger
+
+    logger = getLogger(__name__)
+
+
+    class ErrorHandler0(ErrorHandler, ZeroDivisionError):
+
+        def _handle(self, error: Exception, *args, **kwargs):
+
+            logger.exception("ErrorHandler0 handling a ZeroDivisionError")
+
+To use the global error handler, just instantiate it as a context manager where
+you want exceptions to be handled:
+
+
+.. code:: python
+
+    from opentelemetry.sdk.error_handler import GlobalErrorHandler
+
+    with GlobalErrorHandler():
+        1 / 0
+
+If the class of the exception raised in the scope of the ``GlobalErrorHandler``
+object is not a parent class of any registered error handler, the default
+error handler will handle the exception. This default error handler only logs
+the exception to standard logging; the exception won't be raised any further.
+"""
+
+from abc import ABC, abstractmethod
+from logging import getLogger
+
+from opentelemetry.util._importlib_metadata import entry_points
+
+logger = getLogger(__name__)
+
+
+class ErrorHandler(ABC):
+    @abstractmethod
+    def _handle(self, error: Exception, *args, **kwargs):
+        """
+        Handle an exception
+        """
+
+
+class _DefaultErrorHandler(ErrorHandler):
+    """
+    Default error handler
+
+    This error handler just logs the exception using standard logging.
+    """
+
+    # pylint: disable=useless-return
+    def _handle(self, error: Exception, *args, **kwargs):
+        logger.exception("Error handled by default error handler: ")
+        return None
+
+
+class GlobalErrorHandler:
+    """
+    Global error handler
+
+    This is a singleton class that can be instantiated anywhere to get the
+    global error handler. Used as a context manager, it dispatches any
+    exception raised in its scope to the registered error handlers.
+    """
+
+    _instance = None
+
+    def __new__(cls) -> "GlobalErrorHandler":
+        if cls._instance is None:
+            cls._instance = super().__new__(cls)
+
+        return cls._instance
+
+    def __enter__(self):
+        pass
+
+    # pylint: disable=no-self-use
+    def __exit__(self, exc_type, exc_value, traceback):
+        if exc_value is None:
+            return None
+
+        plugin_handled = False
+
+        error_handler_entry_points = entry_points(
+            group="opentelemetry_error_handler"
+        )
+
+        for error_handler_entry_point in error_handler_entry_points:
+            error_handler_class = error_handler_entry_point.load()
+
+            if issubclass(error_handler_class, exc_value.__class__):
+                try:
+                    error_handler_class()._handle(exc_value)
+                    plugin_handled = True
+
+                # pylint: disable=broad-exception-caught
+                except Exception as error_handling_error:
+                    logger.exception(
+                        "%s error while handling error"
+                        " %s by error handler %s",
+                        error_handling_error.__class__.__name__,
+                        exc_value.__class__.__name__,
+                        error_handler_class.__name__,
+                    )
+
+        if not plugin_handled:
+            _DefaultErrorHandler()._handle(exc_value)
+
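+        # Returning True from __exit__ suppresses the exception, so it does
+        # not propagate beyond the ``with GlobalErrorHandler():`` block.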
+        return True
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/__init__.py
new file mode 100644
index 00000000..b022f129
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/__init__.py
@@ -0,0 +1,57 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from opentelemetry.sdk.metrics._internal import Meter, MeterProvider
+from opentelemetry.sdk.metrics._internal.exceptions import MetricsTimeoutError
+from opentelemetry.sdk.metrics._internal.exemplar import (
+    AlignedHistogramBucketExemplarReservoir,
+    AlwaysOffExemplarFilter,
+    AlwaysOnExemplarFilter,
+    Exemplar,
+    ExemplarFilter,
+    ExemplarReservoir,
+    SimpleFixedSizeExemplarReservoir,
+    TraceBasedExemplarFilter,
+)
+from opentelemetry.sdk.metrics._internal.instrument import (
+    Counter,
+    Histogram,
+    ObservableCounter,
+    ObservableGauge,
+    ObservableUpDownCounter,
+    UpDownCounter,
+)
+from opentelemetry.sdk.metrics._internal.instrument import Gauge as _Gauge
+
+__all__ = [
+    "AlignedHistogramBucketExemplarReservoir",
+    "AlwaysOnExemplarFilter",
+    "AlwaysOffExemplarFilter",
+    "Exemplar",
+    "ExemplarFilter",
+    "ExemplarReservoir",
+    "Meter",
+    "MeterProvider",
+    "MetricsTimeoutError",
+    "Counter",
+    "Histogram",
+    "_Gauge",
+    "ObservableCounter",
+    "ObservableGauge",
+    "ObservableUpDownCounter",
+    "SimpleFixedSizeExemplarReservoir",
+    "UpDownCounter",
+    "TraceBasedExemplarFilter",
+]
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/__init__.py
new file mode 100644
index 00000000..faa0959f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/__init__.py
@@ -0,0 +1,582 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import weakref
+from atexit import register, unregister
+from logging import getLogger
+from os import environ
+from threading import Lock
+from time import time_ns
+from typing import Optional, Sequence
+
+# This kind of import is needed to avoid Sphinx errors.
+import opentelemetry.sdk.metrics
+from opentelemetry.metrics import Counter as APICounter
+from opentelemetry.metrics import Histogram as APIHistogram
+from opentelemetry.metrics import Meter as APIMeter
+from opentelemetry.metrics import MeterProvider as APIMeterProvider
+from opentelemetry.metrics import NoOpMeter
+from opentelemetry.metrics import ObservableCounter as APIObservableCounter
+from opentelemetry.metrics import ObservableGauge as APIObservableGauge
+from opentelemetry.metrics import (
+    ObservableUpDownCounter as APIObservableUpDownCounter,
+)
+from opentelemetry.metrics import UpDownCounter as APIUpDownCounter
+from opentelemetry.metrics import _Gauge as APIGauge
+from opentelemetry.sdk.environment_variables import (
+    OTEL_METRICS_EXEMPLAR_FILTER,
+    OTEL_SDK_DISABLED,
+)
+from opentelemetry.sdk.metrics._internal.exceptions import MetricsTimeoutError
+from opentelemetry.sdk.metrics._internal.exemplar import (
+    AlwaysOffExemplarFilter,
+    AlwaysOnExemplarFilter,
+    ExemplarFilter,
+    TraceBasedExemplarFilter,
+)
+from opentelemetry.sdk.metrics._internal.instrument import (
+    _Counter,
+    _Gauge,
+    _Histogram,
+    _ObservableCounter,
+    _ObservableGauge,
+    _ObservableUpDownCounter,
+    _UpDownCounter,
+)
+from opentelemetry.sdk.metrics._internal.measurement_consumer import (
+    MeasurementConsumer,
+    SynchronousMeasurementConsumer,
+)
+from opentelemetry.sdk.metrics._internal.sdk_configuration import (
+    SdkConfiguration,
+)
+from opentelemetry.sdk.resources import Resource
+from opentelemetry.sdk.util.instrumentation import InstrumentationScope
+from opentelemetry.util._once import Once
+from opentelemetry.util.types import (
+    Attributes,
+)
+
+_logger = getLogger(__name__)
+
+
+class Meter(APIMeter):
+    """See `opentelemetry.metrics.Meter`."""
+
+    def __init__(
+        self,
+        instrumentation_scope: InstrumentationScope,
+        measurement_consumer: MeasurementConsumer,
+    ):
+        super().__init__(
+            name=instrumentation_scope.name,
+            version=instrumentation_scope.version,
+            schema_url=instrumentation_scope.schema_url,
+        )
+        self._instrumentation_scope = instrumentation_scope
+        self._measurement_consumer = measurement_consumer
+        self._instrument_id_instrument = {}
+        self._instrument_id_instrument_lock = Lock()
+
+    def create_counter(self, name, unit="", description="") -> APICounter:
+        status = self._register_instrument(name, _Counter, unit, description)
+
+        if status.conflict:
+            # FIXME #2558 go through all views here and check if this
+            # instrument registration conflict can be fixed. If it can be, do
+            # not log the following warning.
+            self._log_instrument_registration_conflict(
+                name,
+                APICounter.__name__,
+                unit,
+                description,
+                status,
+            )
+        if status.already_registered:
+            with self._instrument_id_instrument_lock:
+                return self._instrument_id_instrument[status.instrument_id]
+
+        instrument = _Counter(
+            name,
+            self._instrumentation_scope,
+            self._measurement_consumer,
+            unit,
+            description,
+        )
+
+        with self._instrument_id_instrument_lock:
+            self._instrument_id_instrument[status.instrument_id] = instrument
+            return instrument
+
+    def create_up_down_counter(
+        self, name, unit="", description=""
+    ) -> APIUpDownCounter:
+        status = self._register_instrument(
+            name, _UpDownCounter, unit, description
+        )
+
+        if status.conflict:
+            # FIXME #2558 go through all views here and check if this
+            # instrument registration conflict can be fixed. If it can be, do
+            # not log the following warning.
+            self._log_instrument_registration_conflict(
+                name,
+                APIUpDownCounter.__name__,
+                unit,
+                description,
+                status,
+            )
+        if status.already_registered:
+            with self._instrument_id_instrument_lock:
+                return self._instrument_id_instrument[status.instrument_id]
+
+        instrument = _UpDownCounter(
+            name,
+            self._instrumentation_scope,
+            self._measurement_consumer,
+            unit,
+            description,
+        )
+
+        with self._instrument_id_instrument_lock:
+            self._instrument_id_instrument[status.instrument_id] = instrument
+            return instrument
+
+    def create_observable_counter(
+        self,
+        name,
+        callbacks=None,
+        unit="",
+        description="",
+    ) -> APIObservableCounter:
+        status = self._register_instrument(
+            name, _ObservableCounter, unit, description
+        )
+
+        if status.conflict:
+            # FIXME #2558 go through all views here and check if this
+            # instrument registration conflict can be fixed. If it can be, do
+            # not log the following warning.
+            self._log_instrument_registration_conflict(
+                name,
+                APIObservableCounter.__name__,
+                unit,
+                description,
+                status,
+            )
+        if status.already_registered:
+            with self._instrument_id_instrument_lock:
+                return self._instrument_id_instrument[status.instrument_id]
+
+        instrument = _ObservableCounter(
+            name,
+            self._instrumentation_scope,
+            self._measurement_consumer,
+            callbacks,
+            unit,
+            description,
+        )
+
+        self._measurement_consumer.register_asynchronous_instrument(instrument)
+
+        with self._instrument_id_instrument_lock:
+            self._instrument_id_instrument[status.instrument_id] = instrument
+            return instrument
+
+    def create_histogram(
+        self,
+        name: str,
+        unit: str = "",
+        description: str = "",
+        *,
+        explicit_bucket_boundaries_advisory: Optional[Sequence[float]] = None,
+    ) -> APIHistogram:
+        if explicit_bucket_boundaries_advisory is not None:
+            invalid_advisory = False
+            if isinstance(explicit_bucket_boundaries_advisory, Sequence):
+                try:
+                    invalid_advisory = not (
+                        all(
+                            isinstance(e, (float, int))
+                            for e in explicit_bucket_boundaries_advisory
+                        )
+                    )
+                except (KeyError, TypeError):
+                    invalid_advisory = True
+            else:
+                invalid_advisory = True
+
+            if invalid_advisory:
+                explicit_bucket_boundaries_advisory = None
+                _logger.warning(
+                    "explicit_bucket_boundaries_advisory must be a sequence of numbers"
+                )
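+                # For example (sketch): [0, 1.5, 10] passes the validation
+                # above, while "abc" or [0, "a"] end up here and the advisory
+                # is dropped.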
+
+        status = self._register_instrument(
+            name,
+            _Histogram,
+            unit,
+            description,
+            explicit_bucket_boundaries_advisory,
+        )
+
+        if status.conflict:
+            # FIXME #2558 go through all views here and check if this
+            # instrument registration conflict can be fixed. If it can be, do
+            # not log the following warning.
+            self._log_instrument_registration_conflict(
+                name,
+                APIHistogram.__name__,
+                unit,
+                description,
+                status,
+            )
+        if status.already_registered:
+            with self._instrument_id_instrument_lock:
+                return self._instrument_id_instrument[status.instrument_id]
+
+        instrument = _Histogram(
+            name,
+            self._instrumentation_scope,
+            self._measurement_consumer,
+            unit,
+            description,
+            explicit_bucket_boundaries_advisory,
+        )
+        with self._instrument_id_instrument_lock:
+            self._instrument_id_instrument[status.instrument_id] = instrument
+            return instrument
+
+    def create_gauge(self, name, unit="", description="") -> APIGauge:
+        status = self._register_instrument(name, _Gauge, unit, description)
+
+        if status.conflict:
+            # FIXME #2558 go through all views here and check if this
+            # instrument registration conflict can be fixed. If it can be, do
+            # not log the following warning.
+            self._log_instrument_registration_conflict(
+                name,
+                APIGauge.__name__,
+                unit,
+                description,
+                status,
+            )
+        if status.already_registered:
+            with self._instrument_id_instrument_lock:
+                return self._instrument_id_instrument[status.instrument_id]
+
+        instrument = _Gauge(
+            name,
+            self._instrumentation_scope,
+            self._measurement_consumer,
+            unit,
+            description,
+        )
+
+        with self._instrument_id_instrument_lock:
+            self._instrument_id_instrument[status.instrument_id] = instrument
+            return instrument
+
+    def create_observable_gauge(
+        self, name, callbacks=None, unit="", description=""
+    ) -> APIObservableGauge:
+        status = self._register_instrument(
+            name, _ObservableGauge, unit, description
+        )
+
+        if status.conflict:
+            # FIXME #2558 go through all views here and check if this
+            # instrument registration conflict can be fixed. If it can be, do
+            # not log the following warning.
+            self._log_instrument_registration_conflict(
+                name,
+                APIObservableGauge.__name__,
+                unit,
+                description,
+                status,
+            )
+        if status.already_registered:
+            with self._instrument_id_instrument_lock:
+                return self._instrument_id_instrument[status.instrument_id]
+
+        instrument = _ObservableGauge(
+            name,
+            self._instrumentation_scope,
+            self._measurement_consumer,
+            callbacks,
+            unit,
+            description,
+        )
+
+        self._measurement_consumer.register_asynchronous_instrument(instrument)
+
+        with self._instrument_id_instrument_lock:
+            self._instrument_id_instrument[status.instrument_id] = instrument
+            return instrument
+
+    def create_observable_up_down_counter(
+        self, name, callbacks=None, unit="", description=""
+    ) -> APIObservableUpDownCounter:
+        status = self._register_instrument(
+            name, _ObservableUpDownCounter, unit, description
+        )
+
+        if status.conflict:
+            # FIXME #2558 go through all views here and check if this
+            # instrument registration conflict can be fixed. If it can be, do
+            # not log the following warning.
+            self._log_instrument_registration_conflict(
+                name,
+                APIObservableUpDownCounter.__name__,
+                unit,
+                description,
+                status,
+            )
+        if status.already_registered:
+            with self._instrument_id_instrument_lock:
+                return self._instrument_id_instrument[status.instrument_id]
+
+        instrument = _ObservableUpDownCounter(
+            name,
+            self._instrumentation_scope,
+            self._measurement_consumer,
+            callbacks,
+            unit,
+            description,
+        )
+
+        self._measurement_consumer.register_asynchronous_instrument(instrument)
+
+        with self._instrument_id_instrument_lock:
+            self._instrument_id_instrument[status.instrument_id] = instrument
+            return instrument
+
+
+def _get_exemplar_filter(exemplar_filter: str) -> ExemplarFilter:
+    if exemplar_filter == "trace_based":
+        return TraceBasedExemplarFilter()
+    if exemplar_filter == "always_on":
+        return AlwaysOnExemplarFilter()
+    if exemplar_filter == "always_off":
+        return AlwaysOffExemplarFilter()
+    msg = f"Unknown exemplar filter '{exemplar_filter}'."
+    raise ValueError(msg)
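+
+
+# For illustration: MeterProvider resolves the OTEL_METRICS_EXEMPLAR_FILTER
+# environment variable (default "trace_based") with this helper, e.g.
+#
+#     _get_exemplar_filter("always_off")  # -> AlwaysOffExemplarFilter()
+#     _get_exemplar_filter("bogus")       # -> raises ValueError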
+
+
+class MeterProvider(APIMeterProvider):
+    r"""See `opentelemetry.metrics.MeterProvider`.
+
+    Args:
+        metric_readers: Register metric readers to collect metrics from the SDK
+            on demand. Each :class:`opentelemetry.sdk.metrics.export.MetricReader` is
+            completely independent and will collect separate streams of
+            metrics. TODO: reference ``PeriodicExportingMetricReader`` usage with push
+            exporters here.
+        resource: The resource representing what the metrics emitted from the SDK pertain to.
+        shutdown_on_exit: If true, registers an `atexit` handler to call
+            `MeterProvider.shutdown`
+        views: The views to configure the metric output of the SDK
+
+    By default, instruments which do not match any :class:`opentelemetry.sdk.metrics.view.View` (or if no :class:`opentelemetry.sdk.metrics.view.View`\ s
+    are provided) will report metrics with the default aggregation for the
+    instrument's kind. To disable instruments by default, configure a match-all
+    :class:`opentelemetry.sdk.metrics.view.View` with `DropAggregation` and then create :class:`opentelemetry.sdk.metrics.view.View`\ s to re-enable
+    individual instruments:
+
+    .. code-block:: python
+        :caption: Disable default views
+
+        MeterProvider(
+            views=[
+                View(instrument_name="*", aggregation=DropAggregation()),
+                View(instrument_name="mycounter"),
+            ],
+            # ...
+        )
+    """
+
+    _all_metric_readers_lock = Lock()
+    _all_metric_readers = weakref.WeakSet()
+
+    def __init__(
+        self,
+        metric_readers: Sequence[
+            "opentelemetry.sdk.metrics.export.MetricReader"
+        ] = (),
+        resource: Optional[Resource] = None,
+        exemplar_filter: Optional[ExemplarFilter] = None,
+        shutdown_on_exit: bool = True,
+        views: Sequence["opentelemetry.sdk.metrics.view.View"] = (),
+    ):
+        self._lock = Lock()
+        self._meter_lock = Lock()
+        self._atexit_handler = None
+        if resource is None:
+            resource = Resource.create({})
+        self._sdk_config = SdkConfiguration(
+            exemplar_filter=(
+                exemplar_filter
+                or _get_exemplar_filter(
+                    environ.get(OTEL_METRICS_EXEMPLAR_FILTER, "trace_based")
+                )
+            ),
+            resource=resource,
+            metric_readers=metric_readers,
+            views=views,
+        )
+        self._measurement_consumer = SynchronousMeasurementConsumer(
+            sdk_config=self._sdk_config
+        )
+        disabled = environ.get(OTEL_SDK_DISABLED, "")
+        self._disabled = disabled.lower().strip() == "true"
+
+        if shutdown_on_exit:
+            self._atexit_handler = register(self.shutdown)
+
+        self._meters = {}
+        self._shutdown_once = Once()
+        self._shutdown = False
+
+        for metric_reader in self._sdk_config.metric_readers:
+            with self._all_metric_readers_lock:
+                if metric_reader in self._all_metric_readers:
+                    # pylint: disable=broad-exception-raised
+                    raise Exception(
+                        f"MetricReader {metric_reader} has been registered "
+                        "already in other MeterProvider instance"
+                    )
+
+                self._all_metric_readers.add(metric_reader)
+
+            metric_reader._set_collect_callback(
+                self._measurement_consumer.collect
+            )
+
+    def force_flush(self, timeout_millis: float = 10_000) -> bool:
+        deadline_ns = time_ns() + timeout_millis * 10**6
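+        # The timeout is a shared budget: each reader below is given only the
+        # time remaining until this deadline, not a fresh timeout_millis.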
+
+        metric_reader_error = {}
+
+        for metric_reader in self._sdk_config.metric_readers:
+            current_ts = time_ns()
+            try:
+                if current_ts >= deadline_ns:
+                    raise MetricsTimeoutError(
+                        "Timed out while flushing metric readers"
+                    )
+                metric_reader.force_flush(
+                    timeout_millis=(deadline_ns - current_ts) / 10**6
+                )
+
+            # pylint: disable=broad-exception-caught
+            except Exception as error:
+                metric_reader_error[metric_reader] = error
+
+        if metric_reader_error:
+            metric_reader_error_string = "\n".join(
+                [
+                    f"{metric_reader.__class__.__name__}: {repr(error)}"
+                    for metric_reader, error in metric_reader_error.items()
+                ]
+            )
+
+            # pylint: disable=broad-exception-raised
+            raise Exception(
+                "MeterProvider.force_flush failed because the following "
+                "metric readers failed during collect:\n"
+                f"{metric_reader_error_string}"
+            )
+        return True
+
+    def shutdown(self, timeout_millis: float = 30_000):
+        deadline_ns = time_ns() + timeout_millis * 10**6
+
+        def _shutdown():
+            self._shutdown = True
+
+        did_shutdown = self._shutdown_once.do_once(_shutdown)
+
+        if not did_shutdown:
+            _logger.warning("shutdown can only be called once")
+            return
+
+        metric_reader_error = {}
+
+        for metric_reader in self._sdk_config.metric_readers:
+            current_ts = time_ns()
+            try:
+                if current_ts >= deadline_ns:
+                    # pylint: disable=broad-exception-raised
+                    raise Exception(
+                        "Didn't get to execute, deadline already exceeded"
+                    )
+                metric_reader.shutdown(
+                    timeout_millis=(deadline_ns - current_ts) / 10**6
+                )
+
+            # pylint: disable=broad-exception-caught
+            except Exception as error:
+                metric_reader_error[metric_reader] = error
+
+        if self._atexit_handler is not None:
+            unregister(self._atexit_handler)
+            self._atexit_handler = None
+
+        if metric_reader_error:
+            metric_reader_error_string = "\n".join(
+                [
+                    f"{metric_reader.__class__.__name__}: {repr(error)}"
+                    for metric_reader, error in metric_reader_error.items()
+                ]
+            )
+
+            # pylint: disable=broad-exception-raised
+            raise Exception(
+                (
+                    "MeterProvider.shutdown failed because the following "
+                    "metric readers failed during shutdown:\n"
+                    f"{metric_reader_error_string}"
+                )
+            )
+
+    def get_meter(
+        self,
+        name: str,
+        version: Optional[str] = None,
+        schema_url: Optional[str] = None,
+        attributes: Optional[Attributes] = None,
+    ) -> Meter:
+        if self._disabled:
+            return NoOpMeter(name, version=version, schema_url=schema_url)
+
+        if self._shutdown:
+            _logger.warning(
+                "A shutdown `MeterProvider` can not provide a `Meter`"
+            )
+            return NoOpMeter(name, version=version, schema_url=schema_url)
+
+        if not name:
+            _logger.warning("Meter name cannot be None or empty.")
+            return NoOpMeter(name, version=version, schema_url=schema_url)
+
+        info = InstrumentationScope(name, version, schema_url, attributes)
+        with self._meter_lock:
+            if not self._meters.get(info):
+                # FIXME #2558 pass SDKConfig object to meter so that the meter
+                # has access to views.
+                self._meters[info] = Meter(
+                    info,
+                    self._measurement_consumer,
+                )
+            return self._meters[info]
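+
+
+# A minimal end-to-end usage sketch (hypothetical names; a real setup would
+# also register a metric reader, e.g. a PeriodicExportingMetricReader, to
+# export what is collected):
+#
+#     provider = MeterProvider()
+#     meter = provider.get_meter("my.library", version="1.0.0")
+#     counter = meter.create_counter("work.items", unit="1")
+#     counter.add(1, {"status": "ok"})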
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py
new file mode 100644
index 00000000..be81d70e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py
@@ -0,0 +1,153 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from logging import getLogger
+from threading import Lock
+from time import time_ns
+from typing import Dict, List, Optional, Sequence
+
+from opentelemetry.metrics import Instrument
+from opentelemetry.sdk.metrics._internal.aggregation import (
+    Aggregation,
+    DefaultAggregation,
+    _Aggregation,
+    _SumAggregation,
+)
+from opentelemetry.sdk.metrics._internal.export import AggregationTemporality
+from opentelemetry.sdk.metrics._internal.measurement import Measurement
+from opentelemetry.sdk.metrics._internal.point import DataPointT
+from opentelemetry.sdk.metrics._internal.view import View
+
+_logger = getLogger(__name__)
+
+
+class _ViewInstrumentMatch:
+    def __init__(
+        self,
+        view: View,
+        instrument: Instrument,
+        instrument_class_aggregation: Dict[type, Aggregation],
+    ):
+        self._view = view
+        self._instrument = instrument
+        self._attributes_aggregation: Dict[frozenset, _Aggregation] = {}
+        self._lock = Lock()
+        self._instrument_class_aggregation = instrument_class_aggregation
+        self._name = self._view._name or self._instrument.name
+        self._description = (
+            self._view._description or self._instrument.description
+        )
+        if not isinstance(self._view._aggregation, DefaultAggregation):
+            self._aggregation = self._view._aggregation._create_aggregation(
+                self._instrument,
+                None,
+                self._view._exemplar_reservoir_factory,
+                0,
+            )
+        else:
+            self._aggregation = self._instrument_class_aggregation[
+                self._instrument.__class__
+            ]._create_aggregation(
+                self._instrument,
+                None,
+                self._view._exemplar_reservoir_factory,
+                0,
+            )
+
+    def conflicts(self, other: "_ViewInstrumentMatch") -> bool:
+        # pylint: disable=protected-access
+
+        result = (
+            self._name == other._name
+            and self._instrument.unit == other._instrument.unit
+            # The aggregation class is being used here instead of data point
+            # type since they are functionally equivalent.
+            and self._aggregation.__class__ == other._aggregation.__class__
+        )
+        if isinstance(self._aggregation, _SumAggregation):
+            result = (
+                result
+                and self._aggregation._instrument_is_monotonic
+                == other._aggregation._instrument_is_monotonic
+                and self._aggregation._instrument_aggregation_temporality
+                == other._aggregation._instrument_aggregation_temporality
+            )
+
+        return result
+
+    # pylint: disable=protected-access
+    def consume_measurement(
+        self, measurement: Measurement, should_sample_exemplar: bool = True
+    ) -> None:
+        if self._view._attribute_keys is not None:
+            attributes = {}
+
+            for key, value in (measurement.attributes or {}).items():
+                if key in self._view._attribute_keys:
+                    attributes[key] = value
+        elif measurement.attributes is not None:
+            attributes = measurement.attributes
+        else:
+            attributes = {}
+
+        aggr_key = frozenset(attributes.items())
+
+        if aggr_key not in self._attributes_aggregation:
+            with self._lock:
+                if aggr_key not in self._attributes_aggregation:
+                    if not isinstance(
+                        self._view._aggregation, DefaultAggregation
+                    ):
+                        aggregation = (
+                            self._view._aggregation._create_aggregation(
+                                self._instrument,
+                                attributes,
+                                self._view._exemplar_reservoir_factory,
+                                time_ns(),
+                            )
+                        )
+                    else:
+                        aggregation = self._instrument_class_aggregation[
+                            self._instrument.__class__
+                        ]._create_aggregation(
+                            self._instrument,
+                            attributes,
+                            self._view._exemplar_reservoir_factory,
+                            time_ns(),
+                        )
+                    self._attributes_aggregation[aggr_key] = aggregation
+
+        self._attributes_aggregation[aggr_key].aggregate(
+            measurement, should_sample_exemplar
+        )
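+
+    # For example (sketch): with a View created with attribute_keys={"route"},
+    # measurements carrying {"route": "/a", "host": "h1"} and
+    # {"route": "/a", "host": "h2"} are both reduced to {"route": "/a"} in
+    # consume_measurement above and therefore share the same aggregation.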
+
+    def collect(
+        self,
+        collection_aggregation_temporality: AggregationTemporality,
+        collection_start_nanos: int,
+    ) -> Optional[Sequence[DataPointT]]:
+        data_points: List[DataPointT] = []
+        with self._lock:
+            for aggregation in self._attributes_aggregation.values():
+                data_point = aggregation.collect(
+                    collection_aggregation_temporality, collection_start_nanos
+                )
+                if data_point is not None:
+                    data_points.append(data_point)
+
+        # Return None here instead of an empty list because the caller does
+        # not consume an empty sequence, and to be consistent with the rest
+        # of the collect methods, which also return None.
+        return data_points or None
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/aggregation.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/aggregation.py
new file mode 100644
index 00000000..8443d951
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/aggregation.py
@@ -0,0 +1,1475 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# pylint: disable=too-many-lines
+
+from abc import ABC, abstractmethod
+from bisect import bisect_left
+from enum import IntEnum
+from functools import partial
+from logging import getLogger
+from math import inf
+from threading import Lock
+from typing import (
+    Callable,
+    Generic,
+    List,
+    Optional,
+    Sequence,
+    Type,
+    TypeVar,
+)
+
+from opentelemetry.metrics import (
+    Asynchronous,
+    Counter,
+    Histogram,
+    Instrument,
+    ObservableCounter,
+    ObservableGauge,
+    ObservableUpDownCounter,
+    Synchronous,
+    UpDownCounter,
+    _Gauge,
+)
+from opentelemetry.sdk.metrics._internal.exemplar import (
+    Exemplar,
+    ExemplarReservoirBuilder,
+)
+from opentelemetry.sdk.metrics._internal.exponential_histogram.buckets import (
+    Buckets,
+)
+from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping import (
+    Mapping,
+)
+from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping.exponent_mapping import (
+    ExponentMapping,
+)
+from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping.logarithm_mapping import (
+    LogarithmMapping,
+)
+from opentelemetry.sdk.metrics._internal.measurement import Measurement
+from opentelemetry.sdk.metrics._internal.point import Buckets as BucketsPoint
+from opentelemetry.sdk.metrics._internal.point import (
+    ExponentialHistogramDataPoint,
+    HistogramDataPoint,
+    NumberDataPoint,
+    Sum,
+)
+from opentelemetry.sdk.metrics._internal.point import Gauge as GaugePoint
+from opentelemetry.sdk.metrics._internal.point import (
+    Histogram as HistogramPoint,
+)
+from opentelemetry.util.types import Attributes
+
+_DataPointVarT = TypeVar("_DataPointVarT", NumberDataPoint, HistogramDataPoint)
+
+_logger = getLogger(__name__)
+
+
+class AggregationTemporality(IntEnum):
+    """
+    The temporality to use when aggregating data.
+
+    Can be one of the following values:
+    """
+
+    UNSPECIFIED = 0
+    DELTA = 1
+    CUMULATIVE = 2
+
+
+class _Aggregation(ABC, Generic[_DataPointVarT]):
+    def __init__(
+        self,
+        attributes: Attributes,
+        reservoir_builder: ExemplarReservoirBuilder,
+    ):
+        self._lock = Lock()
+        self._attributes = attributes
+        self._reservoir = reservoir_builder()
+        self._previous_point = None
+
+    @abstractmethod
+    def aggregate(
+        self, measurement: Measurement, should_sample_exemplar: bool = True
+    ) -> None:
+        """Aggregate a measurement.
+
+        Args:
+            measurement: Measurement to aggregate
+            should_sample_exemplar: Whether the measurement should be sampled by the exemplars reservoir or not.
+        """
+
+    @abstractmethod
+    def collect(
+        self,
+        collection_aggregation_temporality: AggregationTemporality,
+        collection_start_nano: int,
+    ) -> Optional[_DataPointVarT]:
+        pass
+
+    def _collect_exemplars(self) -> Sequence[Exemplar]:
+        """Returns the collected exemplars.
+
+        Returns:
+            The exemplars collected by the reservoir
+        """
+        return self._reservoir.collect(self._attributes)
+
+    def _sample_exemplar(
+        self, measurement: Measurement, should_sample_exemplar: bool
+    ) -> None:
+        """Offer the measurement to the exemplar reservoir for sampling.
+
+        It should be called within each :ref:`aggregate` call.
+
+        Args:
+            measurement: The new measurement
+            should_sample_exemplar: Whether the measurement should be sampled by the exemplars reservoir or not.
+        """
+        if should_sample_exemplar:
+            self._reservoir.offer(
+                measurement.value,
+                measurement.time_unix_nano,
+                measurement.attributes,
+                measurement.context,
+            )
+
+
+class _DropAggregation(_Aggregation):
+    def aggregate(
+        self, measurement: Measurement, should_sample_exemplar: bool = True
+    ) -> None:
+        pass
+
+    def collect(
+        self,
+        collection_aggregation_temporality: AggregationTemporality,
+        collection_start_nano: int,
+    ) -> Optional[_DataPointVarT]:
+        pass
+
+
+class _SumAggregation(_Aggregation[Sum]):
+    def __init__(
+        self,
+        attributes: Attributes,
+        instrument_is_monotonic: bool,
+        instrument_aggregation_temporality: AggregationTemporality,
+        start_time_unix_nano: int,
+        reservoir_builder: ExemplarReservoirBuilder,
+    ):
+        super().__init__(attributes, reservoir_builder)
+
+        self._start_time_unix_nano = start_time_unix_nano
+        self._instrument_aggregation_temporality = (
+            instrument_aggregation_temporality
+        )
+        self._instrument_is_monotonic = instrument_is_monotonic
+
+        self._value = None
+
+        self._previous_collection_start_nano = self._start_time_unix_nano
+        self._previous_value = 0
+
+    def aggregate(
+        self, measurement: Measurement, should_sample_exemplar: bool = True
+    ) -> None:
+        with self._lock:
+            if self._value is None:
+                self._value = 0
+
+            self._value = self._value + measurement.value
+
+        self._sample_exemplar(measurement, should_sample_exemplar)
+
+    def collect(
+        self,
+        collection_aggregation_temporality: AggregationTemporality,
+        collection_start_nano: int,
+    ) -> Optional[NumberDataPoint]:
+        """
+        Atomically return a point for the current value of the metric and
+        reset the aggregation value.
+
+        Synchronous instruments have a method which is called directly with
+        increments for a given quantity:
+
+        For example, an instrument that counts the number of passengers in
+        every vehicle that crosses a certain point on a highway:
+
+        synchronous_instrument.add(2)
+        collect(...)  # 2 passengers are counted
+        synchronous_instrument.add(3)
+        collect(...)  # 3 passengers are counted
+        synchronous_instrument.add(1)
+        collect(...)  # 1 passenger is counted
+
+        In this case the instrument aggregation temporality is DELTA because
+        every value represents an increment to the count.
+
+        Asynchronous instruments have a callback which returns the total value
+        of a given quantity:
+
+        For example, an instrument that measures the number of bytes written
+        to a certain hard drive:
+
+        callback() -> 1352
+        collect(...) # 1352 bytes have been written so far
+        callback() -> 2324
+        collect(...) # 2324 bytes have been written so far
+        callback() -> 4542
+        collect(...) # 4542 bytes have been written so far
+
+        In this case the instrument aggregation temporality is CUMULATIVE
+        because every value represents the total of the measurement.
+
+        There is also the collection aggregation temporality, which is passed
+        to this method. The collection aggregation temporality defines the
+        nature of the returned value by this aggregation.
+
+        When the collection aggregation temporality matches the
+        instrument aggregation temporality, then this method returns the
+        current value directly:
+
+        synchronous_instrument.add(2)
+        collect(DELTA) -> 2
+        synchronous_instrument.add(3)
+        collect(DELTA) -> 3
+        synchronous_instrument.add(1)
+        collect(DELTA) -> 1
+
+        callback() -> 1352
+        collect(CUMULATIVE) -> 1352
+        callback() -> 2324
+        collect(CUMULATIVE) -> 2324
+        callback() -> 4542
+        collect(CUMULATIVE) -> 4542
+
+        When the collection aggregation temporality does not match the
+        instrument aggregation temporality, then a conversion is made. For this
+        purpose, this aggregation keeps a private attribute,
+        self._previous_value.
+
+        When the instrument is synchronous:
+
+        self._previous_value is the sum of every previously
+        collected (delta) value. In this case, the returned (cumulative) value
+        will be:
+
+        self._previous_value + value
+
+        synchronous_instrument.add(2)
+        collect(CUMULATIVE) -> 2
+        synchronous_instrument.add(3)
+        collect(CUMULATIVE) -> 5
+        synchronous_instrument.add(1)
+        collect(CUMULATIVE) -> 6
+
+        Also, as a diagram:
+
+        time ->
+
+        self._previous_value
+        |-------------|
+
+        value (delta)
+                      |----|
+
+        returned value (cumulative)
+        |------------------|
+
+        When the instrument is asynchronous:
+
+        self._previous_value is the value of the previously
+        collected (cumulative) value. In this case, the returned (delta) value
+        will be:
+
+        value - self._previous_value
+
+        callback() -> 1352
+        collect(DELTA) -> 1352
+        callback() -> 2324
+        collect(DELTA) -> 972
+        callback() -> 4542
+        collect(DELTA) -> 2218
+
+        Also, as a diagram:
+
+        time ->
+
+        self._previous_value
+        |-------------|
+
+        value (cumulative)
+        |------------------|
+
+        returned value (delta)
+                      |----|
+        """
+
+        with self._lock:
+            value = self._value
+            self._value = None
+
+            if (
+                self._instrument_aggregation_temporality
+                is AggregationTemporality.DELTA
+            ):
+                # This happens when the corresponding instrument for this
+                # aggregation is synchronous.
+                if (
+                    collection_aggregation_temporality
+                    is AggregationTemporality.DELTA
+                ):
+                    previous_collection_start_nano = (
+                        self._previous_collection_start_nano
+                    )
+                    self._previous_collection_start_nano = (
+                        collection_start_nano
+                    )
+
+                    if value is None:
+                        return None
+
+                    return NumberDataPoint(
+                        attributes=self._attributes,
+                        exemplars=self._collect_exemplars(),
+                        start_time_unix_nano=previous_collection_start_nano,
+                        time_unix_nano=collection_start_nano,
+                        value=value,
+                    )
+
+                if value is None:
+                    value = 0
+
+                self._previous_value = value + self._previous_value
+
+                return NumberDataPoint(
+                    attributes=self._attributes,
+                    exemplars=self._collect_exemplars(),
+                    start_time_unix_nano=self._start_time_unix_nano,
+                    time_unix_nano=collection_start_nano,
+                    value=self._previous_value,
+                )
+
+            # This happens when the corresponding instrument for this
+            # aggregation is asynchronous.
+
+            if value is None:
+                # This happens when the corresponding instrument callback
+                # does not produce measurements.
+                return None
+
+            if (
+                collection_aggregation_temporality
+                is AggregationTemporality.DELTA
+            ):
+                result_value = value - self._previous_value
+
+                self._previous_value = value
+
+                previous_collection_start_nano = (
+                    self._previous_collection_start_nano
+                )
+                self._previous_collection_start_nano = collection_start_nano
+
+                return NumberDataPoint(
+                    attributes=self._attributes,
+                    exemplars=self._collect_exemplars(),
+                    start_time_unix_nano=previous_collection_start_nano,
+                    time_unix_nano=collection_start_nano,
+                    value=result_value,
+                )
+
+            return NumberDataPoint(
+                attributes=self._attributes,
+                exemplars=self._collect_exemplars(),
+                start_time_unix_nano=self._start_time_unix_nano,
+                time_unix_nano=collection_start_nano,
+                value=value,
+            )
+
+
+class _LastValueAggregation(_Aggregation[GaugePoint]):
+    def __init__(
+        self,
+        attributes: Attributes,
+        reservoir_builder: ExemplarReservoirBuilder,
+    ):
+        super().__init__(attributes, reservoir_builder)
+        self._value = None
+
+    def aggregate(
+        self, measurement: Measurement, should_sample_exemplar: bool = True
+    ):
+        with self._lock:
+            self._value = measurement.value
+
+        self._sample_exemplar(measurement, should_sample_exemplar)
+
+    def collect(
+        self,
+        collection_aggregation_temporality: AggregationTemporality,
+        collection_start_nano: int,
+    ) -> Optional[_DataPointVarT]:
+        """
+        Atomically return a point for the current value of the metric.
+        """
+        with self._lock:
+            if self._value is None:
+                return None
+            value = self._value
+            self._value = None
+
+        exemplars = self._collect_exemplars()
+
+        return NumberDataPoint(
+            attributes=self._attributes,
+            exemplars=exemplars,
+            start_time_unix_nano=None,
+            time_unix_nano=collection_start_nano,
+            value=value,
+        )
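+
+    # Note: because self._value is reset to None on every collect, a gauge
+    # data point is produced only for collection intervals in which at least
+    # one new measurement was recorded.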
+
+
+_DEFAULT_EXPLICIT_BUCKET_HISTOGRAM_AGGREGATION_BOUNDARIES: Sequence[float] = (
+    0.0,
+    5.0,
+    10.0,
+    25.0,
+    50.0,
+    75.0,
+    100.0,
+    250.0,
+    500.0,
+    750.0,
+    1000.0,
+    2500.0,
+    5000.0,
+    7500.0,
+    10000.0,
+)
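+# These match the default explicit bucket boundaries recommended by the
+# OpenTelemetry specification for histogram instruments.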
+
+
+class _ExplicitBucketHistogramAggregation(_Aggregation[HistogramPoint]):
+    def __init__(
+        self,
+        attributes: Attributes,
+        instrument_aggregation_temporality: AggregationTemporality,
+        start_time_unix_nano: int,
+        reservoir_builder: ExemplarReservoirBuilder,
+        boundaries: Optional[Sequence[float]] = None,
+        record_min_max: bool = True,
+    ):
+        if boundaries is None:
+            boundaries = (
+                _DEFAULT_EXPLICIT_BUCKET_HISTOGRAM_AGGREGATION_BOUNDARIES
+            )
+        super().__init__(
+            attributes,
+            reservoir_builder=partial(
+                reservoir_builder, boundaries=boundaries
+            ),
+        )
+
+        self._instrument_aggregation_temporality = (
+            instrument_aggregation_temporality
+        )
+        self._start_time_unix_nano = start_time_unix_nano
+        self._boundaries = tuple(boundaries)
+        self._record_min_max = record_min_max
+
+        self._value = None
+        self._min = inf
+        self._max = -inf
+        self._sum = 0
+
+        self._previous_value = None
+        self._previous_min = inf
+        self._previous_max = -inf
+        self._previous_sum = 0
+
+        self._previous_collection_start_nano = self._start_time_unix_nano
+
+    def _get_empty_bucket_counts(self) -> List[int]:
+        return [0] * (len(self._boundaries) + 1)
+
+    def aggregate(
+        self, measurement: Measurement, should_sample_exemplar: bool = True
+    ) -> None:
+        with self._lock:
+            if self._value is None:
+                self._value = self._get_empty_bucket_counts()
+
+            measurement_value = measurement.value
+
+            self._sum += measurement_value
+
+            if self._record_min_max:
+                self._min = min(self._min, measurement_value)
+                self._max = max(self._max, measurement_value)
+
+            self._value[bisect_left(self._boundaries, measurement_value)] += 1
+
+        self._sample_exemplar(measurement, should_sample_exemplar)
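+
+    # Bucketing sketch: with boundaries (0.0, 5.0, 10.0), a measurement of 7
+    # increments index bisect_left((0.0, 5.0, 10.0), 7) == 2, i.e. the
+    # (5.0, 10.0] bucket; values above the last boundary land in the final
+    # overflow bucket, hence the len(boundaries) + 1 counts.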
+
+    def collect(
+        self,
+        collection_aggregation_temporality: AggregationTemporality,
+        collection_start_nano: int,
+    ) -> Optional[_DataPointVarT]:
+        """
+        Atomically return a point for the current value of the metric.
+        """
+
+        with self._lock:
+            value = self._value
+            sum_ = self._sum
+            min_ = self._min
+            max_ = self._max
+
+            self._value = None
+            self._sum = 0
+            self._min = inf
+            self._max = -inf
+
+            if (
+                self._instrument_aggregation_temporality
+                is AggregationTemporality.DELTA
+            ):
+                # This happens when the corresponding instrument for this
+                # aggregation is synchronous.
+                if (
+                    collection_aggregation_temporality
+                    is AggregationTemporality.DELTA
+                ):
+                    previous_collection_start_nano = (
+                        self._previous_collection_start_nano
+                    )
+                    self._previous_collection_start_nano = (
+                        collection_start_nano
+                    )
+
+                    if value is None:
+                        return None
+
+                    return HistogramDataPoint(
+                        attributes=self._attributes,
+                        exemplars=self._collect_exemplars(),
+                        start_time_unix_nano=previous_collection_start_nano,
+                        time_unix_nano=collection_start_nano,
+                        count=sum(value),
+                        sum=sum_,
+                        bucket_counts=tuple(value),
+                        explicit_bounds=self._boundaries,
+                        min=min_,
+                        max=max_,
+                    )
+
+                if value is None:
+                    value = self._get_empty_bucket_counts()
+
+                if self._previous_value is None:
+                    self._previous_value = self._get_empty_bucket_counts()
+
+                self._previous_value = [
+                    value_element + previous_value_element
+                    for (
+                        value_element,
+                        previous_value_element,
+                    ) in zip(value, self._previous_value)
+                ]
+                self._previous_min = min(min_, self._previous_min)
+                self._previous_max = max(max_, self._previous_max)
+                self._previous_sum = sum_ + self._previous_sum
+
+                return HistogramDataPoint(
+                    attributes=self._attributes,
+                    exemplars=self._collect_exemplars(),
+                    start_time_unix_nano=self._start_time_unix_nano,
+                    time_unix_nano=collection_start_nano,
+                    count=sum(self._previous_value),
+                    sum=self._previous_sum,
+                    bucket_counts=tuple(self._previous_value),
+                    explicit_bounds=self._boundaries,
+                    min=self._previous_min,
+                    max=self._previous_max,
+                )
+
+            return None
+
+
+# pylint: disable=protected-access
+class _ExponentialBucketHistogramAggregation(_Aggregation[HistogramPoint]):
+    # _min_max_size and _max_max_size are the smallest and largest values
+    # the max_size parameter may have, respectively.
+
+    # _min_max_size is the smallest reasonable value which is small enough
+    # to contain the entire normal floating point range at the minimum scale.
+    _min_max_size = 2
+
+    # _max_max_size is an arbitrary cap meant to prevent the accidental
+    # creation of giant exponential bucket histograms.
+    _max_max_size = 16384
+
+    def __init__(
+        self,
+        attributes: Attributes,
+        reservoir_builder: ExemplarReservoirBuilder,
+        instrument_aggregation_temporality: AggregationTemporality,
+        start_time_unix_nano: int,
+        # This is the default maximum number of buckets per positive or
+        # negative number range.  The value 160 is specified by OpenTelemetry.
+        # See the derivation here:
+        # https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exponential-bucket-histogram-aggregation
+        max_size: int = 160,
+        max_scale: int = 20,
+    ):
+        # max_size is the maximum capacity of the positive and negative
+        # buckets.
+        # _sum is the sum of all the values aggregated by this aggregator.
+        # _count is the count of all calls to aggregate.
+        # _zero_count is the count of all the calls to aggregate when the value
+        # to be aggregated is exactly 0.
+        # _min is the smallest value aggregated by this aggregator.
+        # _max is the largest value aggregated by this aggregator.
+        # _positive holds the positive values.
+        # _negative holds the negative values by their absolute value.
+        if max_size < self._min_max_size:
+            raise ValueError(
+                f"Buckets max size {max_size} is smaller than "
+                "minimum max size {self._min_max_size}"
+            )
+
+        if max_size > self._max_max_size:
+            raise ValueError(
+                f"Buckets max size {max_size} is larger than "
+                "maximum max size {self._max_max_size}"
+            )
+        if max_scale > 20:
+            _logger.warning(
+                "max_scale is set to %s which is "
+                "larger than the recommended value of 20",
+                max_scale,
+            )
+
+        # This aggregation is analogous to _ExplicitBucketHistogramAggregation,
+        # the only difference is that with every call to aggregate, the size
+        # and amount of buckets can change (in
+        # _ExplicitBucketHistogramAggregation both size and amount of buckets
+        # remain constant once it is instantiated).
+
+        super().__init__(
+            attributes,
+            reservoir_builder=partial(
+                reservoir_builder, size=min(20, max_size)
+            ),
+        )
+
+        self._instrument_aggregation_temporality = (
+            instrument_aggregation_temporality
+        )
+        self._start_time_unix_nano = start_time_unix_nano
+        self._max_size = max_size
+        self._max_scale = max_scale
+
+        self._value_positive = None
+        self._value_negative = None
+        self._min = inf
+        self._max = -inf
+        self._sum = 0
+        self._count = 0
+        self._zero_count = 0
+        self._scale = None
+
+        self._previous_value_positive = None
+        self._previous_value_negative = None
+        self._previous_min = inf
+        self._previous_max = -inf
+        self._previous_sum = 0
+        self._previous_count = 0
+        self._previous_zero_count = 0
+        self._previous_scale = None
+
+        self._previous_collection_start_nano = self._start_time_unix_nano
+
+        self._mapping = self._new_mapping(self._max_scale)
+
+    def aggregate(
+        self, measurement: Measurement, should_sample_exemplar: bool = True
+    ) -> None:
+        # pylint: disable=too-many-branches,too-many-statements, too-many-locals
+
+        with self._lock:
+            if self._value_positive is None:
+                self._value_positive = Buckets()
+            if self._value_negative is None:
+                self._value_negative = Buckets()
+
+            measurement_value = measurement.value
+
+            self._sum += measurement_value
+
+            self._min = min(self._min, measurement_value)
+            self._max = max(self._max, measurement_value)
+
+            self._count += 1
+
+            if measurement_value == 0:
+                self._zero_count += 1
+
+                if self._count == self._zero_count:
+                    self._scale = 0
+
+                return
+
+            if measurement_value > 0:
+                value = self._value_positive
+
+            else:
+                measurement_value = -measurement_value
+                value = self._value_negative
+
+            # The following code determines whether the buckets need to
+            # change to hold the incoming measurement_value, and changes them
+            # if necessary. This process does not exist in
+            # _ExplicitBucketHistogramAggregation because the buckets there
+            # are constant in size and amount.
+            index = self._mapping.map_to_index(measurement_value)
+
+            is_rescaling_needed = False
+            low, high = 0, 0
+
+            if len(value) == 0:
+                value.index_start = index
+                value.index_end = index
+                value.index_base = index
+
+            elif (
+                index < value.index_start
+                and (value.index_end - index) >= self._max_size
+            ):
+                is_rescaling_needed = True
+                low = index
+                high = value.index_end
+
+            elif (
+                index > value.index_end
+                and (index - value.index_start) >= self._max_size
+            ):
+                is_rescaling_needed = True
+                low = value.index_start
+                high = index
+
+            if is_rescaling_needed:
+                scale_change = self._get_scale_change(low, high)
+                self._downscale(
+                    scale_change,
+                    self._value_positive,
+                    self._value_negative,
+                )
+                self._mapping = self._new_mapping(
+                    self._mapping.scale - scale_change
+                )
+
+                index = self._mapping.map_to_index(measurement_value)
+
+            self._scale = self._mapping.scale
+
+            if index < value.index_start:
+                span = value.index_end - index
+
+                if span >= len(value.counts):
+                    value.grow(span + 1, self._max_size)
+
+                value.index_start = index
+
+            elif index > value.index_end:
+                span = index - value.index_start
+
+                if span >= len(value.counts):
+                    value.grow(span + 1, self._max_size)
+
+                value.index_end = index
+
+            bucket_index = index - value.index_base
+
+            if bucket_index < 0:
+                bucket_index += len(value.counts)
+
+            # Now the buckets have been changed if needed and bucket_index will
+            # be used to increment the counter of the bucket that needs to be
+            # incremented.
+
+            # This is analogous to
+            # self._value[bisect_left(self._boundaries, measurement_value)] += 1
+            # in _ExplicitBucketHistogramAggregation.aggregate
+            value.increment_bucket(bucket_index)
+
+        self._sample_exemplar(measurement, should_sample_exemplar)
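+
+    # Rescaling sketch: with max_size=4, if a new index would make the
+    # occupied range span more than 4 buckets, _get_scale_change computes how
+    # many times the scale must drop, _downscale merges the existing counts
+    # into coarser buckets, and the mapping is rebuilt at the lower scale so
+    # the wider range fits.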
+
+    def collect(
+        self,
+        collection_aggregation_temporality: AggregationTemporality,
+        collection_start_nano: int,
+    ) -> Optional[_DataPointVarT]:
+        """
+        Atomically return a point for the current value of the metric.
+        """
+
+        # pylint: disable=too-many-statements, too-many-locals
+        with self._lock:
+            value_positive = self._value_positive
+            value_negative = self._value_negative
+            sum_ = self._sum
+            min_ = self._min
+            max_ = self._max
+            count = self._count
+            zero_count = self._zero_count
+            scale = self._scale
+
+            self._value_positive = None
+            self._value_negative = None
+            self._sum = 0
+            self._min = inf
+            self._max = -inf
+            self._count = 0
+            self._zero_count = 0
+            self._scale = None
+
+            if (
+                self._instrument_aggregation_temporality
+                is AggregationTemporality.DELTA
+            ):
+                # This happens when the corresponding instrument for this
+                # aggregation is synchronous.
+                if (
+                    collection_aggregation_temporality
+                    is AggregationTemporality.DELTA
+                ):
+                    previous_collection_start_nano = (
+                        self._previous_collection_start_nano
+                    )
+                    self._previous_collection_start_nano = (
+                        collection_start_nano
+                    )
+
+                    if value_positive is None and value_negative is None:
+                        return None
+
+                    return ExponentialHistogramDataPoint(
+                        attributes=self._attributes,
+                        exemplars=self._collect_exemplars(),
+                        start_time_unix_nano=previous_collection_start_nano,
+                        time_unix_nano=collection_start_nano,
+                        count=count,
+                        sum=sum_,
+                        scale=scale,
+                        zero_count=zero_count,
+                        positive=BucketsPoint(
+                            offset=value_positive.offset,
+                            bucket_counts=(value_positive.get_offset_counts()),
+                        ),
+                        negative=BucketsPoint(
+                            offset=value_negative.offset,
+                            bucket_counts=(value_negative.get_offset_counts()),
+                        ),
+                        # FIXME: Find the right value for flags
+                        flags=0,
+                        min=min_,
+                        max=max_,
+                    )
+
+                # Here collection_aggregation_temporality is CUMULATIVE, and
+                # the instrument aggregation temporality is always DELTA for
+                # the time being. Two cases need to be handled:
+                # 1. collect is called after at least one previous call to
+                #    collect (there is data in the previous buckets; a call
+                #    to merge is needed to handle possible differences in
+                #    bucket sizes).
+                # 2. collect is called without any previous call to collect
+                #    (there are no previous buckets; empty previous buckets
+                #    with the same scale as the current buckets need to be
+                #    created so that they can be cumulatively aggregated
+                #    into the current buckets).
+
+                if (
+                    value_positive is None
+                    and self._previous_value_positive is None
+                ):
+                    # This happens if collect is called for the first time
+                    # and aggregate has not yet been called.
+                    value_positive = Buckets()
+                    self._previous_value_positive = value_positive.copy_empty()
+                if (
+                    value_negative is None
+                    and self._previous_value_negative is None
+                ):
+                    value_negative = Buckets()
+                    self._previous_value_negative = value_negative.copy_empty()
+                if scale is None and self._previous_scale is None:
+                    scale = self._mapping.scale
+                    self._previous_scale = scale
+
+                if (
+                    value_positive is not None
+                    and self._previous_value_positive is None
+                ):
+                    # This happens when collect is called the very first time
+                    # and aggregate has been called before.
+
+                    # We need previous buckets to add them to the current ones.
+                    # When collect is called for the first time, there are no
+                    # previous buckets, so we need to create empty buckets to
+                    # add them to the current ones. The addition of empty
+                    # buckets to the current ones will result in the current
+                    # ones unchanged.
+
+                    # The way the previous buckets are generated here is
+                    # different from the explicit bucket histogram where
+                    # the size and amount of the buckets does not change once
+                    # they are instantiated. Here, the size and amount of the
+                    # buckets can change with every call to aggregate. In order
+                    # to get empty buckets that can be added to the current
+                    # ones resulting in the current ones unchanged we need to
+                    # generate empty buckets that have the same size and amount
+                    # as the current ones, this is what copy_empty does.
+                    self._previous_value_positive = value_positive.copy_empty()
+                if (
+                    value_negative is not None
+                    and self._previous_value_negative is None
+                ):
+                    self._previous_value_negative = value_negative.copy_empty()
+                if scale is not None and self._previous_scale is None:
+                    self._previous_scale = scale
+
+                if (
+                    value_positive is None
+                    and self._previous_value_positive is not None
+                ):
+                    value_positive = self._previous_value_positive.copy_empty()
+                if (
+                    value_negative is None
+                    and self._previous_value_negative is not None
+                ):
+                    value_negative = self._previous_value_negative.copy_empty()
+                if scale is None and self._previous_scale is not None:
+                    scale = self._previous_scale
+
+                min_scale = min(self._previous_scale, scale)
+
+                low_positive, high_positive = (
+                    self._get_low_high_previous_current(
+                        self._previous_value_positive,
+                        value_positive,
+                        scale,
+                        min_scale,
+                    )
+                )
+                low_negative, high_negative = (
+                    self._get_low_high_previous_current(
+                        self._previous_value_negative,
+                        value_negative,
+                        scale,
+                        min_scale,
+                    )
+                )
+
+                min_scale = min(
+                    min_scale
+                    - self._get_scale_change(low_positive, high_positive),
+                    min_scale
+                    - self._get_scale_change(low_negative, high_negative),
+                )
+
+                self._downscale(
+                    self._previous_scale - min_scale,
+                    self._previous_value_positive,
+                    self._previous_value_negative,
+                )
+
+                # self._merge adds the values from value to
+                # self._previous_value, this is analogous to
+                # self._previous_value = [
+                #     value_element + previous_value_element
+                #     for (
+                #         value_element,
+                #         previous_value_element,
+                #     ) in zip(value, self._previous_value)
+                # ]
+                # in _ExplicitBucketHistogramAggregation.collect.
+                self._merge(
+                    self._previous_value_positive,
+                    value_positive,
+                    scale,
+                    min_scale,
+                    collection_aggregation_temporality,
+                )
+                self._merge(
+                    self._previous_value_negative,
+                    value_negative,
+                    scale,
+                    min_scale,
+                    collection_aggregation_temporality,
+                )
+
+                self._previous_min = min(min_, self._previous_min)
+                self._previous_max = max(max_, self._previous_max)
+                self._previous_sum = sum_ + self._previous_sum
+                self._previous_count = count + self._previous_count
+                self._previous_zero_count = (
+                    zero_count + self._previous_zero_count
+                )
+                self._previous_scale = min_scale
+
+                return ExponentialHistogramDataPoint(
+                    attributes=self._attributes,
+                    exemplars=self._collect_exemplars(),
+                    start_time_unix_nano=self._start_time_unix_nano,
+                    time_unix_nano=collection_start_nano,
+                    count=self._previous_count,
+                    sum=self._previous_sum,
+                    scale=self._previous_scale,
+                    zero_count=self._previous_zero_count,
+                    positive=BucketsPoint(
+                        offset=self._previous_value_positive.offset,
+                        bucket_counts=(
+                            self._previous_value_positive.get_offset_counts()
+                        ),
+                    ),
+                    negative=BucketsPoint(
+                        offset=self._previous_value_negative.offset,
+                        bucket_counts=(
+                            self._previous_value_negative.get_offset_counts()
+                        ),
+                    ),
+                    # FIXME: Find the right value for flags
+                    flags=0,
+                    min=self._previous_min,
+                    max=self._previous_max,
+                )
+
+            return None
+
+    def _get_low_high_previous_current(
+        self,
+        previous_point_buckets,
+        current_point_buckets,
+        current_scale,
+        min_scale,
+    ):
+        (previous_point_low, previous_point_high) = self._get_low_high(
+            previous_point_buckets, self._previous_scale, min_scale
+        )
+        (current_point_low, current_point_high) = self._get_low_high(
+            current_point_buckets, current_scale, min_scale
+        )
+
+        if current_point_low > current_point_high:
+            low = previous_point_low
+            high = previous_point_high
+
+        elif previous_point_low > previous_point_high:
+            low = current_point_low
+            high = current_point_high
+
+        else:
+            low = min(previous_point_low, current_point_low)
+            high = max(previous_point_high, current_point_high)
+
+        return low, high
+
+    @staticmethod
+    def _get_low_high(buckets, scale, min_scale):
+        if buckets.counts == [0]:
+            return 0, -1
+
+        shift = scale - min_scale
+
+        return buckets.index_start >> shift, buckets.index_end >> shift
+
+    @staticmethod
+    def _new_mapping(scale: int) -> Mapping:
+        if scale <= 0:
+            return ExponentMapping(scale)
+        return LogarithmMapping(scale)
+
+    def _get_scale_change(self, low, high):
+        change = 0
+
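+        # For example, with max_size 160, low 0 and high 500, the range is
+        # halved twice (500 -> 250 -> 125) before high - low < max_size,
+        # so the returned change is 2.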
+        while high - low >= self._max_size:
+            high = high >> 1
+            low = low >> 1
+
+            change += 1
+
+        return change
+
+    @staticmethod
+    def _downscale(change: int, positive, negative):
+        if change == 0:
+            return
+
+        if change < 0:
+            # pylint: disable=broad-exception-raised
+            raise Exception("Invalid change of scale")
+
+        positive.downscale(change)
+        negative.downscale(change)
+
+    def _merge(
+        self,
+        previous_buckets: Buckets,
+        current_buckets: Buckets,
+        current_scale,
+        min_scale,
+        aggregation_temporality,
+    ):
+        current_change = current_scale - min_scale
+
+        for current_bucket_index, current_bucket in enumerate(
+            current_buckets.counts
+        ):
+            if current_bucket == 0:
+                continue
+
+            # Not considering the case where len(previous_buckets) == 0. This
+            # would not happen because self._previous_point is only assigned to
+            # an ExponentialHistogramDataPoint object if self._count != 0.
+
+            current_index = current_buckets.index_base + current_bucket_index
+            if current_index > current_buckets.index_end:
+                current_index -= len(current_buckets.counts)
+
+            index = current_index >> current_change
+
+            if index < previous_buckets.index_start:
+                span = previous_buckets.index_end - index
+
+                if span >= self._max_size:
+                    # pylint: disable=broad-exception-raised
+                    raise Exception("Incorrect merge scale")
+
+                if span >= len(previous_buckets.counts):
+                    previous_buckets.grow(span + 1, self._max_size)
+
+                previous_buckets.index_start = index
+
+            if index > previous_buckets.index_end:
+                span = index - previous_buckets.index_start
+
+                if span >= self._max_size:
+                    # pylint: disable=broad-exception-raised
+                    raise Exception("Incorrect merge scale")
+
+                if span >= len(previous_buckets.counts):
+                    previous_buckets.grow(span + 1, self._max_size)
+
+                previous_buckets.index_end = index
+
+            bucket_index = index - previous_buckets.index_base
+
+            if bucket_index < 0:
+                bucket_index += len(previous_buckets.counts)
+
+            if aggregation_temporality is AggregationTemporality.DELTA:
+                current_bucket = -current_bucket
+
+            previous_buckets.increment_bucket(
+                bucket_index, increment=current_bucket
+            )
+
+
+class Aggregation(ABC):
+    """
+    Base class for all aggregation types.
+    """
+
+    @abstractmethod
+    def _create_aggregation(
+        self,
+        instrument: Instrument,
+        attributes: Attributes,
+        reservoir_factory: Callable[
+            [Type[_Aggregation]], ExemplarReservoirBuilder
+        ],
+        start_time_unix_nano: int,
+    ) -> _Aggregation:
+        """Creates an aggregation"""
+
+
+class DefaultAggregation(Aggregation):
+    """
+    The default aggregation to be used in a `View`.
+
+    This aggregation will create an actual aggregation depending on the
+    instrument type, as specified next:
+
+    ==================================================== ====================================
+    Instrument                                           Aggregation
+    ==================================================== ====================================
+    `opentelemetry.sdk.metrics.Counter`                  `SumAggregation`
+    `opentelemetry.sdk.metrics.UpDownCounter`            `SumAggregation`
+    `opentelemetry.sdk.metrics.ObservableCounter`        `SumAggregation`
+    `opentelemetry.sdk.metrics.ObservableUpDownCounter`  `SumAggregation`
+    `opentelemetry.sdk.metrics.Histogram`                `ExplicitBucketHistogramAggregation`
+    `opentelemetry.sdk.metrics.ObservableGauge`          `LastValueAggregation`
+    ==================================================== ====================================
+    """
+
+    def _create_aggregation(
+        self,
+        instrument: Instrument,
+        attributes: Attributes,
+        reservoir_factory: Callable[
+            [Type[_Aggregation]], ExemplarReservoirBuilder
+        ],
+        start_time_unix_nano: int,
+    ) -> _Aggregation:
+        # pylint: disable=too-many-return-statements
+        if isinstance(instrument, Counter):
+            return _SumAggregation(
+                attributes,
+                reservoir_builder=reservoir_factory(_SumAggregation),
+                instrument_is_monotonic=True,
+                instrument_aggregation_temporality=(
+                    AggregationTemporality.DELTA
+                ),
+                start_time_unix_nano=start_time_unix_nano,
+            )
+        if isinstance(instrument, UpDownCounter):
+            return _SumAggregation(
+                attributes,
+                reservoir_builder=reservoir_factory(_SumAggregation),
+                instrument_is_monotonic=False,
+                instrument_aggregation_temporality=(
+                    AggregationTemporality.DELTA
+                ),
+                start_time_unix_nano=start_time_unix_nano,
+            )
+
+        if isinstance(instrument, ObservableCounter):
+            return _SumAggregation(
+                attributes,
+                reservoir_builder=reservoir_factory(_SumAggregation),
+                instrument_is_monotonic=True,
+                instrument_aggregation_temporality=(
+                    AggregationTemporality.CUMULATIVE
+                ),
+                start_time_unix_nano=start_time_unix_nano,
+            )
+
+        if isinstance(instrument, ObservableUpDownCounter):
+            return _SumAggregation(
+                attributes,
+                reservoir_builder=reservoir_factory(_SumAggregation),
+                instrument_is_monotonic=False,
+                instrument_aggregation_temporality=(
+                    AggregationTemporality.CUMULATIVE
+                ),
+                start_time_unix_nano=start_time_unix_nano,
+            )
+
+        if isinstance(instrument, Histogram):
+            boundaries = instrument._advisory.explicit_bucket_boundaries
+            return _ExplicitBucketHistogramAggregation(
+                attributes,
+                reservoir_builder=reservoir_factory(
+                    _ExplicitBucketHistogramAggregation
+                ),
+                instrument_aggregation_temporality=(
+                    AggregationTemporality.DELTA
+                ),
+                boundaries=boundaries,
+                start_time_unix_nano=start_time_unix_nano,
+            )
+
+        if isinstance(instrument, ObservableGauge):
+            return _LastValueAggregation(
+                attributes,
+                reservoir_builder=reservoir_factory(_LastValueAggregation),
+            )
+
+        if isinstance(instrument, _Gauge):
+            return _LastValueAggregation(
+                attributes,
+                reservoir_builder=reservoir_factory(_LastValueAggregation),
+            )
+
+        # pylint: disable=broad-exception-raised
+        raise Exception(f"Invalid instrument type {type(instrument)} found")
+
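+# Usage sketch (assuming the public ``View`` API from
+# opentelemetry.sdk.metrics.view): ``DefaultAggregation`` resolves to the
+# instrument-specific aggregation from the table above at collection time.
+#
+#     from opentelemetry.sdk.metrics import MeterProvider
+#     from opentelemetry.sdk.metrics.view import View
+#
+#     provider = MeterProvider(
+#         views=[View(instrument_name="*", aggregation=DefaultAggregation())]
+#     )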
+
+class ExponentialBucketHistogramAggregation(Aggregation):
+    def __init__(
+        self,
+        max_size: int = 160,
+        max_scale: int = 20,
+    ):
+        self._max_size = max_size
+        self._max_scale = max_scale
+
+    def _create_aggregation(
+        self,
+        instrument: Instrument,
+        attributes: Attributes,
+        reservoir_factory: Callable[
+            [Type[_Aggregation]], ExemplarReservoirBuilder
+        ],
+        start_time_unix_nano: int,
+    ) -> _Aggregation:
+        instrument_aggregation_temporality = AggregationTemporality.UNSPECIFIED
+        if isinstance(instrument, Synchronous):
+            instrument_aggregation_temporality = AggregationTemporality.DELTA
+        elif isinstance(instrument, Asynchronous):
+            instrument_aggregation_temporality = (
+                AggregationTemporality.CUMULATIVE
+            )
+
+        return _ExponentialBucketHistogramAggregation(
+            attributes,
+            reservoir_factory(_ExponentialBucketHistogramAggregation),
+            instrument_aggregation_temporality,
+            start_time_unix_nano,
+            max_size=self._max_size,
+            max_scale=self._max_scale,
+        )
+
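+# Usage sketch (assuming the public ``View`` API): route a histogram
+# instrument to an exponential bucket histogram with a smaller bucket budget:
+#
+#     View(
+#         instrument_name="http.server.duration",
+#         aggregation=ExponentialBucketHistogramAggregation(max_size=40),
+#     )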
+
+class ExplicitBucketHistogramAggregation(Aggregation):
+    """This aggregation informs the SDK to collect:
+
+    - Count of Measurement values falling within explicit bucket boundaries.
+    - Arithmetic sum of Measurement values in population. This SHOULD NOT be collected when used with instruments that record negative measurements, e.g. UpDownCounter or ObservableGauge.
+    - Min (optional) Measurement value in population.
+    - Max (optional) Measurement value in population.
+
+
+    Args:
+        boundaries: Array of increasing values representing explicit bucket boundary values.
+        record_min_max: Whether to record min and max.
+    """
+
+    def __init__(
+        self,
+        boundaries: Optional[Sequence[float]] = None,
+        record_min_max: bool = True,
+    ) -> None:
+        self._boundaries = boundaries
+        self._record_min_max = record_min_max
+
+    def _create_aggregation(
+        self,
+        instrument: Instrument,
+        attributes: Attributes,
+        reservoir_factory: Callable[
+            [Type[_Aggregation]], ExemplarReservoirBuilder
+        ],
+        start_time_unix_nano: int,
+    ) -> _Aggregation:
+        instrument_aggregation_temporality = AggregationTemporality.UNSPECIFIED
+        if isinstance(instrument, Synchronous):
+            instrument_aggregation_temporality = AggregationTemporality.DELTA
+        elif isinstance(instrument, Asynchronous):
+            instrument_aggregation_temporality = (
+                AggregationTemporality.CUMULATIVE
+            )
+
+        if self._boundaries is None:
+            self._boundaries = (
+                instrument._advisory.explicit_bucket_boundaries
+                or _DEFAULT_EXPLICIT_BUCKET_HISTOGRAM_AGGREGATION_BOUNDARIES
+            )
+
+        return _ExplicitBucketHistogramAggregation(
+            attributes,
+            instrument_aggregation_temporality,
+            start_time_unix_nano,
+            reservoir_factory(_ExplicitBucketHistogramAggregation),
+            self._boundaries,
+            self._record_min_max,
+        )
+
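+# Usage sketch (assuming the public ``View`` API): explicit latency buckets,
+# here in seconds:
+#
+#     View(
+#         instrument_name="http.server.duration",
+#         aggregation=ExplicitBucketHistogramAggregation(
+#             boundaries=(0.01, 0.1, 1.0, 10.0)
+#         ),
+#     )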
+
+class SumAggregation(Aggregation):
+    """This aggregation informs the SDK to collect:
+
+    - The arithmetic sum of Measurement values.
+    """
+
+    def _create_aggregation(
+        self,
+        instrument: Instrument,
+        attributes: Attributes,
+        reservoir_factory: Callable[
+            [Type[_Aggregation]], ExemplarReservoirBuilder
+        ],
+        start_time_unix_nano: int,
+    ) -> _Aggregation:
+        instrument_aggregation_temporality = AggregationTemporality.UNSPECIFIED
+        if isinstance(instrument, Synchronous):
+            instrument_aggregation_temporality = AggregationTemporality.DELTA
+        elif isinstance(instrument, Asynchronous):
+            instrument_aggregation_temporality = (
+                AggregationTemporality.CUMULATIVE
+            )
+
+        return _SumAggregation(
+            attributes,
+            isinstance(instrument, (Counter, ObservableCounter)),
+            instrument_aggregation_temporality,
+            start_time_unix_nano,
+            reservoir_factory(_SumAggregation),
+        )
+
+
+class LastValueAggregation(Aggregation):
+    """
+    This aggregation informs the SDK to collect:
+
+    - The last Measurement.
+    - The timestamp of the last Measurement.
+    """
+
+    def _create_aggregation(
+        self,
+        instrument: Instrument,
+        attributes: Attributes,
+        reservoir_factory: Callable[
+            [Type[_Aggregation]], ExemplarReservoirBuilder
+        ],
+        start_time_unix_nano: int,
+    ) -> _Aggregation:
+        return _LastValueAggregation(
+            attributes,
+            reservoir_builder=reservoir_factory(_LastValueAggregation),
+        )
+
+
+class DropAggregation(Aggregation):
+    """Using this aggregation will make all measurements be ignored."""
+
+    def _create_aggregation(
+        self,
+        instrument: Instrument,
+        attributes: Attributes,
+        reservoir_factory: Callable[
+            [Type[_Aggregation]], ExemplarReservoirBuilder
+        ],
+        start_time_unix_nano: int,
+    ) -> _Aggregation:
+        return _DropAggregation(
+            attributes, reservoir_factory(_DropAggregation)
+        )
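+
+
+# Usage sketch (assuming the public ``View`` API, with hypothetical instrument
+# names): the remaining aggregations are selected per instrument in the same
+# way, e.g. keeping only the last value of a gauge and dropping a noisy
+# instrument entirely:
+#
+#     views = [
+#         View(instrument_name="queue.size", aggregation=LastValueAggregation()),
+#         View(instrument_name="debug.*", aggregation=DropAggregation()),
+#     ]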
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exceptions.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exceptions.py
new file mode 100644
index 00000000..0f8c3a75
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exceptions.py
@@ -0,0 +1,17 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class MetricsTimeoutError(Exception):
+    """Raised when a metrics function times out"""
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exemplar/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exemplar/__init__.py
new file mode 100644
index 00000000..ee93dd18
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exemplar/__init__.py
@@ -0,0 +1,39 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .exemplar import Exemplar
+from .exemplar_filter import (
+    AlwaysOffExemplarFilter,
+    AlwaysOnExemplarFilter,
+    ExemplarFilter,
+    TraceBasedExemplarFilter,
+)
+from .exemplar_reservoir import (
+    AlignedHistogramBucketExemplarReservoir,
+    ExemplarReservoir,
+    ExemplarReservoirBuilder,
+    SimpleFixedSizeExemplarReservoir,
+)
+
+__all__ = [
+    "Exemplar",
+    "ExemplarFilter",
+    "AlwaysOffExemplarFilter",
+    "AlwaysOnExemplarFilter",
+    "TraceBasedExemplarFilter",
+    "AlignedHistogramBucketExemplarReservoir",
+    "ExemplarReservoir",
+    "ExemplarReservoirBuilder",
+    "SimpleFixedSizeExemplarReservoir",
+]
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exemplar/exemplar.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exemplar/exemplar.py
new file mode 100644
index 00000000..95582e16
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exemplar/exemplar.py
@@ -0,0 +1,50 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import dataclasses
+from typing import Optional, Union
+
+from opentelemetry.util.types import Attributes
+
+
+@dataclasses.dataclass(frozen=True)
+class Exemplar:
+    """A representation of an exemplar, which is a sample input measurement.
+
+    Exemplars also hold information about the environment when the measurement
+    was recorded, for example the span and trace ID of the active span when the
+    exemplar was recorded.
+
+    Attributes
+        trace_id: (optional) The trace associated with a recording
+        span_id: (optional) The span associated with a recording
+        time_unix_nano: The time of the observation
+        value: The recorded value
+        filtered_attributes: A set of filtered attributes which provide additional insight into the Context when the observation was made.
+
+    References:
+        https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#exemplars
+        https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exemplar
+    """
+
+    # TODO Fix doc - if using valid Google `Attributes:` key, the attributes are duplicated
+    # one will come from napoleon extension and the other from autodoc extension. This
+# will raise a Sphinx error of duplicated object description
+    # See https://github.com/sphinx-doc/sphinx/issues/8664
+
+    filtered_attributes: Attributes
+    value: Union[int, float]
+    time_unix_nano: int
+    span_id: Optional[int] = None
+    trace_id: Optional[int] = None
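+
+
+# Construction sketch (hypothetical values): exemplars are normally created by
+# an ExemplarReservoir rather than by hand:
+#
+#     exemplar = Exemplar(
+#         filtered_attributes={"user.tier": "gold"},
+#         value=12.7,
+#         time_unix_nano=1_700_000_000_000_000_000,
+#         span_id=0x00F067AA0BA902B7,
+#         trace_id=0x4BF92F3577B34DA6A3CE929D0E0E4736,
+#     )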
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_filter.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_filter.py
new file mode 100644
index 00000000..8961d101
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_filter.py
@@ -0,0 +1,134 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from abc import ABC, abstractmethod
+from typing import Union
+
+from opentelemetry import trace
+from opentelemetry.context import Context
+from opentelemetry.trace.span import INVALID_SPAN
+from opentelemetry.util.types import Attributes
+
+
+class ExemplarFilter(ABC):
+    """``ExemplarFilter`` determines which measurements are eligible for becoming an
+    ``Exemplar``.
+
+    Exemplar filters are used to filter measurements before attempting to store them
+    in a reservoir.
+
+    Reference:
+        https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exemplarfilter
+    """
+
+    @abstractmethod
+    def should_sample(
+        self,
+        value: Union[int, float],
+        time_unix_nano: int,
+        attributes: Attributes,
+        context: Context,
+    ) -> bool:
+        """Returns whether or not a reservoir should attempt to filter a measurement.
+
+        Args:
+            value: The value of the measurement
+            time_unix_nano: A timestamp that best represents when the measurement was taken
+            attributes: The complete set of measurement attributes
+            context: The Context of the measurement
+        """
+        raise NotImplementedError(
+            "ExemplarFilter.should_sample is not implemented"
+        )
+
+
+class AlwaysOnExemplarFilter(ExemplarFilter):
+    """An ExemplarFilter which makes all measurements eligible for being an Exemplar.
+
+    Reference:
+        https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#alwayson
+    """
+
+    def should_sample(
+        self,
+        value: Union[int, float],
+        time_unix_nano: int,
+        attributes: Attributes,
+        context: Context,
+    ) -> bool:
+        """Returns whether or not a reservoir should attempt to filter a measurement.
+
+        Args:
+            value: The value of the measurement
+            time_unix_nano: A timestamp that best represents when the measurement was taken
+            attributes: The complete set of measurement attributes
+            context: The Context of the measurement
+        """
+        return True
+
+
+class AlwaysOffExemplarFilter(ExemplarFilter):
+    """An ExemplarFilter which makes no measurements eligible for being an Exemplar.
+
+    Using this ExemplarFilter is equivalent to disabling the exemplar feature.
+
+    Reference:
+        https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#alwaysoff
+    """
+
+    def should_sample(
+        self,
+        value: Union[int, float],
+        time_unix_nano: int,
+        attributes: Attributes,
+        context: Context,
+    ) -> bool:
+        """Returns whether or not a reservoir should attempt to filter a measurement.
+
+        Args:
+            value: The value of the measurement
+            time_unix_nano: A timestamp that best represents when the measurement was taken
+            attributes: The complete set of measurement attributes
+            context: The Context of the measurement
+        """
+        return False
+
+
+class TraceBasedExemplarFilter(ExemplarFilter):
+    """An ExemplarFilter which makes those measurements eligible for being an Exemplar,
+    which are recorded in the context of a sampled parent span.
+
+    Reference:
+        https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#tracebased
+    """
+
+    def should_sample(
+        self,
+        value: Union[int, float],
+        time_unix_nano: int,
+        attributes: Attributes,
+        context: Context,
+    ) -> bool:
+        """Returns whether or not a reservoir should attempt to filter a measurement.
+
+        Args:
+            value: The value of the measurement
+            time_unix_nano: A timestamp that best represents when the measurement was taken
+            attributes: The complete set of measurement attributes
+            context: The Context of the measurement
+        """
+        span = trace.get_current_span(context)
+        if span == INVALID_SPAN:
+            return False
+        return span.get_span_context().trace_flags.sampled
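+
+
+# Wiring sketch: a filter is installed on the MeterProvider (assuming the
+# ``exemplar_filter`` constructor argument available in recent SDK versions):
+#
+#     from opentelemetry.sdk.metrics import MeterProvider
+#
+#     provider = MeterProvider(exemplar_filter=TraceBasedExemplarFilter())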
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py
new file mode 100644
index 00000000..22d1ee9f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py
@@ -0,0 +1,332 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from abc import ABC, abstractmethod
+from collections import defaultdict
+from random import randrange
+from typing import (
+    Any,
+    Callable,
+    Dict,
+    List,
+    Mapping,
+    Optional,
+    Sequence,
+    Union,
+)
+
+from opentelemetry import trace
+from opentelemetry.context import Context
+from opentelemetry.trace.span import INVALID_SPAN
+from opentelemetry.util.types import Attributes
+
+from .exemplar import Exemplar
+
+
+class ExemplarReservoir(ABC):
+    """ExemplarReservoir provide a method to offer measurements to the reservoir
+    and another to collect accumulated Exemplars.
+
+    Note:
+        The constructor MUST accept ``**kwargs`` that may be set from aggregation
+        parameters.
+
+    Reference:
+        https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exemplarreservoir
+    """
+
+    @abstractmethod
+    def offer(
+        self,
+        value: Union[int, float],
+        time_unix_nano: int,
+        attributes: Attributes,
+        context: Context,
+    ) -> None:
+        """Offers a measurement to be sampled.
+
+        Args:
+            value: Measured value
+            time_unix_nano: Measurement instant
+            attributes: Measurement attributes
+            context: Measurement context
+        """
+        raise NotImplementedError("ExemplarReservoir.offer is not implemented")
+
+    @abstractmethod
+    def collect(self, point_attributes: Attributes) -> List[Exemplar]:
+        """Returns accumulated Exemplars and also resets the reservoir for the next
+        sampling period
+
+        Args:
+            point_attributes: The attributes associated with the metric point.
+
+        Returns:
+            a list of ``opentelemetry.sdk.metrics._internal.exemplar.exemplar.Exemplar`` s. Returned
+            exemplars contain the attributes that were filtered out by the aggregator,
+            but recorded alongside the original measurement.
+        """
+        raise NotImplementedError(
+            "ExemplarReservoir.collect is not implemented"
+        )
+
+
+class ExemplarBucket:
+    def __init__(self) -> None:
+        self.__value: Union[int, float] = 0
+        self.__attributes: Attributes = None
+        self.__time_unix_nano: int = 0
+        self.__span_id: Optional[int] = None
+        self.__trace_id: Optional[int] = None
+        self.__offered: bool = False
+
+    def offer(
+        self,
+        value: Union[int, float],
+        time_unix_nano: int,
+        attributes: Attributes,
+        context: Context,
+    ) -> None:
+        """Offers a measurement to be sampled.
+
+        Args:
+            value: Measured value
+            time_unix_nano: Measurement instant
+            attributes: Measurement attributes
+            context: Measurement context
+        """
+        self.__value = value
+        self.__time_unix_nano = time_unix_nano
+        self.__attributes = attributes
+        span = trace.get_current_span(context)
+        if span != INVALID_SPAN:
+            span_context = span.get_span_context()
+            self.__span_id = span_context.span_id
+            self.__trace_id = span_context.trace_id
+
+        self.__offered = True
+
+    def collect(self, point_attributes: Attributes) -> Optional[Exemplar]:
+        """May return an Exemplar and resets the bucket for the next sampling period."""
+        if not self.__offered:
+            return None
+
+        # filters out attributes from the measurement that are already included in the metric data point
+        # See the specification for more details:
+        # https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exemplar
+        filtered_attributes = (
+            {
+                k: v
+                for k, v in self.__attributes.items()
+                if k not in point_attributes
+            }
+            if self.__attributes
+            else None
+        )
+
+        exemplar = Exemplar(
+            filtered_attributes,
+            self.__value,
+            self.__time_unix_nano,
+            self.__span_id,
+            self.__trace_id,
+        )
+        self.__reset()
+        return exemplar
+
+    def __reset(self) -> None:
+        """Reset the bucket state after a collection cycle."""
+        self.__value = 0
+        self.__attributes = {}
+        self.__time_unix_nano = 0
+        self.__span_id = None
+        self.__trace_id = None
+        self.__offered = False
+
+
+class BucketIndexError(ValueError):
+    """An exception raised when the bucket index cannot be found."""
+
+
+class FixedSizeExemplarReservoirABC(ExemplarReservoir):
+    """Abstract class for a reservoir with fixed size."""
+
+    def __init__(self, size: int, **kwargs) -> None:
+        super().__init__(**kwargs)
+        self._size: int = size
+        self._reservoir_storage: Mapping[int, ExemplarBucket] = defaultdict(
+            ExemplarBucket
+        )
+
+    def collect(self, point_attributes: Attributes) -> List[Exemplar]:
+        """Returns accumulated Exemplars and also resets the reservoir for the next
+        sampling period
+
+        Args:
+            point_attributes: The attributes associated with the metric point.
+
+        Returns:
+            a list of ``opentelemetry.sdk.metrics._internal.exemplar.exemplar.Exemplar`` s. Returned
+            exemplars contain the attributes that were filtered out by the aggregator,
+            but recorded alongside the original measurement.
+        """
+        exemplars = [
+            e
+            for e in (
+                bucket.collect(point_attributes)
+                for _, bucket in sorted(self._reservoir_storage.items())
+            )
+            if e is not None
+        ]
+        self._reset()
+        return exemplars
+
+    def offer(
+        self,
+        value: Union[int, float],
+        time_unix_nano: int,
+        attributes: Attributes,
+        context: Context,
+    ) -> None:
+        """Offers a measurement to be sampled.
+
+        Args:
+            value: Measured value
+            time_unix_nano: Measurement instant
+            attributes: Measurement attributes
+            context: Measurement context
+        """
+        try:
+            index = self._find_bucket_index(
+                value, time_unix_nano, attributes, context
+            )
+
+            self._reservoir_storage[index].offer(
+                value, time_unix_nano, attributes, context
+            )
+        except BucketIndexError:
+            # Ignore invalid bucket index
+            pass
+
+    @abstractmethod
+    def _find_bucket_index(
+        self,
+        value: Union[int, float],
+        time_unix_nano: int,
+        attributes: Attributes,
+        context: Context,
+    ) -> int:
+        """Determines the bucket index for the given measurement.
+
+        It should be implemented by subclasses based on specific strategies.
+
+        Args:
+            value: Measured value
+            time_unix_nano: Measurement instant
+            attributes: Measurement attributes
+            context: Measurement context
+
+        Returns:
+            The bucket index
+
+        Raises:
+            BucketIndexError: If no bucket index can be found.
+        """
+
+    def _reset(self) -> None:
+        """Reset the reservoir by resetting any stateful logic after a collection cycle."""
+
+
+class SimpleFixedSizeExemplarReservoir(FixedSizeExemplarReservoirABC):
+    """This reservoir uses an uniformly-weighted sampling algorithm based on the number
+    of samples the reservoir has seen so far to determine if the offered measurements
+    should be sampled.
+
+    Reference:
+        https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#simplefixedsizeexemplarreservoir
+    """
+
+    def __init__(self, size: int = 1, **kwargs) -> None:
+        super().__init__(size, **kwargs)
+        self._measurements_seen: int = 0
+
+    def _reset(self) -> None:
+        super()._reset()
+        self._measurements_seen = 0
+
+    def _find_bucket_index(
+        self,
+        value: Union[int, float],
+        time_unix_nano: int,
+        attributes: Attributes,
+        context: Context,
+    ) -> int:
+        self._measurements_seen += 1
+        if self._measurements_seen < self._size:
+            return self._measurements_seen - 1
+
+        index = randrange(0, self._measurements_seen)
+        if index < self._size:
+            return index
+
+        raise BucketIndexError("Unable to find the bucket index.")
+
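+# Behaviour sketch (hypothetical ``time_unix_nano``/``ctx`` values): with size
+# 1 the reservoir keeps at most one exemplar per collection cycle, replacing
+# it with decreasing probability as more measurements are seen:
+#
+#     reservoir = SimpleFixedSizeExemplarReservoir(size=1)
+#     reservoir.offer(10.0, time_unix_nano, {"k": "v"}, ctx)
+#     exemplars = reservoir.collect(point_attributes={})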
+
+class AlignedHistogramBucketExemplarReservoir(FixedSizeExemplarReservoirABC):
+    """This Exemplar reservoir takes a configuration parameter that is the
+    configuration of a Histogram. This implementation keeps the last seen measurement
+    that falls within a histogram bucket.
+
+    Reference:
+        https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#alignedhistogrambucketexemplarreservoir
+    """
+
+    def __init__(self, boundaries: Sequence[float], **kwargs) -> None:
+        super().__init__(len(boundaries) + 1, **kwargs)
+        self._boundaries: Sequence[float] = boundaries
+
+    def offer(
+        self,
+        value: Union[int, float],
+        time_unix_nano: int,
+        attributes: Attributes,
+        context: Context,
+    ) -> None:
+        """Offers a measurement to be sampled."""
+        index = self._find_bucket_index(
+            value, time_unix_nano, attributes, context
+        )
+        self._reservoir_storage[index].offer(
+            value, time_unix_nano, attributes, context
+        )
+
+    def _find_bucket_index(
+        self,
+        value: Union[int, float],
+        time_unix_nano: int,
+        attributes: Attributes,
+        context: Context,
+    ) -> int:
+        for index, boundary in enumerate(self._boundaries):
+            if value <= boundary:
+                return index
+        return len(self._boundaries)
+
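+# Behaviour sketch: with boundaries (0, 5, 10) a measurement of 7 falls in
+# bucket index 2 (the (5, 10] bucket), so it replaces the last exemplar seen
+# for that histogram bucket:
+#
+#     reservoir = AlignedHistogramBucketExemplarReservoir(boundaries=(0, 5, 10))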
+
+ExemplarReservoirBuilder = Callable[[Dict[str, Any]], ExemplarReservoir]
+ExemplarReservoirBuilder.__doc__ = """ExemplarReservoir builder.
+
+It may receive the Aggregation parameters it is bounded to; e.g.
+the _ExplicitBucketHistogramAggregation will provide the boundaries.
+"""
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exponential_histogram/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exponential_histogram/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exponential_histogram/__init__.py
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exponential_histogram/buckets.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exponential_histogram/buckets.py
new file mode 100644
index 00000000..e8a93326
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exponential_histogram/buckets.py
@@ -0,0 +1,190 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from math import ceil, log2
+
+
+class Buckets:
+    # No method of this class is protected by locks because instances of this
+    # class are only used in methods that are protected by locks themselves.
+
+    def __init__(self):
+        self._counts = [0]
+
+        # The term index refers to the number of the exponential histogram bucket
+        # used to determine its boundaries. The lower boundary of a bucket is
+        # determined by base ** index and the upper boundary of a bucket is
+        # determined by base ** (index + 1). index values are signed to
+        # account for values less than or equal to 1.
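+        # For example, at scale 0 the base is 2, so the bucket with index 3
+        # covers the range (2 ** 3, 2 ** 4] == (8, 16].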
+
+        # self._index_* will all have values equal to a certain index that is
+        # determined by the corresponding mapping _map_to_index function and
+        # the value of the index depends on the value passed to _map_to_index.
+
+        # Index of the 0th position in self._counts: self._counts[0] is the
+        # count in the bucket with index self.__index_base.
+        self.__index_base = 0
+
+        # self.__index_start is the smallest index value represented in
+        # self._counts.
+        self.__index_start = 0
+
+        # self.__index_end is the largest index value represented in
+        # self._counts.
+        self.__index_end = 0
+
+    @property
+    def index_start(self) -> int:
+        return self.__index_start
+
+    @index_start.setter
+    def index_start(self, value: int) -> None:
+        self.__index_start = value
+
+    @property
+    def index_end(self) -> int:
+        return self.__index_end
+
+    @index_end.setter
+    def index_end(self, value: int) -> None:
+        self.__index_end = value
+
+    @property
+    def index_base(self) -> int:
+        return self.__index_base
+
+    @index_base.setter
+    def index_base(self, value: int) -> None:
+        self.__index_base = value
+
+    @property
+    def counts(self):
+        return self._counts
+
+    def get_offset_counts(self):
+        bias = self.__index_base - self.__index_start
+        return self._counts[-bias:] + self._counts[:-bias]
+
+    def grow(self, needed: int, max_size: int) -> None:
+        size = len(self._counts)
+        bias = self.__index_base - self.__index_start
+        old_positive_limit = size - bias
+
+        # 2 ** ceil(log2(needed)) finds the smallest power of two that is
+        # larger than or equal to needed:
+        # 2 ** ceil(log2(1)) == 1
+        # 2 ** ceil(log2(2)) == 2
+        # 2 ** ceil(log2(3)) == 4
+        # 2 ** ceil(log2(4)) == 4
+        # 2 ** ceil(log2(5)) == 8
+        # 2 ** ceil(log2(6)) == 8
+        # 2 ** ceil(log2(7)) == 8
+        # 2 ** ceil(log2(8)) == 8
+        new_size = min(2 ** ceil(log2(needed)), max_size)
+
+        new_positive_limit = new_size - bias
+
+        tmp = [0] * new_size
+        tmp[new_positive_limit:] = self._counts[old_positive_limit:]
+        tmp[0:old_positive_limit] = self._counts[0:old_positive_limit]
+        self._counts = tmp
+
+    @property
+    def offset(self) -> int:
+        return self.__index_start
+
+    def __len__(self) -> int:
+        if len(self._counts) == 0:
+            return 0
+
+        if self.__index_end == self.__index_start and self[0] == 0:
+            return 0
+
+        return self.__index_end - self.__index_start + 1
+
+    def __getitem__(self, key: int) -> int:
+        bias = self.__index_base - self.__index_start
+
+        if key < bias:
+            key += len(self._counts)
+
+        key -= bias
+
+        return self._counts[key]
+
+    def downscale(self, amount: int) -> None:
+        """
+        Rotates the backing array, then collapses every 2 ** amount buckets into one.
+        """
+
+        bias = self.__index_base - self.__index_start
+
+        if bias != 0:
+            self.__index_base = self.__index_start
+
+            # [0, 1, 2, 3, 4] Original backing array
+
+            self._counts = self._counts[::-1]
+            # [4, 3, 2, 1, 0]
+
+            self._counts = (
+                self._counts[:bias][::-1] + self._counts[bias:][::-1]
+            )
+            # [3, 4, 0, 1, 2] This is a rotation of the backing array.
+
+        size = 1 + self.__index_end - self.__index_start
+        each = 1 << amount
+        inpos = 0
+        outpos = 0
+
+        pos = self.__index_start
+
+        while pos <= self.__index_end:
+            mod = pos % each
+            if mod < 0:
+                mod += each
+
+            index = mod
+
+            while index < each and inpos < size:
+                if outpos != inpos:
+                    self._counts[outpos] += self._counts[inpos]
+                    self._counts[inpos] = 0
+
+                inpos += 1
+                pos += 1
+                index += 1
+
+            outpos += 1
+
+        self.__index_start >>= amount
+        self.__index_end >>= amount
+        self.__index_base = self.__index_start
+
+    def increment_bucket(self, bucket_index: int, increment: int = 1) -> None:
+        self._counts[bucket_index] += increment
+
+    def copy_empty(self) -> "Buckets":
+        copy = Buckets()
+
+        # pylint: disable=no-member
+        # pylint: disable=protected-access
+        # pylint: disable=attribute-defined-outside-init
+        # pylint: disable=invalid-name
+        copy._Buckets__index_base = self._Buckets__index_base
+        copy._Buckets__index_start = self._Buckets__index_start
+        copy._Buckets__index_end = self._Buckets__index_end
+        copy._counts = [0 for _ in self._counts]
+
+        return copy
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/__init__.py
new file mode 100644
index 00000000..387b1d14
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/__init__.py
@@ -0,0 +1,98 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from abc import ABC, abstractmethod
+
+
+class Mapping(ABC):
+    """
+    Parent class for `LogarithmMapping` and `ExponentMapping`.
+    """
+
+    # pylint: disable=no-member
+    def __new__(cls, scale: int):
+        with cls._mappings_lock:
+            # cls._mappings and cls._mappings_lock are implemented in each of
+            # the child classes as a dictionary and a lock, respectively. They
+            # are not instantiated here because that would lead to both child
+            # classes having the same instance of cls._mappings and
+            # cls._mappings_lock.
+            if scale not in cls._mappings:
+                cls._mappings[scale] = super().__new__(cls)
+                cls._mappings[scale]._init(scale)
+
+        return cls._mappings[scale]
+
+    @abstractmethod
+    def _init(self, scale: int) -> None:
+        # pylint: disable=attribute-defined-outside-init
+
+        if scale > self._get_max_scale():
+            # pylint: disable=broad-exception-raised
+            raise Exception(f"scale is larger than {self._max_scale}")
+
+        if scale < self._get_min_scale():
+            # pylint: disable=broad-exception-raised
+            raise Exception(f"scale is smaller than {self._min_scale}")
+
+        # The size of the exponential histogram buckets is determined by a
+        # parameter known as scale, larger values of scale will produce smaller
+        # buckets. Bucket boundaries of the exponential histogram are located
+        # at integer powers of the base, where:
+        #
+        # base = 2 ** (2 ** (-scale))
+        # https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#all-scales-use-the-logarithm-function
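+        #
+        # For example, scale 0 gives base == 2, scale 1 gives
+        # base == 2 ** 0.5 (~1.414) and scale -1 gives base == 4.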
+        self._scale = scale
+
+    @abstractmethod
+    def _get_min_scale(self) -> int:
+        """
+        Return the smallest possible value for the mapping scale
+        """
+
+    @abstractmethod
+    def _get_max_scale(self) -> int:
+        """
+        Return the largest possible value for the mapping scale
+        """
+
+    @abstractmethod
+    def map_to_index(self, value: float) -> int:
+        """
+        Maps positive floating point values to indexes corresponding to
+        `Mapping.scale`. Implementations are not expected to handle zeros,
+        +inf, NaN, or negative values.
+        """
+
+    @abstractmethod
+    def get_lower_boundary(self, index: int) -> float:
+        """
+        Returns the lower boundary of a given bucket index. The index is
+        expected to map onto a range that is at least partially inside the
+        range of normal floating point values.  If the corresponding
+        bucket's upper boundary is less than or equal to 2 ** -1022,
+        :class:`~opentelemetry.sdk.metrics.MappingUnderflowError`
+        will be raised. If the corresponding bucket's lower boundary is greater
+        than ``sys.float_info.max``,
+        :class:`~opentelemetry.sdk.metrics.MappingOverflowError`
+        will be raised.
+        """
+
+    @property
+    def scale(self) -> int:
+        """
+        Returns the parameter that controls the resolution of this mapping.
+        See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/datamodel.md#exponential-scale
+        """
+        return self._scale
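+
+
+# Behaviour sketch: mappings are cached per scale, so constructing a mapping
+# with the same scale twice returns the same object:
+#
+#     from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping.exponent_mapping import (
+#         ExponentMapping,
+#     )
+#
+#     assert ExponentMapping(-3) is ExponentMapping(-3)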
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/errors.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/errors.py
new file mode 100644
index 00000000..477ed6f0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/errors.py
@@ -0,0 +1,26 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class MappingUnderflowError(Exception):
+    """
+    Raised when computing the lower boundary of an index that maps into a
+    denormal floating point value.
+    """
+
+
+class MappingOverflowError(Exception):
+    """
+    Raised when computing the lower boundary of an index that maps into +inf.
+    """
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/exponent_mapping.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/exponent_mapping.py
new file mode 100644
index 00000000..297bb7a4
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/exponent_mapping.py
@@ -0,0 +1,141 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from math import ldexp
+from threading import Lock
+
+from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping import (
+    Mapping,
+)
+from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping.errors import (
+    MappingOverflowError,
+    MappingUnderflowError,
+)
+from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping.ieee_754 import (
+    MANTISSA_WIDTH,
+    MAX_NORMAL_EXPONENT,
+    MIN_NORMAL_EXPONENT,
+    MIN_NORMAL_VALUE,
+    get_ieee_754_exponent,
+    get_ieee_754_mantissa,
+)
+
+
+class ExponentMapping(Mapping):
+    # Reference implementation here:
+    # https://github.com/open-telemetry/opentelemetry-go/blob/0e6f9c29c10d6078e8131418e1d1d166c7195d61/sdk/metric/aggregator/exponential/mapping/exponent/exponent.go
+
+    _mappings = {}
+    _mappings_lock = Lock()
+
+    _min_scale = -10
+    _max_scale = 0
+
+    def _get_min_scale(self):
+        # _min_scale defines the point at which the exponential mapping
+        # function becomes useless for 64-bit floats. With scale -10, ignoring
+        # subnormal values, bucket indices range from -1 to 1.
+        return -10
+
+    def _get_max_scale(self):
+        # _max_scale is the largest scale supported by exponential mapping. Use
+        # a logarithm mapping for larger scales.
+        return 0
+
+    def _init(self, scale: int):
+        # pylint: disable=attribute-defined-outside-init
+
+        super()._init(scale)
+
+        # self._min_normal_lower_boundary_index is the largest index such that
+        # base ** index < MIN_NORMAL_VALUE and
+        # base ** (index + 1) >= MIN_NORMAL_VALUE. An exponential histogram
+        # bucket with this index covers the range
+        # (base ** index, base ** (index + 1)], including MIN_NORMAL_VALUE. This
+        # is the smallest valid index that contains at least one normal value.
+        index = MIN_NORMAL_EXPONENT >> -self._scale
+
+        if -self._scale < 2:
+            # For scales -1 and 0, the minimum normal value 2 ** -1022 is a
+            # power-of-two multiple of the base, meaning
+            # base ** index == MIN_NORMAL_VALUE. Subtract 1 so that
+            # base ** (index + 1) == MIN_NORMAL_VALUE.
+            index -= 1
+
+        self._min_normal_lower_boundary_index = index
+
+        # self._max_normal_lower_boundary_index is the index such that
+        # base**index equals the greatest representable lower boundary. An
+        # exponential histogram bucket with this index covers the range
+        # ((2 ** 1024) / base, 2 ** 1024], which includes opentelemetry.sdk.
+        # metrics._internal.exponential_histogram.ieee_754.MAX_NORMAL_VALUE.
+        # This bucket is incomplete, since the upper boundary cannot be
+        # represented. One greater than this index corresponds with the bucket
+        # containing values > 2 ** 1024.
+        self._max_normal_lower_boundary_index = (
+            MAX_NORMAL_EXPONENT >> -self._scale
+        )
+
+    def map_to_index(self, value: float) -> int:
+        if value < MIN_NORMAL_VALUE:
+            return self._min_normal_lower_boundary_index
+
+        exponent = get_ieee_754_exponent(value)
+
+        # Positive integers are represented in binary as having an infinite
+        # number of leading zeroes; for example, 2 is represented as ...00010.
+
+        # A negative integer -x is represented in binary as the complement of
+        # (x - 1). For example, -4 is represented as the complement of
+        # 4 - 1 == 3. 3 is represented as ...00011 and its complement is
+        # ...11100, the binary representation of -4.
+
+        # get_ieee_754_mantissa(value) gets the positive integer made up of
+        # the rightmost MANTISSA_WIDTH bits (the mantissa) of the IEEE 754
+        # representation of value. If value is an exact power of 2, these
+        # MANTISSA_WIDTH bits are all zeroes, so subtracting 1 yields -1.
+        # The binary representation of -1 is ...111, so when these bits are
+        # shifted right MANTISSA_WIDTH places the resulting correction is -1.
+        # If value is not an exact power of 2, at least one of the rightmost
+        # MANTISSA_WIDTH bits is 1 (even for values with a zero fractional
+        # part, like 5.0, since the IEEE 754 representation of such a number
+        # is also the product of a power of 2, defined by the exponent field,
+        # and the value defined by the mantissa). With at least one of the
+        # rightmost MANTISSA_WIDTH bits set to 1,
+        # get_ieee_754_mantissa(value) is greater than or equal to 1, so
+        # subtracting 1 gives a non-negative value of at most MANTISSA_WIDTH
+        # bits (with an infinite number of leading zeroes). When those bits
+        # are shifted right MANTISSA_WIDTH places, the resulting correction
+        # is 0.
+
+        # In summary, correction is -1 if value is an exact power of 2 and 0
+        # otherwise.
+
+        # FIXME Document why we can assume value will not be 0, inf, or NaN.
+        correction = (get_ieee_754_mantissa(value) - 1) >> MANTISSA_WIDTH
+
+        return (exponent + correction) >> -self._scale
+
+    def get_lower_boundary(self, index: int) -> float:
+        if index < self._min_normal_lower_boundary_index:
+            raise MappingUnderflowError()
+
+        if index > self._max_normal_lower_boundary_index:
+            raise MappingOverflowError()
+
+        return ldexp(1, index << -self._scale)
+
+    @property
+    def scale(self) -> int:
+        return self._scale
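+
+
+# Editor's sketch (not part of the upstream file): at scale 0 the bucket base
+# is 2, so map_to_index(value) returns the largest integer i such that
+# 2 ** i < value. Instances are cached per scale by the Mapping base class.
+if __name__ == "__main__":
+    mapping = ExponentMapping(0)
+    assert mapping.map_to_index(15.0) == 3  # 15.0 falls in the bucket (8, 16]
+    # 16.0 is an exact power of 2, so the mantissa correction above places it
+    # in the lower bucket (8, 16] as well:
+    assert mapping.map_to_index(16.0) == 3
+    assert mapping.get_lower_boundary(3) == 8.0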
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/ieee_754.md b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/ieee_754.md
new file mode 100644
index 00000000..0cf5c8c5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/ieee_754.md
@@ -0,0 +1,175 @@
+# IEEE 754 Explained
+
+IEEE 754 is a standard that defines a way to represent certain mathematical
+objects using binary numbers.
+
+## Binary Number Fields
+
+The binary numbers used in IEEE 754 can have different lengths; the length
+relevant to this project is 64 bits. These binary numbers are made up of 3
+contiguous fields of bits, from left to right:
+
+1. 1 sign bit
+2. 11 exponent bits
+3. 52 mantissa bits
+
+Depending on the values these fields have, the represented mathematical object
+can be one of:
+
+* Floating point number
+* Zero
+* NaN
+* Infinity
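+
+A minimal sketch (not part of the original document) of slicing the three
+fields described above out of a Python `float` with the standard `struct`
+module:
+
+```python
+from struct import pack, unpack
+
+
+def fields(value: float) -> tuple[int, int, int]:
+    """Return (sign, biased exponent, mantissa) of a 64-bit float."""
+    bits = unpack("<Q", pack("<d", value))[0]  # raw 64-bit pattern
+    sign = bits >> 63  # 1 sign bit
+    exponent = (bits >> 52) & 0x7FF  # 11 exponent bits (still biased)
+    mantissa = bits & ((1 << 52) - 1)  # 52 mantissa bits
+    return sign, exponent, mantissa
+
+
+assert fields(-2.0) == (1, 1024, 0)  # -2.0 == -1 * 1.0 * 2 ** (1024 - 1023)
+```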
+
+## Floating Point Numbers
+
+IEEE 754 represents a floating point number $f$ using an exponential
+notation with 4 components: $sign$, $mantissa$, $base$ and $exponent$:
+
+$$f = sign \times mantissa \times base ^ {exponent}$$
+
+There are two possible representations of floating point numbers:
+_normal_ and _denormal_, which have different valid values for
+their $mantissa$ and $exponent$ fields.
+
+### Binary Representation
+
+$sign$, $mantissa$, and $exponent$ are represented in binary; the details of
+each representation are explained next.
+
+$base$ is always $2$ and it is not represented in binary.
+
+#### Sign
+
+$sign$ can have 2 values:
+
+1. $1$ if the `sign` bit is `0`
+2. $-1$ if the `sign` bit is `1`.
+
+#### Mantissa
+
+##### Normal Floating Point Numbers
+
+$mantissa$ is a positive fractional number whose integer part is $1$, for example
+$1.2345 \dots$. The `mantissa` bits represent only the fractional part and the
+$mantissa$ value can be calculated as:
+
+$$mantissa = 1 + \sum_{i=1}^{52} b_{i} \times 2^{-i} = 1 + \frac{b_{1}}{2^{1}} + \frac{b_{2}}{2^{2}} + \dots + \frac{b_{51}}{2^{51}} + \frac{b_{52}}{2^{52}}$$
+
+Where $b_{i}$ is:
+
+1. $0$ if the bit at the position `i - 1` is `0`.
+2. $1$ if the bit at the position `i - 1` is `1`.
+
+##### Denormal Floating Point Numbers
+
+$mantissa$ is a positive fractional number whose integer part is $0$, for example
+$0.12345 \dots$. The `mantissa` bits represent only the fractional part and the
+$mantissa$ value can be calculated as:
+
+$$mantissa = \sum_{i=1}^{52} b_{i} \times 2^{-i} = \frac{b_{1}}{2^{1}} + \frac{b_{2}}{2^{2}} + \dots + \frac{b_{51}}{2^{51}} + \frac{b_{52}}{2^{52}}$$
+
+Where $b_{i}$ is:
+
+1. $0$ if the bit at the position `i - 1` is `0`.
+2. $1$ if the bit at the position `i - 1` is `1`.
+
+#### Exponent
+
+##### Normal Floating Point Numbers
+
+Only the following bit sequences are allowed: `00000000001` to `11111111110`.
+That is, there must be at least one `0` and one `1` in the exponent bits.
+
+The actual value of the $exponent$ can be calculated as:
+
+$$exponent = v - bias$$
+
+where $v$ is the value of the binary number in the exponent bits and $bias$ is $1023$.
+Considering the restrictions above, the respective minimum and maximum values for the
+exponent are:
+
+1. `00000000001` = $1$, $1 - 1023 = -1022$
+2. `11111111110` = $2046$, $2046 - 1023 = 1023$
+
+So, $exponent$ is an integer in the range $\left[-1022, 1023\right]$.
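+
+As a quick worked example, consider $5.0 = 1 \times 1.25 \times 2^{2}$: the
+mantissa bits are `0100000000000000000000000000000000000000000000000000`
+($b_{2} = 1$, so $mantissa = 1 + 2^{-2} = 1.25$) and the exponent bits are
+`10000000001` ($v = 1025$, so $exponent = 1025 - 1023 = 2$).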
+
+
+##### Denormal Floating Point Numbers
+
+$exponent$ is always $-1022$, even though it is always stored as `00000000000`.
+
+### Normal and Denormal Floating Point Numbers
+
+The smallest absolute value a normal floating point number can have is calculated
+like this:
+
+$$1 \times 1.0\dots0 \times 2^{-1022} = 2.2250738585072014 \times 10^{-308}$$
+
+Since normal floating point numbers always have a $1$ as the integer part of the
+$mantissa$, smaller values can only be achieved by keeping the smallest possible
+exponent ( $-1022$ ) and using a $0$ as the integer part of the $mantissa$, at the
+cost of significant digits.
+
+The smallest absolute value a denormal floating point number can have is calculated
+like this:
+
+$$1 \times 2^{-52} \times 2^{-1022} \approx 5 \times 10^{-324}$$
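+
+These boundaries can be checked directly in Python (an illustrative sketch,
+not part of the original document):
+
+```python
+import sys
+
+assert sys.float_info.min == 2.2250738585072014e-308  # smallest normal
+assert 2**-52 * 2**-1022 == 5e-324  # smallest denormal
+assert 5e-324 / 2 == 0.0  # halving the smallest denormal underflows to zero
+```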
+
+## Zero
+
+Zero is represented like this:
+
+* Sign bit: `X`
+* Exponent bits: `00000000000`
+* Mantissa bits: `0000000000000000000000000000000000000000000000000000`
+
+where `X` means `0` or `1`.
+
+## NaN
+
+There are 2 kinds of NaNs that are represented:
+
+1. QNaNs (Quiet NaNs): represent the result of indeterminate operations.
+2. SNaNs (Signalling NaNs): represent the result of invalid operations.
+
+### QNaNs
+
+QNaNs are represented like this:
+
+* Sign bit: `X`
+* Exponent bits: `11111111111`
+* Mantissa bits: `1XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX`
+
+where `X` means `0` or `1`.
+
+### SNaNs
+
+SNaNs are represented like this:
+
+* Sign bit: `X`
+* Exponent bits: `11111111111`
+* Mantissa bits: `0XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX1`
+
+where `X` means `0` or `1`.
+
+## Infinity
+
+### Positive Infinity
+
+Positive infinity is represented like this:
+
+* Sign bit: `0`
+* Exponent bits: `11111111111`
+* Mantissa bits: `0000000000000000000000000000000000000000000000000000`
+
+### Negative Infinity
+
+Negative infinity is represented like this:
+
+* Sign bit: `1`
+* Exponent bits: `11111111111`
+* Mantissa bits: `0000000000000000000000000000000000000000000000000000`
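+
+The special bit patterns above can be built by hand and checked in Python
+(an illustrative sketch, not part of the original document):
+
+```python
+from math import isnan
+from struct import pack, unpack
+
+
+def from_bits(bits: int) -> float:
+    """Reinterpret a 64-bit integer bit pattern as a float."""
+    return unpack("<d", pack("<Q", bits))[0]
+
+
+assert from_bits(0x7FF0000000000000) == float("inf")  # positive infinity
+assert from_bits(0xFFF0000000000000) == float("-inf")  # negative infinity
+assert isnan(from_bits(0x7FF8000000000000))  # a quiet NaN
+assert from_bits(0x8000000000000000) == 0.0  # negative zero
+```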
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/ieee_754.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/ieee_754.py
new file mode 100644
index 00000000..d4b7e861
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/ieee_754.py
@@ -0,0 +1,117 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ctypes import c_double, c_uint64
+from sys import float_info
+
+# IEEE 754 64-bit floating point numbers use 11 bits for the exponent and 52
+# bits for the mantissa.
+MANTISSA_WIDTH = 52
+EXPONENT_WIDTH = 11
+
+# This mask is 52 consecutive "1" bits: 0xfffffffffffff in hexadecimal, that
+# is, 13 hexadecimal "f" digits of 4 bits each (13 * 4 == 52).
+MANTISSA_MASK = (1 << MANTISSA_WIDTH) - 1
+
+# There are 11 bits for the exponent, but the exponent values 0 (11 "0"
+# bits) and 2047 (11 "1" bits) have special meanings so the exponent range is
+# from 1 to 2046. To calculate the exponent value, 1023 (the bias) is
+# subtracted from the exponent, so the exponent value range is from -1022 to
+# +1023.
+EXPONENT_BIAS = (2 ** (EXPONENT_WIDTH - 1)) - 1
+
+# All the exponent mask bits are set to 1 for the 11 exponent bits.
+EXPONENT_MASK = ((1 << EXPONENT_WIDTH) - 1) << MANTISSA_WIDTH
+
+# The sign mask has the first bit set to 1 and the rest to 0.
+SIGN_MASK = 1 << (EXPONENT_WIDTH + MANTISSA_WIDTH)
+
+# For normal floating point numbers, the exponent can have a value in the
+# range [-1022, 1023].
+MIN_NORMAL_EXPONENT = -EXPONENT_BIAS + 1
+MAX_NORMAL_EXPONENT = EXPONENT_BIAS
+
+# The smallest possible normal value is 2.2250738585072014e-308.
+# This value is the result of using the smallest possible mantissa,
+# 1.0000000000000000000000000000000000000000000000000000 (52 "0"s in the
+# fractional part), and the smallest exponent field allowed for normal
+# numbers, 00000000001 (decimal 1, so the exponent is 1 - 1023 == -1022).
+# Finally, 1 * (2 ** -1022) == 2.2250738585072014e-308.
+MIN_NORMAL_VALUE = float_info.min
+
+# Greatest possible normal value (1.7976931348623157e+308)
+# The binary representation of a float in scientific notation uses (for the
+# mantissa) one bit for the integer part (which is implicit) and 52 bits for
+# the fractional part. Consider a float binary 1.111. It is equal to 1 + 1/2 +
+# 1/4 + 1/8. The greatest possible value in the 52-bit binary mantissa would be
+# then 1.1111111111111111111111111111111111111111111111111111 (52 "1"s in the
+# fractional part) whose decimal value is 1.9999999999999998. Finally,
+# 1.9999999999999998 * (2 ** 1023) = 1.7976931348623157e+308.
+MAX_NORMAL_VALUE = float_info.max
+
+
+def get_ieee_754_exponent(value: float) -> int:
+    """
+    Gets the exponent of the IEEE 754 representation of a float.
+    """
+
+    return (
+        (
+            # This step gives the integer that corresponds to the IEEE 754
+            # representation of a float. For example, consider
+            # -MAX_NORMAL_VALUE. We choose this value because its binary
+            # representation makes the subsequent operations easy to follow.
+            #
+            # c_uint64.from_buffer(c_double(-MAX_NORMAL_VALUE)).value == 18442240474082181119
+            # bin(18442240474082181119) == '0b1111111111101111111111111111111111111111111111111111111111111111'
+            #
+            # The first bit of the previous binary number is the sign bit: 1 (1 means negative, 0 means positive)
+            # The next 11 bits are the exponent bits: 11111111110
+            # The next 52 bits are the mantissa bits: 1111111111111111111111111111111111111111111111111111
+            #
+            # This step isolates the exponent bits, turning every bit outside
+            # of the exponent field (sign and mantissa bits) to 0.
+            c_uint64.from_buffer(c_double(value)).value & EXPONENT_MASK
+            # For the example this means:
+            # 18442240474082181119 & EXPONENT_MASK == 9214364837600034816
+            # bin(9214364837600034816) == '0b111111111100000000000000000000000000000000000000000000000000000'
+            # Notice that the previous binary representation does not include
+            # leading zeroes, so the sign bit is not included since it is a
+            # zero.
+        )
+        # This step moves the exponent bits to the right, removing the
+        # mantissa bits that were set to 0 by the previous step. This
+        # leaves the IEEE 754 exponent value, ready for the next step.
+        >> MANTISSA_WIDTH
+        # For the example this means:
+        # 9214364837600034816 >> MANTISSA_WIDTH == 2046
+        # bin(2046) == '0b11111111110'
+        # As shown above, these are the original 11 bits that correspond to the
+        # exponent.
+        # This step subtracts the exponent bias from the IEEE 754 value,
+        # leaving the actual exponent value.
+    ) - EXPONENT_BIAS
+    # For the example this means:
+    # 2046 - EXPONENT_BIAS == 1023
+    # As mentioned in a comment above, the largest value for the exponent is
+    # 1023, which is exactly what this example yields.
+
+
+def get_ieee_754_mantissa(value: float) -> int:
+    """
+    Gets the mantissa of the IEEE 754 representation of a float.
+    """
+
+    return (
+        c_uint64.from_buffer(c_double(value)).value
+        # This step isolates the mantissa bits. There is no need to do any
+        # bit shifting as the mantissa bits are already the rightmost field
+        # in an IEEE 754 representation.
+        & MANTISSA_MASK
+    )
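+
+
+# Editor's sketch (not part of the upstream file): for positive normal
+# floats, the helpers above recover the components of
+# value == (1 + mantissa / 2 ** MANTISSA_WIDTH) * 2 ** exponent.
+if __name__ == "__main__":
+    for value in (1.0, 6.25, MAX_NORMAL_VALUE):
+        mantissa = 1 + get_ieee_754_mantissa(value) / 2**MANTISSA_WIDTH
+        assert mantissa * 2.0 ** get_ieee_754_exponent(value) == value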
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/logarithm_mapping.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/logarithm_mapping.py
new file mode 100644
index 00000000..e73f3a81
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping/logarithm_mapping.py
@@ -0,0 +1,138 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from math import exp, floor, ldexp, log
+from threading import Lock
+
+from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping import (
+    Mapping,
+)
+from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping.errors import (
+    MappingOverflowError,
+    MappingUnderflowError,
+)
+from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping.ieee_754 import (
+    MAX_NORMAL_EXPONENT,
+    MIN_NORMAL_EXPONENT,
+    MIN_NORMAL_VALUE,
+    get_ieee_754_exponent,
+    get_ieee_754_mantissa,
+)
+
+
+class LogarithmMapping(Mapping):
+    # Reference implementation here:
+    # https://github.com/open-telemetry/opentelemetry-go/blob/0e6f9c29c10d6078e8131418e1d1d166c7195d61/sdk/metric/aggregator/exponential/mapping/logarithm/logarithm.go
+
+    _mappings = {}
+    _mappings_lock = Lock()
+
+    _min_scale = 1
+    _max_scale = 20
+
+    def _get_min_scale(self):
+        # _min_scale ensures that ExponentMapping is used for zero and negative
+        # scale values.
+        return self._min_scale
+
+    def _get_max_scale(self):
+        # FIXME The Go implementation uses a value of 20 here, find out the
+        # right value for this implementation, more information here:
+        # https://github.com/lightstep/otel-launcher-go/blob/c9ca8483be067a39ab306b09060446e7fda65f35/lightstep/sdk/metric/aggregator/histogram/structure/README.md#mapping-function
+        # https://github.com/open-telemetry/opentelemetry-go/blob/0e6f9c29c10d6078e8131418e1d1d166c7195d61/sdk/metric/aggregator/exponential/mapping/logarithm/logarithm.go#L32-L45
+        return self._max_scale
+
+    def _init(self, scale: int):
+        # pylint: disable=attribute-defined-outside-init
+
+        super()._init(scale)
+
+        # self._scale_factor is defined as a multiplier because multiplication
+        # is faster than division. It is used as:
+        # index = log(value) * self._scale_factor
+        # which follows from:
+        # index = log(value) / log(base)
+        #       = log(value) / log(2 ** (2 ** -scale))
+        #       = log(value) / ((2 ** -scale) * log(2))
+        #       = log(value) * ((1 / log(2)) * (2 ** scale))
+        # so:
+        # self._scale_factor = (1 / log(2)) * (2 ** scale)
+        #                    = ldexp(1 / log(2), scale)
+        # This implementation was copied from a Java prototype. See:
+        # https://github.com/newrelic-experimental/newrelic-sketch-java/blob/1ce245713603d61ba3a4510f6df930a5479cd3f6/src/main/java/com/newrelic/nrsketch/indexer/LogIndexer.java
+        # for the equations used here.
+        self._scale_factor = ldexp(1 / log(2), scale)
+
+        # self._min_normal_lower_boundary_index is the index such that
+        # base ** index == MIN_NORMAL_VALUE. An exponential histogram bucket
+        # with this index covers the range
+        # (MIN_NORMAL_VALUE, MIN_NORMAL_VALUE * base]. One less than this index
+        # corresponds with the bucket containing values <= MIN_NORMAL_VALUE.
+        self._min_normal_lower_boundary_index = (
+            MIN_NORMAL_EXPONENT << self._scale
+        )
+
+        # self._max_normal_lower_boundary_index is the index such that
+        # base ** index equals the greatest representable lower boundary. An
+        # exponential histogram bucket with this index covers the range
+        # ((2 ** 1024) / base, 2 ** 1024], which includes opentelemetry.sdk.
+        # metrics._internal.exponential_histogram.ieee_754.MAX_NORMAL_VALUE.
+        # This bucket is incomplete, since the upper boundary cannot be
+        # represented. One greater than this index corresponds with the bucket
+        # containing values > 2 ** 1024.
+        self._max_normal_lower_boundary_index = (
+            (MAX_NORMAL_EXPONENT + 1) << self._scale
+        ) - 1
+
+    def map_to_index(self, value: float) -> int:
+        """
+        Maps positive floating point values to indexes corresponding to scale.
+        """
+
+        # value is subnormal or equal to MIN_NORMAL_VALUE
+        if value <= MIN_NORMAL_VALUE:
+            return self._min_normal_lower_boundary_index - 1
+
+        # value is an exact power of two.
+        if get_ieee_754_mantissa(value) == 0:
+            exponent = get_ieee_754_exponent(value)
+            return (exponent << self._scale) - 1
+
+        return min(
+            floor(log(value) * self._scale_factor),
+            self._max_normal_lower_boundary_index,
+        )
+
+    def get_lower_boundary(self, index: int) -> float:
+        if index >= self._max_normal_lower_boundary_index:
+            if index == self._max_normal_lower_boundary_index:
+                return 2 * exp(
+                    (index - (1 << self._scale)) / self._scale_factor
+                )
+            raise MappingOverflowError()
+
+        if index <= self._min_normal_lower_boundary_index:
+            if index == self._min_normal_lower_boundary_index:
+                return MIN_NORMAL_VALUE
+            if index == self._min_normal_lower_boundary_index - 1:
+                return (
+                    exp((index + (1 << self._scale)) / self._scale_factor) / 2
+                )
+            raise MappingUnderflowError()
+
+        return exp(index / self._scale_factor)
+
+    @property
+    def scale(self) -> int:
+        return self._scale
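+
+
+# Editor's sketch (not part of the upstream file): at scale 1 the bucket base
+# is 2 ** (2 ** -1), i.e. sqrt(2), giving two buckets per power of two.
+if __name__ == "__main__":
+    mapping = LogarithmMapping(1)
+    assert mapping.map_to_index(1.5) == 1  # 1.5 falls in (sqrt(2), 2]
+    # 2.0 is an exact power of two, so the special case above places it in
+    # the lower bucket (sqrt(2), 2] as well:
+    assert mapping.map_to_index(2.0) == 1
+    assert abs(mapping.get_lower_boundary(1) - 2**0.5) < 1e-12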
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/export/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/export/__init__.py
new file mode 100644
index 00000000..52c68334
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/export/__init__.py
@@ -0,0 +1,576 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import annotations
+
+import math
+import os
+import weakref
+from abc import ABC, abstractmethod
+from enum import Enum
+from logging import getLogger
+from os import environ, linesep
+from sys import stdout
+from threading import Event, Lock, RLock, Thread
+from time import time_ns
+from typing import IO, Callable, Iterable, Optional
+
+from typing_extensions import final
+
+# This kind of import is needed to avoid Sphinx errors.
+import opentelemetry.sdk.metrics._internal
+from opentelemetry.context import (
+    _SUPPRESS_INSTRUMENTATION_KEY,
+    attach,
+    detach,
+    set_value,
+)
+from opentelemetry.sdk.environment_variables import (
+    OTEL_METRIC_EXPORT_INTERVAL,
+    OTEL_METRIC_EXPORT_TIMEOUT,
+)
+from opentelemetry.sdk.metrics._internal.aggregation import (
+    AggregationTemporality,
+    DefaultAggregation,
+)
+from opentelemetry.sdk.metrics._internal.exceptions import MetricsTimeoutError
+from opentelemetry.sdk.metrics._internal.instrument import (
+    Counter,
+    Gauge,
+    Histogram,
+    ObservableCounter,
+    ObservableGauge,
+    ObservableUpDownCounter,
+    UpDownCounter,
+    _Counter,
+    _Gauge,
+    _Histogram,
+    _ObservableCounter,
+    _ObservableGauge,
+    _ObservableUpDownCounter,
+    _UpDownCounter,
+)
+from opentelemetry.sdk.metrics._internal.point import MetricsData
+from opentelemetry.util._once import Once
+
+_logger = getLogger(__name__)
+
+
+class MetricExportResult(Enum):
+    """Result of exporting a metric
+
+    Can be any of the following values:"""
+
+    SUCCESS = 0
+    FAILURE = 1
+
+
+class MetricExporter(ABC):
+    """Interface for exporting metrics.
+
+    Interface to be implemented by services that want to export the metrics
+    they receive in their own format.
+
+    Args:
+        preferred_temporality: Used by `opentelemetry.sdk.metrics.export.PeriodicExportingMetricReader` to
+            configure exporter level preferred temporality. See `opentelemetry.sdk.metrics.export.MetricReader` for
+            more details on what preferred temporality is.
+        preferred_aggregation: Used by `opentelemetry.sdk.metrics.export.PeriodicExportingMetricReader` to
+            configure exporter level preferred aggregation. See `opentelemetry.sdk.metrics.export.MetricReader` for
+            more details on what preferred aggregation is.
+    """
+
+    def __init__(
+        self,
+        preferred_temporality: dict[type, AggregationTemporality]
+        | None = None,
+        preferred_aggregation: dict[
+            type, "opentelemetry.sdk.metrics.view.Aggregation"
+        ]
+        | None = None,
+    ) -> None:
+        self._preferred_temporality = preferred_temporality
+        self._preferred_aggregation = preferred_aggregation
+
+    @abstractmethod
+    def export(
+        self,
+        metrics_data: MetricsData,
+        timeout_millis: float = 10_000,
+        **kwargs,
+    ) -> MetricExportResult:
+        """Exports a batch of telemetry data.
+
+        Args:
+            metrics_data: The `opentelemetry.sdk.metrics.export.MetricsData` to be exported
+
+        Returns:
+            The result of the export
+        """
+
+    @abstractmethod
+    def force_flush(self, timeout_millis: float = 10_000) -> bool:
+        """
+        Ensure that export of any metrics currently received by the exporter
+        are completed as soon as possible.
+        """
+
+    @abstractmethod
+    def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None:
+        """Shuts down the exporter.
+
+        Called when the SDK is shut down.
+        """
+
+
+class ConsoleMetricExporter(MetricExporter):
+    """Implementation of :class:`MetricExporter` that prints metrics to the
+    console.
+
+    This class can be used for diagnostic purposes. It prints the exported
+    metrics to the console (STDOUT).
+    """
+
+    def __init__(
+        self,
+        out: IO = stdout,
+        formatter: Callable[
+            ["opentelemetry.sdk.metrics.export.MetricsData"], str
+        ] = lambda metrics_data: metrics_data.to_json() + linesep,
+        preferred_temporality: dict[type, AggregationTemporality]
+        | None = None,
+        preferred_aggregation: dict[
+            type, "opentelemetry.sdk.metrics.view.Aggregation"
+        ]
+        | None = None,
+    ):
+        super().__init__(
+            preferred_temporality=preferred_temporality,
+            preferred_aggregation=preferred_aggregation,
+        )
+        self.out = out
+        self.formatter = formatter
+
+    def export(
+        self,
+        metrics_data: MetricsData,
+        timeout_millis: float = 10_000,
+        **kwargs,
+    ) -> MetricExportResult:
+        self.out.write(self.formatter(metrics_data))
+        self.out.flush()
+        return MetricExportResult.SUCCESS
+
+    def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None:
+        pass
+
+    def force_flush(self, timeout_millis: float = 10_000) -> bool:
+        return True
+
+
+class MetricReader(ABC):
+    # pylint: disable=too-many-branches,broad-exception-raised
+    """
+    Base class for all metric readers
+
+    Args:
+        preferred_temporality: A mapping between instrument classes and
+            aggregation temporality. By default CUMULATIVE is used for all
+            instrument classes. This mapping defines the default aggregation
+            temporality of every instrument class. To change the default
+            temporality of some instrument classes, pass a dictionary whose
+            keys are those instrument classes and whose values are the
+            desired temporalities; classes not included in the dictionary
+            keep their default aggregation temporality.
+        preferred_aggregation: A mapping between instrument classes and
+            aggregation instances. By default every instrument class is
+            mapped to an instance of `DefaultAggregation`. This mapping
+            defines the default aggregation of every instrument class. To
+            change the default aggregation of some instrument classes, pass a
+            dictionary whose keys are those instrument classes and whose
+            values are the desired aggregations; classes not included in the
+            dictionary keep their default aggregation. The aggregation
+            defined here is overridden by an aggregation defined by a view
+            that is not `DefaultAggregation`.
+
+    .. document the protected _receive_metrics, which is intended to be overridden by subclasses
+    .. automethod:: _receive_metrics
+    """
+
+    def __init__(
+        self,
+        preferred_temporality: dict[type, AggregationTemporality]
+        | None = None,
+        preferred_aggregation: dict[
+            type, "opentelemetry.sdk.metrics.view.Aggregation"
+        ]
+        | None = None,
+    ) -> None:
+        self._collect: Callable[
+            [
+                "opentelemetry.sdk.metrics.export.MetricReader",
+                AggregationTemporality,
+            ],
+            Iterable["opentelemetry.sdk.metrics.export.Metric"],
+        ] = None
+
+        self._instrument_class_temporality = {
+            _Counter: AggregationTemporality.CUMULATIVE,
+            _UpDownCounter: AggregationTemporality.CUMULATIVE,
+            _Histogram: AggregationTemporality.CUMULATIVE,
+            _Gauge: AggregationTemporality.CUMULATIVE,
+            _ObservableCounter: AggregationTemporality.CUMULATIVE,
+            _ObservableUpDownCounter: AggregationTemporality.CUMULATIVE,
+            _ObservableGauge: AggregationTemporality.CUMULATIVE,
+        }
+
+        if preferred_temporality is not None:
+            for temporality in preferred_temporality.values():
+                if temporality not in (
+                    AggregationTemporality.CUMULATIVE,
+                    AggregationTemporality.DELTA,
+                ):
+                    raise Exception(
+                        f"Invalid temporality value found {temporality}"
+                    )
+
+        if preferred_temporality is not None:
+            for typ, temporality in preferred_temporality.items():
+                if typ is Counter:
+                    self._instrument_class_temporality[_Counter] = temporality
+                elif typ is UpDownCounter:
+                    self._instrument_class_temporality[_UpDownCounter] = (
+                        temporality
+                    )
+                elif typ is Histogram:
+                    self._instrument_class_temporality[_Histogram] = (
+                        temporality
+                    )
+                elif typ is Gauge:
+                    self._instrument_class_temporality[_Gauge] = temporality
+                elif typ is ObservableCounter:
+                    self._instrument_class_temporality[_ObservableCounter] = (
+                        temporality
+                    )
+                elif typ is ObservableUpDownCounter:
+                    self._instrument_class_temporality[
+                        _ObservableUpDownCounter
+                    ] = temporality
+                elif typ is ObservableGauge:
+                    self._instrument_class_temporality[_ObservableGauge] = (
+                        temporality
+                    )
+                else:
+                    raise Exception(f"Invalid instrument class found {typ}")
+
+        self._preferred_temporality = preferred_temporality
+        self._instrument_class_aggregation = {
+            _Counter: DefaultAggregation(),
+            _UpDownCounter: DefaultAggregation(),
+            _Histogram: DefaultAggregation(),
+            _Gauge: DefaultAggregation(),
+            _ObservableCounter: DefaultAggregation(),
+            _ObservableUpDownCounter: DefaultAggregation(),
+            _ObservableGauge: DefaultAggregation(),
+        }
+
+        if preferred_aggregation is not None:
+            for typ, aggregation in preferred_aggregation.items():
+                if typ is Counter:
+                    self._instrument_class_aggregation[_Counter] = aggregation
+                elif typ is UpDownCounter:
+                    self._instrument_class_aggregation[_UpDownCounter] = (
+                        aggregation
+                    )
+                elif typ is Histogram:
+                    self._instrument_class_aggregation[_Histogram] = (
+                        aggregation
+                    )
+                elif typ is Gauge:
+                    self._instrument_class_aggregation[_Gauge] = aggregation
+                elif typ is ObservableCounter:
+                    self._instrument_class_aggregation[_ObservableCounter] = (
+                        aggregation
+                    )
+                elif typ is ObservableUpDownCounter:
+                    self._instrument_class_aggregation[
+                        _ObservableUpDownCounter
+                    ] = aggregation
+                elif typ is ObservableGauge:
+                    self._instrument_class_aggregation[_ObservableGauge] = (
+                        aggregation
+                    )
+                else:
+                    raise Exception(f"Invalid instrument class found {typ}")
+
+    @final
+    def collect(self, timeout_millis: float = 10_000) -> None:
+        """Collects the metrics from the internal SDK state and
+        invokes the `_receive_metrics` with the collection.
+
+        Args:
+            timeout_millis: Amount of time in milliseconds before this function
+              raises a timeout error.
+
+        If any of the underlying ``collect`` methods called by this method
+        fails for any reason (including timeout), an exception will be raised
+        detailing the individual errors that caused this function to fail.
+        """
+        if self._collect is None:
+            _logger.warning(
+                "Cannot call collect on a MetricReader until it is registered on a MeterProvider"
+            )
+            return
+
+        metrics = self._collect(self, timeout_millis=timeout_millis)
+
+        if metrics is not None:
+            self._receive_metrics(
+                metrics,
+                timeout_millis=timeout_millis,
+            )
+
+    @final
+    def _set_collect_callback(
+        self,
+        func: Callable[
+            [
+                "opentelemetry.sdk.metrics.export.MetricReader",
+                AggregationTemporality,
+            ],
+            Iterable["opentelemetry.sdk.metrics.export.Metric"],
+        ],
+    ) -> None:
+        """This function is internal to the SDK. It should not be called or overridden by users"""
+        self._collect = func
+
+    @abstractmethod
+    def _receive_metrics(
+        self,
+        metrics_data: "opentelemetry.sdk.metrics.export.MetricsData",
+        timeout_millis: float = 10_000,
+        **kwargs,
+    ) -> None:
+        """Called by `MetricReader.collect` when it receives a batch of metrics"""
+
+    def force_flush(self, timeout_millis: float = 10_000) -> bool:
+        self.collect(timeout_millis=timeout_millis)
+        return True
+
+    @abstractmethod
+    def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None:
+        """Shuts down the MetricReader. This method provides a way
+        for the MetricReader to do any cleanup required. A metric reader can
+        only be shutdown once, any subsequent calls are ignored and return
+        failure status.
+
+        When a `MetricReader` is registered on a
+        :class:`~opentelemetry.sdk.metrics.MeterProvider`,
+        :meth:`~opentelemetry.sdk.metrics.MeterProvider.shutdown` will invoke this
+        automatically.
+        """
+
+
+class InMemoryMetricReader(MetricReader):
+    """Implementation of `MetricReader` that returns its metrics from :func:`get_metrics_data`.
+
+    This is useful for e.g. unit tests.
+    """
+
+    def __init__(
+        self,
+        preferred_temporality: dict[type, AggregationTemporality]
+        | None = None,
+        preferred_aggregation: dict[
+            type, "opentelemetry.sdk.metrics.view.Aggregation"
+        ]
+        | None = None,
+    ) -> None:
+        super().__init__(
+            preferred_temporality=preferred_temporality,
+            preferred_aggregation=preferred_aggregation,
+        )
+        self._lock = RLock()
+        self._metrics_data: "opentelemetry.sdk.metrics.export.MetricsData" = (
+            None
+        )
+
+    def get_metrics_data(
+        self,
+    ) -> Optional["opentelemetry.sdk.metrics.export.MetricsData"]:
+        """Reads and returns current metrics from the SDK"""
+        with self._lock:
+            self.collect()
+            metrics_data = self._metrics_data
+            self._metrics_data = None
+        return metrics_data
+
+    def _receive_metrics(
+        self,
+        metrics_data: "opentelemetry.sdk.metrics.export.MetricsData",
+        timeout_millis: float = 10_000,
+        **kwargs,
+    ) -> None:
+        with self._lock:
+            self._metrics_data = metrics_data
+
+    def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None:
+        pass
+
+
+class PeriodicExportingMetricReader(MetricReader):
+    """`PeriodicExportingMetricReader` is an implementation of `MetricReader`
+    that collects metrics based on a user-configurable time interval, and passes the
+    metrics to the configured exporter. If the time interval is set to `math.inf`, the
+    reader will not invoke periodic collection.
+
+    The configured exporter's :py:meth:`~MetricExporter.export` method will not be called
+    concurrently.
+    """
+
+    def __init__(
+        self,
+        exporter: MetricExporter,
+        export_interval_millis: Optional[float] = None,
+        export_timeout_millis: Optional[float] = None,
+    ) -> None:
+        # PeriodicExportingMetricReader defers to exporter for configuration
+        super().__init__(
+            preferred_temporality=exporter._preferred_temporality,
+            preferred_aggregation=exporter._preferred_aggregation,
+        )
+
+        # This lock is held whenever calling self._exporter.export() to prevent concurrent
+        # execution of MetricExporter.export()
+        # https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exportbatch
+        self._export_lock = Lock()
+
+        self._exporter = exporter
+        if export_interval_millis is None:
+            try:
+                export_interval_millis = float(
+                    environ.get(OTEL_METRIC_EXPORT_INTERVAL, 60000)
+                )
+            except ValueError:
+                _logger.warning(
+                    "Found invalid value for export interval, using default"
+                )
+                export_interval_millis = 60000
+        if export_timeout_millis is None:
+            try:
+                export_timeout_millis = float(
+                    environ.get(OTEL_METRIC_EXPORT_TIMEOUT, 30000)
+                )
+            except ValueError:
+                _logger.warning(
+                    "Found invalid value for export timeout, using default"
+                )
+                export_timeout_millis = 30000
+        self._export_interval_millis = export_interval_millis
+        self._export_timeout_millis = export_timeout_millis
+        self._shutdown = False
+        self._shutdown_event = Event()
+        self._shutdown_once = Once()
+        self._daemon_thread = None
+        if (
+            self._export_interval_millis > 0
+            and self._export_interval_millis < math.inf
+        ):
+            self._daemon_thread = Thread(
+                name="OtelPeriodicExportingMetricReader",
+                target=self._ticker,
+                daemon=True,
+            )
+            self._daemon_thread.start()
+            if hasattr(os, "register_at_fork"):
+                weak_at_fork = weakref.WeakMethod(self._at_fork_reinit)
+
+                os.register_at_fork(
+                    after_in_child=lambda: weak_at_fork()()  # pylint: disable=unnecessary-lambda, protected-access
+                )
+        elif self._export_interval_millis <= 0:
+            raise ValueError(
+                f"interval value {self._export_interval_millis} is invalid "
+                "and needs to be larger than zero."
+            )
+
+    def _at_fork_reinit(self):
+        self._daemon_thread = Thread(
+            name="OtelPeriodicExportingMetricReader",
+            target=self._ticker,
+            daemon=True,
+        )
+        self._daemon_thread.start()
+
+    def _ticker(self) -> None:
+        interval_secs = self._export_interval_millis / 1e3
+        while not self._shutdown_event.wait(interval_secs):
+            try:
+                self.collect(timeout_millis=self._export_timeout_millis)
+            except MetricsTimeoutError:
+                _logger.warning(
+                    "Metric collection timed out. Will try again after %s seconds",
+                    interval_secs,
+                    exc_info=True,
+                )
+        # one last collection below before shutting down completely
+        try:
+            self.collect(timeout_millis=self._export_timeout_millis)
+        except MetricsTimeoutError:
+            _logger.warning(
+                "Metric collection timed out.",
+                exc_info=True,
+            )
+
+    def _receive_metrics(
+        self,
+        metrics_data: MetricsData,
+        timeout_millis: float = 10_000,
+        **kwargs,
+    ) -> None:
+        token = attach(set_value(_SUPPRESS_INSTRUMENTATION_KEY, True))
+        # pylint: disable=broad-exception-caught,invalid-name
+        try:
+            with self._export_lock:
+                self._exporter.export(
+                    metrics_data, timeout_millis=timeout_millis
+                )
+        except Exception:
+            _logger.exception("Exception while exporting metrics")
+        detach(token)
+
+    def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None:
+        deadline_ns = time_ns() + timeout_millis * 10**6
+
+        def _shutdown():
+            self._shutdown = True
+
+        did_set = self._shutdown_once.do_once(_shutdown)
+        if not did_set:
+            _logger.warning("Can't shutdown multiple times")
+            return
+
+        self._shutdown_event.set()
+        if self._daemon_thread:
+            self._daemon_thread.join(timeout=(deadline_ns - time_ns()) / 10**9)
+        self._exporter.shutdown(
+            timeout_millis=(deadline_ns - time_ns()) / 10**6
+        )
+
+    def force_flush(self, timeout_millis: float = 10_000) -> bool:
+        super().force_flush(timeout_millis=timeout_millis)
+        self._exporter.force_flush(timeout_millis=timeout_millis)
+        return True
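+
+
+# Editor's sketch of typical wiring (illustrative only; assumes the public
+# packages re-export the classes defined above):
+#
+#     from opentelemetry.metrics import get_meter_provider, set_meter_provider
+#     from opentelemetry.sdk.metrics import MeterProvider
+#     from opentelemetry.sdk.metrics.export import (
+#         ConsoleMetricExporter,
+#         PeriodicExportingMetricReader,
+#     )
+#
+#     reader = PeriodicExportingMetricReader(
+#         ConsoleMetricExporter(), export_interval_millis=5_000
+#     )
+#     provider = MeterProvider(metric_readers=[reader])
+#     set_meter_provider(provider)
+#     counter = get_meter_provider().get_meter("example").create_counter("requests")
+#     counter.add(1)
+#     provider.shutdown()  # flushes one final collection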
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/instrument.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/instrument.py
new file mode 100644
index 00000000..b01578f4
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/instrument.py
@@ -0,0 +1,334 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# pylint: disable=too-many-ancestors, unused-import
+from __future__ import annotations
+
+from logging import getLogger
+from time import time_ns
+from typing import Generator, Iterable, List, Sequence, Union
+
+# This kind of import is needed to avoid Sphinx errors.
+import opentelemetry.sdk.metrics
+from opentelemetry.context import Context, get_current
+from opentelemetry.metrics import CallbackT
+from opentelemetry.metrics import Counter as APICounter
+from opentelemetry.metrics import Histogram as APIHistogram
+from opentelemetry.metrics import ObservableCounter as APIObservableCounter
+from opentelemetry.metrics import ObservableGauge as APIObservableGauge
+from opentelemetry.metrics import (
+    ObservableUpDownCounter as APIObservableUpDownCounter,
+)
+from opentelemetry.metrics import UpDownCounter as APIUpDownCounter
+from opentelemetry.metrics import _Gauge as APIGauge
+from opentelemetry.metrics._internal.instrument import (
+    CallbackOptions,
+    _MetricsHistogramAdvisory,
+)
+from opentelemetry.sdk.metrics._internal.measurement import Measurement
+from opentelemetry.sdk.util.instrumentation import InstrumentationScope
+
+_logger = getLogger(__name__)
+
+
+_ERROR_MESSAGE = (
+    "Expected ASCII string of maximum length 63 characters but got {}"
+)
+
+
+class _Synchronous:
+    def __init__(
+        self,
+        name: str,
+        instrumentation_scope: InstrumentationScope,
+        measurement_consumer: "opentelemetry.sdk.metrics.MeasurementConsumer",
+        unit: str = "",
+        description: str = "",
+    ):
+        # pylint: disable=no-member
+        result = self._check_name_unit_description(name, unit, description)
+
+        if result["name"] is None:
+            # pylint: disable=broad-exception-raised
+            raise Exception(_ERROR_MESSAGE.format(name))
+
+        if result["unit"] is None:
+            # pylint: disable=broad-exception-raised
+            raise Exception(_ERROR_MESSAGE.format(unit))
+
+        name = result["name"]
+        unit = result["unit"]
+        description = result["description"]
+
+        self.name = name.lower()
+        self.unit = unit
+        self.description = description
+        self.instrumentation_scope = instrumentation_scope
+        self._measurement_consumer = measurement_consumer
+        super().__init__(name, unit=unit, description=description)
+
+
+class _Asynchronous:
+    def __init__(
+        self,
+        name: str,
+        instrumentation_scope: InstrumentationScope,
+        measurement_consumer: "opentelemetry.sdk.metrics.MeasurementConsumer",
+        callbacks: Iterable[CallbackT] | None = None,
+        unit: str = "",
+        description: str = "",
+    ):
+        # pylint: disable=no-member
+        result = self._check_name_unit_description(name, unit, description)
+
+        if result["name"] is None:
+            # pylint: disable=broad-exception-raised
+            raise Exception(_ERROR_MESSAGE.format(name))
+
+        if result["unit"] is None:
+            # pylint: disable=broad-exception-raised
+            raise Exception(_ERROR_MESSAGE.format(unit))
+
+        name = result["name"]
+        unit = result["unit"]
+        description = result["description"]
+
+        self.name = name.lower()
+        self.unit = unit
+        self.description = description
+        self.instrumentation_scope = instrumentation_scope
+        self._measurement_consumer = measurement_consumer
+        super().__init__(name, callbacks, unit=unit, description=description)
+
+        self._callbacks: List[CallbackT] = []
+
+        if callbacks is not None:
+            for callback in callbacks:
+                if isinstance(callback, Generator):
+                    # advance the generator to its first yield
+                    next(callback)
+
+                    def inner(
+                        options: CallbackOptions,
+                        callback=callback,
+                    ) -> Iterable[Measurement]:
+                        try:
+                            return callback.send(options)
+                        except StopIteration:
+                            return []
+
+                    self._callbacks.append(inner)
+                else:
+                    self._callbacks.append(callback)
+
+    def callback(
+        self, callback_options: CallbackOptions
+    ) -> Iterable[Measurement]:
+        for callback in self._callbacks:
+            try:
+                for api_measurement in callback(callback_options):
+                    yield Measurement(
+                        api_measurement.value,
+                        time_unix_nano=time_ns(),
+                        instrument=self,
+                        context=api_measurement.context or get_current(),
+                        attributes=api_measurement.attributes,
+                    )
+            except Exception:  # pylint: disable=broad-exception-caught
+                _logger.exception(
+                    "Callback failed for instrument %s.", self.name
+                )
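+
+    # Editor's sketch: a generator callback compatible with the wrapping in
+    # __init__ above. It is primed once with next() and then driven with
+    # send(options) on every collection. Observation is the measurement type
+    # from the API package; read_cpu is a hypothetical helper:
+    #
+    #     from opentelemetry.metrics import CallbackOptions, Observation
+    #
+    #     def cpu_callback():
+    #         options = yield  # paused here after priming
+    #         while True:
+    #             options = yield [Observation(read_cpu(), {"core": "0"})]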
+
+
+class Counter(_Synchronous, APICounter):
+    def __new__(cls, *args, **kwargs):
+        if cls is Counter:
+            raise TypeError("Counter must be instantiated via a meter.")
+        return super().__new__(cls)
+
+    def add(
+        self,
+        amount: Union[int, float],
+        attributes: dict[str, str] | None = None,
+        context: Context | None = None,
+    ):
+        if amount < 0:
+            _logger.warning(
+                "Add amount must be non-negative on Counter %s.", self.name
+            )
+            return
+        time_unix_nano = time_ns()
+        self._measurement_consumer.consume_measurement(
+            Measurement(
+                amount,
+                time_unix_nano,
+                self,
+                context or get_current(),
+                attributes,
+            )
+        )
+
+
+class UpDownCounter(_Synchronous, APIUpDownCounter):
+    def __new__(cls, *args, **kwargs):
+        if cls is UpDownCounter:
+            raise TypeError("UpDownCounter must be instantiated via a meter.")
+        return super().__new__(cls)
+
+    def add(
+        self,
+        amount: Union[int, float],
+        attributes: dict[str, str] | None = None,
+        context: Context | None = None,
+    ):
+        time_unix_nano = time_ns()
+        self._measurement_consumer.consume_measurement(
+            Measurement(
+                amount,
+                time_unix_nano,
+                self,
+                context or get_current(),
+                attributes,
+            )
+        )
+
+
+class ObservableCounter(_Asynchronous, APIObservableCounter):
+    def __new__(cls, *args, **kwargs):
+        if cls is ObservableCounter:
+            raise TypeError(
+                "ObservableCounter must be instantiated via a meter."
+            )
+        return super().__new__(cls)
+
+
+class ObservableUpDownCounter(_Asynchronous, APIObservableUpDownCounter):
+    def __new__(cls, *args, **kwargs):
+        if cls is ObservableUpDownCounter:
+            raise TypeError(
+                "ObservableUpDownCounter must be instantiated via a meter."
+            )
+        return super().__new__(cls)
+
+
+class Histogram(_Synchronous, APIHistogram):
+    def __init__(
+        self,
+        name: str,
+        instrumentation_scope: InstrumentationScope,
+        measurement_consumer: "opentelemetry.sdk.metrics.MeasurementConsumer",
+        unit: str = "",
+        description: str = "",
+        explicit_bucket_boundaries_advisory: Sequence[float] | None = None,
+    ):
+        super().__init__(
+            name,
+            unit=unit,
+            description=description,
+            instrumentation_scope=instrumentation_scope,
+            measurement_consumer=measurement_consumer,
+        )
+        self._advisory = _MetricsHistogramAdvisory(
+            explicit_bucket_boundaries=explicit_bucket_boundaries_advisory
+        )
+
+    def __new__(cls, *args, **kwargs):
+        if cls is Histogram:
+            raise TypeError("Histogram must be instantiated via a meter.")
+        return super().__new__(cls)
+
+    def record(
+        self,
+        amount: Union[int, float],
+        attributes: dict[str, str] | None = None,
+        context: Context | None = None,
+    ):
+        if amount < 0:
+            _logger.warning(
+                "Record amount must be non-negative on Histogram %s.",
+                self.name,
+            )
+            return
+        time_unix_nano = time_ns()
+        self._measurement_consumer.consume_measurement(
+            Measurement(
+                amount,
+                time_unix_nano,
+                self,
+                context or get_current(),
+                attributes,
+            )
+        )
+
+
+class Gauge(_Synchronous, APIGauge):
+    def __new__(cls, *args, **kwargs):
+        if cls is Gauge:
+            raise TypeError("Gauge must be instantiated via a meter.")
+        return super().__new__(cls)
+
+    def set(
+        self,
+        amount: Union[int, float],
+        attributes: dict[str, str] | None = None,
+        context: Context | None = None,
+    ):
+        time_unix_nano = time_ns()
+        self._measurement_consumer.consume_measurement(
+            Measurement(
+                amount,
+                time_unix_nano,
+                self,
+                context or get_current(),
+                attributes,
+            )
+        )
+
+
+class ObservableGauge(_Asynchronous, APIObservableGauge):
+    def __new__(cls, *args, **kwargs):
+        if cls is ObservableGauge:
+            raise TypeError(
+                "ObservableGauge must be instantiated via a meter."
+            )
+        return super().__new__(cls)
+
+
+# The classes below exist to prevent direct instantiation of the public
+# instrument classes above.
+class _Counter(Counter):
+    pass
+
+
+class _UpDownCounter(UpDownCounter):
+    pass
+
+
+class _ObservableCounter(ObservableCounter):
+    pass
+
+
+class _ObservableUpDownCounter(ObservableUpDownCounter):
+    pass
+
+
+class _Histogram(Histogram):
+    pass
+
+
+class _Gauge(Gauge):
+    pass
+
+
+class _ObservableGauge(ObservableGauge):
+    pass
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/measurement.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/measurement.py
new file mode 100644
index 00000000..56619a83
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/measurement.py
@@ -0,0 +1,45 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from dataclasses import dataclass
+from typing import Union
+
+from opentelemetry.context import Context
+from opentelemetry.metrics import Instrument
+from opentelemetry.util.types import Attributes
+
+
+@dataclass(frozen=True)
+class Measurement:
+    """
+    Represents a data point reported via the metrics API to the SDK.
+
+    Attributes
+        value: Measured value
+        time_unix_nano: The time the API call was made to record the Measurement
+        instrument: The instrument that produced this `Measurement`.
+        context: The active Context of the Measurement at API call time.
+        attributes: Measurement attributes
+    """
+
+    # TODO Fix doc - if the valid Google-style `Attributes:` key is used, the
+    # attributes are duplicated: one copy comes from the napoleon extension
+    # and the other from the autodoc extension. This raises a Sphinx error
+    # about a duplicated object description.
+    # See https://github.com/sphinx-doc/sphinx/issues/8664
+
+    value: Union[int, float]
+    time_unix_nano: int
+    instrument: Instrument
+    context: Context
+    attributes: Attributes = None
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/measurement_consumer.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/measurement_consumer.py
new file mode 100644
index 00000000..c6510330
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/measurement_consumer.py
@@ -0,0 +1,145 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# pylint: disable=unused-import
+
+from abc import ABC, abstractmethod
+from threading import Lock
+from time import time_ns
+from typing import Iterable, List, Mapping, Optional
+
+# This kind of import is needed to avoid Sphinx errors.
+import opentelemetry.sdk.metrics
+import opentelemetry.sdk.metrics._internal.instrument
+import opentelemetry.sdk.metrics._internal.sdk_configuration
+from opentelemetry.metrics._internal.instrument import CallbackOptions
+from opentelemetry.sdk.metrics._internal.exceptions import MetricsTimeoutError
+from opentelemetry.sdk.metrics._internal.measurement import Measurement
+from opentelemetry.sdk.metrics._internal.metric_reader_storage import (
+    MetricReaderStorage,
+)
+from opentelemetry.sdk.metrics._internal.point import Metric
+
+
+class MeasurementConsumer(ABC):
+    @abstractmethod
+    def consume_measurement(self, measurement: Measurement) -> None:
+        pass
+
+    @abstractmethod
+    def register_asynchronous_instrument(
+        self,
+        instrument: (
+            "opentelemetry.sdk.metrics._internal.instrument_Asynchronous"
+        ),
+    ):
+        pass
+
+    @abstractmethod
+    def collect(
+        self,
+        metric_reader: "opentelemetry.sdk.metrics.MetricReader",
+        timeout_millis: float = 10_000,
+    ) -> Optional[Iterable[Metric]]:
+        pass
+
+
+class SynchronousMeasurementConsumer(MeasurementConsumer):
+    def __init__(
+        self,
+        sdk_config: "opentelemetry.sdk.metrics._internal.SdkConfiguration",
+    ) -> None:
+        self._lock = Lock()
+        self._sdk_config = sdk_config
+        # should never be mutated
+        self._reader_storages: Mapping[
+            "opentelemetry.sdk.metrics.MetricReader", MetricReaderStorage
+        ] = {
+            reader: MetricReaderStorage(
+                sdk_config,
+                reader._instrument_class_temporality,
+                reader._instrument_class_aggregation,
+            )
+            for reader in sdk_config.metric_readers
+        }
+        self._async_instruments: List[
+            "opentelemetry.sdk.metrics._internal.instrument._Asynchronous"
+        ] = []
+
+    def consume_measurement(self, measurement: Measurement) -> None:
+        should_sample_exemplar = (
+            self._sdk_config.exemplar_filter.should_sample(
+                measurement.value,
+                measurement.time_unix_nano,
+                measurement.attributes,
+                measurement.context,
+            )
+        )
+        for reader_storage in self._reader_storages.values():
+            reader_storage.consume_measurement(
+                measurement, should_sample_exemplar
+            )
+
+    def register_asynchronous_instrument(
+        self,
+        instrument: (
+            "opentelemetry.sdk.metrics._internal.instrument._Asynchronous"
+        ),
+    ) -> None:
+        with self._lock:
+            self._async_instruments.append(instrument)
+
+    def collect(
+        self,
+        metric_reader: "opentelemetry.sdk.metrics.MetricReader",
+        timeout_millis: float = 10_000,
+    ) -> Optional[Iterable[Metric]]:
+        with self._lock:
+            metric_reader_storage = self._reader_storages[metric_reader]
+            # for now, just use the defaults
+            callback_options = CallbackOptions()
+            deadline_ns = time_ns() + (timeout_millis * 1e6)
+
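+            # Units note (added comment): time_ns() is in nanoseconds, so the
+            # millisecond arguments are converted with `* 1e6`; both the
+            # deadline above and the default callback budget below are
+            # therefore nanosecond values.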
+            default_timeout_ns = 10000 * 1e6
+
+            for async_instrument in self._async_instruments:
+                remaining_time = deadline_ns - time_ns()
+
+                if remaining_time < default_timeout_ns:
+                    callback_options = CallbackOptions(
+                        timeout_millis=remaining_time / 1e6
+                    )
+
+                measurements = async_instrument.callback(callback_options)
+                if time_ns() >= deadline_ns:
+                    raise MetricsTimeoutError(
+                        "Timed out while executing callback"
+                    )
+
+                for measurement in measurements:
+                    should_sample_exemplar = (
+                        self._sdk_config.exemplar_filter.should_sample(
+                            measurement.value,
+                            measurement.time_unix_nano,
+                            measurement.attributes,
+                            measurement.context,
+                        )
+                    )
+                    metric_reader_storage.consume_measurement(
+                        measurement, should_sample_exemplar
+                    )
+
+            result = metric_reader_storage.collect()
+
+        return result
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/metric_reader_storage.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/metric_reader_storage.py
new file mode 100644
index 00000000..f5121811
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/metric_reader_storage.py
@@ -0,0 +1,315 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from logging import getLogger
+from threading import RLock
+from time import time_ns
+from typing import Dict, List, Optional
+
+from opentelemetry.metrics import (
+    Asynchronous,
+    Counter,
+    Instrument,
+    ObservableCounter,
+)
+from opentelemetry.sdk.metrics._internal._view_instrument_match import (
+    _ViewInstrumentMatch,
+)
+from opentelemetry.sdk.metrics._internal.aggregation import (
+    Aggregation,
+    ExplicitBucketHistogramAggregation,
+    _DropAggregation,
+    _ExplicitBucketHistogramAggregation,
+    _ExponentialBucketHistogramAggregation,
+    _LastValueAggregation,
+    _SumAggregation,
+)
+from opentelemetry.sdk.metrics._internal.export import AggregationTemporality
+from opentelemetry.sdk.metrics._internal.measurement import Measurement
+from opentelemetry.sdk.metrics._internal.point import (
+    ExponentialHistogram,
+    Gauge,
+    Histogram,
+    Metric,
+    MetricsData,
+    ResourceMetrics,
+    ScopeMetrics,
+    Sum,
+)
+from opentelemetry.sdk.metrics._internal.sdk_configuration import (
+    SdkConfiguration,
+)
+from opentelemetry.sdk.metrics._internal.view import View
+from opentelemetry.sdk.util.instrumentation import InstrumentationScope
+
+_logger = getLogger(__name__)
+
+_DEFAULT_VIEW = View(instrument_name="")
+
+
+class MetricReaderStorage:
+    """The SDK's storage for a given reader"""
+
+    def __init__(
+        self,
+        sdk_config: SdkConfiguration,
+        instrument_class_temporality: Dict[type, AggregationTemporality],
+        instrument_class_aggregation: Dict[type, Aggregation],
+    ) -> None:
+        self._lock = RLock()
+        self._sdk_config = sdk_config
+        self._instrument_view_instrument_matches: Dict[
+            Instrument, List[_ViewInstrumentMatch]
+        ] = {}
+        self._instrument_class_temporality = instrument_class_temporality
+        self._instrument_class_aggregation = instrument_class_aggregation
+
+    def _get_or_init_view_instrument_match(
+        self, instrument: Instrument
+    ) -> List[_ViewInstrumentMatch]:
+        # Optimistically get the relevant views for the given instrument. Once
+        # set for a given instrument, the mapping will never change.
+
+        if instrument in self._instrument_view_instrument_matches:
+            return self._instrument_view_instrument_matches[instrument]
+
+        with self._lock:
+            # double check if it was set before we held the lock
+            if instrument in self._instrument_view_instrument_matches:
+                return self._instrument_view_instrument_matches[instrument]
+
+            # not present, hold the lock and add a new mapping
+            view_instrument_matches = []
+
+            self._handle_view_instrument_match(
+                instrument, view_instrument_matches
+            )
+
+            # if no view targeted the instrument, use the default
+            if not view_instrument_matches:
+                view_instrument_matches.append(
+                    _ViewInstrumentMatch(
+                        view=_DEFAULT_VIEW,
+                        instrument=instrument,
+                        instrument_class_aggregation=(
+                            self._instrument_class_aggregation
+                        ),
+                    )
+                )
+            self._instrument_view_instrument_matches[instrument] = (
+                view_instrument_matches
+            )
+
+            return view_instrument_matches
+
+    def consume_measurement(
+        self, measurement: Measurement, should_sample_exemplar: bool = True
+    ) -> None:
+        for view_instrument_match in self._get_or_init_view_instrument_match(
+            measurement.instrument
+        ):
+            view_instrument_match.consume_measurement(
+                measurement, should_sample_exemplar
+            )
+
+    def collect(self) -> Optional[MetricsData]:
+        # Use a list instead of yielding to prevent a slow reader from holding
+        # SDK locks
+
+        # While holding the lock, new _ViewInstrumentMatch instances can't be
+        # added from another thread (so we are sure we collect all existing
+        # views).
+        # However, instruments can still send measurements that will make it
+        # into the individual aggregations; collection will acquire those locks
+        # iteratively to keep locking as fine-grained as possible. One side
+        # effect is that end times can be slightly skewed among the metric
+        # streams produced by the SDK, but we still align the output timestamps
+        # for a single instrument.
+
+        collection_start_nanos = time_ns()
+
+        with self._lock:
+            instrumentation_scope_scope_metrics: Dict[
+                InstrumentationScope, ScopeMetrics
+            ] = {}
+
+            for (
+                instrument,
+                view_instrument_matches,
+            ) in self._instrument_view_instrument_matches.items():
+                aggregation_temporality = self._instrument_class_temporality[
+                    instrument.__class__
+                ]
+
+                metrics: List[Metric] = []
+
+                for view_instrument_match in view_instrument_matches:
+                    data_points = view_instrument_match.collect(
+                        aggregation_temporality, collection_start_nanos
+                    )
+
+                    if data_points is None:
+                        continue
+
+                    if isinstance(
+                        # pylint: disable=protected-access
+                        view_instrument_match._aggregation,
+                        _SumAggregation,
+                    ):
+                        data = Sum(
+                            aggregation_temporality=aggregation_temporality,
+                            data_points=data_points,
+                            is_monotonic=isinstance(
+                                instrument, (Counter, ObservableCounter)
+                            ),
+                        )
+                    elif isinstance(
+                        # pylint: disable=protected-access
+                        view_instrument_match._aggregation,
+                        _LastValueAggregation,
+                    ):
+                        data = Gauge(data_points=data_points)
+                    elif isinstance(
+                        # pylint: disable=protected-access
+                        view_instrument_match._aggregation,
+                        _ExplicitBucketHistogramAggregation,
+                    ):
+                        data = Histogram(
+                            data_points=data_points,
+                            aggregation_temporality=aggregation_temporality,
+                        )
+                    elif isinstance(
+                        # pylint: disable=protected-access
+                        view_instrument_match._aggregation,
+                        _DropAggregation,
+                    ):
+                        continue
+
+                    elif isinstance(
+                        # pylint: disable=protected-access
+                        view_instrument_match._aggregation,
+                        _ExponentialBucketHistogramAggregation,
+                    ):
+                        data = ExponentialHistogram(
+                            data_points=data_points,
+                            aggregation_temporality=aggregation_temporality,
+                        )
+
+                    metrics.append(
+                        Metric(
+                            # pylint: disable=protected-access
+                            # pylint: disable=possibly-used-before-assignment
+                            name=view_instrument_match._name,
+                            description=view_instrument_match._description,
+                            unit=view_instrument_match._instrument.unit,
+                            data=data,
+                        )
+                    )
+
+                if metrics:
+                    if instrument.instrumentation_scope not in (
+                        instrumentation_scope_scope_metrics
+                    ):
+                        instrumentation_scope_scope_metrics[
+                            instrument.instrumentation_scope
+                        ] = ScopeMetrics(
+                            scope=instrument.instrumentation_scope,
+                            metrics=metrics,
+                            schema_url=instrument.instrumentation_scope.schema_url,
+                        )
+                    else:
+                        instrumentation_scope_scope_metrics[
+                            instrument.instrumentation_scope
+                        ].metrics.extend(metrics)
+
+            if instrumentation_scope_scope_metrics:
+                return MetricsData(
+                    resource_metrics=[
+                        ResourceMetrics(
+                            resource=self._sdk_config.resource,
+                            scope_metrics=list(
+                                instrumentation_scope_scope_metrics.values()
+                            ),
+                            schema_url=self._sdk_config.resource.schema_url,
+                        )
+                    ]
+                )
+
+            return None
+
+    def _handle_view_instrument_match(
+        self,
+        instrument: Instrument,
+        view_instrument_matches: List["_ViewInstrumentMatch"],
+    ) -> None:
+        for view in self._sdk_config.views:
+            # pylint: disable=protected-access
+            if not view._match(instrument):
+                continue
+
+            if not self._check_view_instrument_compatibility(view, instrument):
+                continue
+
+            new_view_instrument_match = _ViewInstrumentMatch(
+                view=view,
+                instrument=instrument,
+                instrument_class_aggregation=(
+                    self._instrument_class_aggregation
+                ),
+            )
+
+            for (
+                existing_view_instrument_matches
+            ) in self._instrument_view_instrument_matches.values():
+                for (
+                    existing_view_instrument_match
+                ) in existing_view_instrument_matches:
+                    if existing_view_instrument_match.conflicts(
+                        new_view_instrument_match
+                    ):
+                        _logger.warning(
+                            "Views %s and %s will cause conflicting "
+                            "metrics identities",
+                            existing_view_instrument_match._view,
+                            new_view_instrument_match._view,
+                        )
+
+            view_instrument_matches.append(new_view_instrument_match)
+
+    @staticmethod
+    def _check_view_instrument_compatibility(
+        view: View, instrument: Instrument
+    ) -> bool:
+        """
+        Checks if a view and an instrument are compatible.
+
+        Returns `true` if they are compatible and a `_ViewInstrumentMatch`
+        object should be created, `false` otherwise.
+        """
+
+        result = True
+
+        # pylint: disable=protected-access
+        if isinstance(instrument, Asynchronous) and isinstance(
+            view._aggregation, ExplicitBucketHistogramAggregation
+        ):
+            _logger.warning(
+                "View %s and instrument %s will produce "
+                "semantic errors when matched, the view "
+                "has not been applied.",
+                view,
+                instrument,
+            )
+            result = False
+
+        return result
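+
+
+# A hedged illustration (comments only, not upstream code): the compatibility
+# check above rejects pairing an asynchronous instrument with an explicit
+# bucket histogram aggregation, e.g.
+#
+#     view = View(
+#         instrument_type=ObservableCounter,
+#         aggregation=ExplicitBucketHistogramAggregation(),
+#     )
+#
+# matching such a view against an ObservableCounter logs the warning and the
+# view is simply not applied.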
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/point.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/point.py
new file mode 100644
index 00000000..8c7e3469
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/point.py
@@ -0,0 +1,277 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# pylint: disable=unused-import
+
+from dataclasses import asdict, dataclass, field
+from json import dumps, loads
+from typing import Optional, Sequence, Union
+
+# This kind of import is needed to avoid Sphinx errors.
+import opentelemetry.sdk.metrics._internal
+from opentelemetry.sdk.metrics._internal.exemplar import Exemplar
+from opentelemetry.sdk.resources import Resource
+from opentelemetry.sdk.util.instrumentation import InstrumentationScope
+from opentelemetry.util.types import Attributes
+
+
+@dataclass(frozen=True)
+class NumberDataPoint:
+    """Single data point in a timeseries that describes the time-varying scalar
+    value of a metric.
+    """
+
+    attributes: Attributes
+    start_time_unix_nano: int
+    time_unix_nano: int
+    value: Union[int, float]
+    exemplars: Sequence[Exemplar] = field(default_factory=list)
+
+    def to_json(self, indent: Optional[int] = 4) -> str:
+        return dumps(asdict(self), indent=indent)
+
+
+@dataclass(frozen=True)
+class HistogramDataPoint:
+    """Single data point in a timeseries that describes the time-varying scalar
+    value of a metric.
+    """
+
+    attributes: Attributes
+    start_time_unix_nano: int
+    time_unix_nano: int
+    count: int
+    sum: Union[int, float]
+    bucket_counts: Sequence[int]
+    explicit_bounds: Sequence[float]
+    min: float
+    max: float
+    exemplars: Sequence[Exemplar] = field(default_factory=list)
+
+    def to_json(self, indent: Optional[int] = 4) -> str:
+        return dumps(asdict(self), indent=indent)
+
+
+@dataclass(frozen=True)
+class Buckets:
+    offset: int
+    bucket_counts: Sequence[int]
+
+
+@dataclass(frozen=True)
+class ExponentialHistogramDataPoint:
+    """Single data point in a timeseries whose boundaries are defined by an
+    exponential function. This timeseries describes the time-varying scalar
+    value of a metric.
+    """
+
+    attributes: Attributes
+    start_time_unix_nano: int
+    time_unix_nano: int
+    count: int
+    sum: Union[int, float]
+    scale: int
+    zero_count: int
+    positive: Buckets
+    negative: Buckets
+    flags: int
+    min: float
+    max: float
+    exemplars: Sequence[Exemplar] = field(default_factory=list)
+
+    def to_json(self, indent: Optional[int] = 4) -> str:
+        return dumps(asdict(self), indent=indent)
+
+
+@dataclass(frozen=True)
+class ExponentialHistogram:
+    """Represents the type of a metric that is calculated by aggregating as an
+    ExponentialHistogram of all reported measurements over a time interval.
+    """
+
+    data_points: Sequence[ExponentialHistogramDataPoint]
+    aggregation_temporality: (
+        "opentelemetry.sdk.metrics.export.AggregationTemporality"
+    )
+
+    def to_json(self, indent: Optional[int] = 4) -> str:
+        return dumps(
+            {
+                "data_points": [
+                    loads(data_point.to_json(indent=indent))
+                    for data_point in self.data_points
+                ],
+                "aggregation_temporality": self.aggregation_temporality,
+            },
+            indent=indent,
+        )
+
+
+@dataclass(frozen=True)
+class Sum:
+    """Represents the type of a scalar metric that is calculated as a sum of
+    all reported measurements over a time interval."""
+
+    data_points: Sequence[NumberDataPoint]
+    aggregation_temporality: (
+        "opentelemetry.sdk.metrics.export.AggregationTemporality"
+    )
+    is_monotonic: bool
+
+    def to_json(self, indent: Optional[int] = 4) -> str:
+        return dumps(
+            {
+                "data_points": [
+                    loads(data_point.to_json(indent=indent))
+                    for data_point in self.data_points
+                ],
+                "aggregation_temporality": self.aggregation_temporality,
+                "is_monotonic": self.is_monotonic,
+            },
+            indent=indent,
+        )
+
+
+@dataclass(frozen=True)
+class Gauge:
+    """Represents the type of a scalar metric that always exports the current
+    value for every data point. It should be used for an unknown
+    aggregation."""
+
+    data_points: Sequence[NumberDataPoint]
+
+    def to_json(self, indent: Optional[int] = 4) -> str:
+        return dumps(
+            {
+                "data_points": [
+                    loads(data_point.to_json(indent=indent))
+                    for data_point in self.data_points
+                ],
+            },
+            indent=indent,
+        )
+
+
+@dataclass(frozen=True)
+class Histogram:
+    """Represents the type of a metric that is calculated by aggregating as a
+    histogram of all reported measurements over a time interval."""
+
+    data_points: Sequence[HistogramDataPoint]
+    aggregation_temporality: (
+        "opentelemetry.sdk.metrics.export.AggregationTemporality"
+    )
+
+    def to_json(self, indent: Optional[int] = 4) -> str:
+        return dumps(
+            {
+                "data_points": [
+                    loads(data_point.to_json(indent=indent))
+                    for data_point in self.data_points
+                ],
+                "aggregation_temporality": self.aggregation_temporality,
+            },
+            indent=indent,
+        )
+
+
+# pylint: disable=invalid-name
+DataT = Union[Sum, Gauge, Histogram, ExponentialHistogram]
+DataPointT = Union[
+    NumberDataPoint, HistogramDataPoint, ExponentialHistogramDataPoint
+]
+
+
+@dataclass(frozen=True)
+class Metric:
+    """Represents a metric point in the OpenTelemetry data model to be
+    exported."""
+
+    name: str
+    description: Optional[str]
+    unit: Optional[str]
+    data: DataT
+
+    def to_json(self, indent: Optional[int] = 4) -> str:
+        return dumps(
+            {
+                "name": self.name,
+                "description": self.description or "",
+                "unit": self.unit or "",
+                "data": loads(self.data.to_json(indent=indent)),
+            },
+            indent=indent,
+        )
+
+
+@dataclass(frozen=True)
+class ScopeMetrics:
+    """A collection of Metrics produced by a scope"""
+
+    scope: InstrumentationScope
+    metrics: Sequence[Metric]
+    schema_url: str
+
+    def to_json(self, indent: Optional[int] = 4) -> str:
+        return dumps(
+            {
+                "scope": loads(self.scope.to_json(indent=indent)),
+                "metrics": [
+                    loads(metric.to_json(indent=indent))
+                    for metric in self.metrics
+                ],
+                "schema_url": self.schema_url,
+            },
+            indent=indent,
+        )
+
+
+@dataclass(frozen=True)
+class ResourceMetrics:
+    """A collection of ScopeMetrics from a Resource"""
+
+    resource: Resource
+    scope_metrics: Sequence[ScopeMetrics]
+    schema_url: str
+
+    def to_json(self, indent: Optional[int] = 4) -> str:
+        return dumps(
+            {
+                "resource": loads(self.resource.to_json(indent=indent)),
+                "scope_metrics": [
+                    loads(scope_metrics.to_json(indent=indent))
+                    for scope_metrics in self.scope_metrics
+                ],
+                "schema_url": self.schema_url,
+            },
+            indent=indent,
+        )
+
+
+@dataclass(frozen=True)
+class MetricsData:
+    """An array of ResourceMetrics"""
+
+    resource_metrics: Sequence[ResourceMetrics]
+
+    def to_json(self, indent: Optional[int] = 4) -> str:
+        return dumps(
+            {
+                "resource_metrics": [
+                    loads(resource_metrics.to_json(indent=indent))
+                    for resource_metrics in self.resource_metrics
+                ]
+            },
+            indent=indent,
+        )
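+
+
+# A hedged usage note (not part of the upstream file): given a populated
+# `MetricsData` instance named `data` (hypothetical), `data.to_json()` nests
+# the resource -> scope -> metric -> data point hierarchy into one JSON
+# document with a top-level "resource_metrics" array; `indent=None` yields a
+# single line suitable for log shipping.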
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/sdk_configuration.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/sdk_configuration.py
new file mode 100644
index 00000000..3d88facb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/sdk_configuration.py
@@ -0,0 +1,30 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# pylint: disable=unused-import
+
+from dataclasses import dataclass
+from typing import Sequence
+
+# This kind of import is needed to avoid Sphinx errors.
+import opentelemetry.sdk.metrics
+import opentelemetry.sdk.resources
+
+
+@dataclass
+class SdkConfiguration:
+    exemplar_filter: "opentelemetry.sdk.metrics.ExemplarFilter"
+    resource: "opentelemetry.sdk.resources.Resource"
+    metric_readers: Sequence["opentelemetry.sdk.metrics.MetricReader"]
+    views: Sequence["opentelemetry.sdk.metrics.View"]
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/view.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/view.py
new file mode 100644
index 00000000..b3fa029d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/view.py
@@ -0,0 +1,195 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from fnmatch import fnmatch
+from logging import getLogger
+from typing import Callable, Optional, Set, Type
+
+from opentelemetry.metrics import Instrument
+from opentelemetry.sdk.metrics._internal.aggregation import (
+    Aggregation,
+    DefaultAggregation,
+    _Aggregation,
+    _ExplicitBucketHistogramAggregation,
+    _ExponentialBucketHistogramAggregation,
+)
+from opentelemetry.sdk.metrics._internal.exemplar import (
+    AlignedHistogramBucketExemplarReservoir,
+    ExemplarReservoirBuilder,
+    SimpleFixedSizeExemplarReservoir,
+)
+
+_logger = getLogger(__name__)
+
+
+def _default_reservoir_factory(
+    aggregation_type: Type[_Aggregation],
+) -> ExemplarReservoirBuilder:
+    """Default reservoir factory per aggregation."""
+    if issubclass(aggregation_type, _ExplicitBucketHistogramAggregation):
+        return AlignedHistogramBucketExemplarReservoir
+    if issubclass(aggregation_type, _ExponentialBucketHistogramAggregation):
+        return SimpleFixedSizeExemplarReservoir
+    return SimpleFixedSizeExemplarReservoir
+
+
+class View:
+    """
+    A `View`'s configuration parameters can be used for the following
+    purposes:
+
+    1. Match instruments: When an instrument matches a view, measurements
+       received by that instrument will be processed.
+    2. Customize metric streams: A metric stream is identified by a match
+       between a view and an instrument and a set of attributes. The metric
+       stream can be customized by certain attributes of the corresponding view.
+
+    The attributes documented next serve one of the previous two purposes.
+
+    Args:
+        instrument_type: This is an instrument matching attribute: the class the
+            instrument must be to match the view.
+
+        instrument_name: This is an instrument matching attribute: the name
+            the instrument must have to match the view. Wildcard characters
+            are supported, but they must not be used with this attribute if
+            the view also has a ``name`` defined.
+
+        meter_name: This is an instrument matching attribute: the name the
+            instrument meter must have to match the view.
+
+        meter_version: This is an instrument matching attribute: the version
+            the instrument meter must have to match the view.
+
+        meter_schema_url: This is an instrument matching attribute: the schema
+            URL the instrument meter must have to match the view.
+
+        name: This is a metric stream customizing attribute: the name of the
+            metric stream. If `None`, the name of the instrument will be used.
+
+        description: This is a metric stream customizing attribute: the
+            description of the metric stream. If `None`, the description of the instrument will
+            be used.
+
+        attribute_keys: This is a metric stream customizing attribute: this is
+            a set of attribute keys. If not `None` then only the measurement attributes that
+            are in ``attribute_keys`` will be used to identify the metric stream.
+
+        aggregation: This is a metric stream customizing attribute: the
+            aggregation instance to use when data is aggregated for the
+            corresponding metrics stream. If `None` an instance of
+            `DefaultAggregation` will be used.
+
+        exemplar_reservoir_factory: This is a metric stream customizing
+            attribute: the factory used to build the exemplar reservoir for
+            the corresponding metric stream.
+
+        instrument_unit: This is an instrument matching attribute: the unit the
+            instrument must have to match the view.
+
+    This class is not intended to be subclassed by the user.
+    """
+
+    _default_aggregation = DefaultAggregation()
+
+    def __init__(
+        self,
+        instrument_type: Optional[Type[Instrument]] = None,
+        instrument_name: Optional[str] = None,
+        meter_name: Optional[str] = None,
+        meter_version: Optional[str] = None,
+        meter_schema_url: Optional[str] = None,
+        name: Optional[str] = None,
+        description: Optional[str] = None,
+        attribute_keys: Optional[Set[str]] = None,
+        aggregation: Optional[Aggregation] = None,
+        exemplar_reservoir_factory: Optional[
+            Callable[[Type[_Aggregation]], ExemplarReservoirBuilder]
+        ] = None,
+        instrument_unit: Optional[str] = None,
+    ):
+        if (
+            instrument_type
+            is instrument_name
+            is instrument_unit
+            is meter_name
+            is meter_version
+            is meter_schema_url
+            is None
+        ):
+            # pylint: disable=broad-exception-raised
+            raise Exception(
+                "Some instrument selection "
+                f"criteria must be provided for View {name}"
+            )
+
+        if (
+            name is not None
+            and instrument_name is not None
+            and ("*" in instrument_name or "?" in instrument_name)
+        ):
+            # pylint: disable=broad-exception-raised
+            raise Exception(
+                f"View {name} declared with wildcard "
+                "characters in instrument_name"
+            )
+
+        # _name, _description, _aggregation, _exemplar_reservoir_factory and
+        # _attribute_keys will be accessed when instantiating a _ViewInstrumentMatch.
+        self._name = name
+        self._instrument_type = instrument_type
+        self._instrument_name = instrument_name
+        self._instrument_unit = instrument_unit
+        self._meter_name = meter_name
+        self._meter_version = meter_version
+        self._meter_schema_url = meter_schema_url
+
+        self._description = description
+        self._attribute_keys = attribute_keys
+        self._aggregation = aggregation or self._default_aggregation
+        self._exemplar_reservoir_factory = (
+            exemplar_reservoir_factory or _default_reservoir_factory
+        )
+
+    # pylint: disable=too-many-return-statements
+    # pylint: disable=too-many-branches
+    def _match(self, instrument: Instrument) -> bool:
+        if self._instrument_type is not None:
+            if not isinstance(instrument, self._instrument_type):
+                return False
+
+        if self._instrument_name is not None:
+            if not fnmatch(instrument.name, self._instrument_name):
+                return False
+
+        if self._instrument_unit is not None:
+            if not fnmatch(instrument.unit, self._instrument_unit):
+                return False
+
+        if self._meter_name is not None:
+            if instrument.instrumentation_scope.name != self._meter_name:
+                return False
+
+        if self._meter_version is not None:
+            if instrument.instrumentation_scope.version != self._meter_version:
+                return False
+
+        if self._meter_schema_url is not None:
+            if (
+                instrument.instrumentation_scope.schema_url
+                != self._meter_schema_url
+            ):
+                return False
+
+        return True
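+
+
+# A hedged usage sketch (not part of the upstream file; instrument and stream
+# names are hypothetical): a View that renames a matching instrument's metric
+# stream and keeps only one attribute key.
+#
+#     view = View(
+#         instrument_name="http.server.duration",
+#         name="server_latency",
+#         attribute_keys={"http.method"},
+#     )
+#
+# Note that because `name` is set, `instrument_name` must not contain the
+# wildcard characters checked for in `__init__` above.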
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/export/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/export/__init__.py
new file mode 100644
index 00000000..478237cd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/export/__init__.py
@@ -0,0 +1,66 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from opentelemetry.sdk.metrics._internal.export import (
+    AggregationTemporality,
+    ConsoleMetricExporter,
+    InMemoryMetricReader,
+    MetricExporter,
+    MetricExportResult,
+    MetricReader,
+    PeriodicExportingMetricReader,
+)
+
+# The point module is not in the export directory to avoid a circular import.
+from opentelemetry.sdk.metrics._internal.point import (  # noqa: F401
+    Buckets,
+    DataPointT,
+    DataT,
+    ExponentialHistogram,
+    ExponentialHistogramDataPoint,
+    Gauge,
+    Histogram,
+    HistogramDataPoint,
+    Metric,
+    MetricsData,
+    NumberDataPoint,
+    ResourceMetrics,
+    ScopeMetrics,
+    Sum,
+)
+
+__all__ = [
+    "AggregationTemporality",
+    "Buckets",
+    "ConsoleMetricExporter",
+    "InMemoryMetricReader",
+    "MetricExporter",
+    "MetricExportResult",
+    "MetricReader",
+    "PeriodicExportingMetricReader",
+    "DataPointT",
+    "DataT",
+    "ExponentialHistogram",
+    "ExponentialHistogramDataPoint",
+    "Gauge",
+    "Histogram",
+    "HistogramDataPoint",
+    "Metric",
+    "MetricsData",
+    "NumberDataPoint",
+    "ResourceMetrics",
+    "ScopeMetrics",
+    "Sum",
+]
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/view/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/view/__init__.py
new file mode 100644
index 00000000..c07adf6c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/view/__init__.py
@@ -0,0 +1,35 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from opentelemetry.sdk.metrics._internal.aggregation import (
+    Aggregation,
+    DefaultAggregation,
+    DropAggregation,
+    ExplicitBucketHistogramAggregation,
+    ExponentialBucketHistogramAggregation,
+    LastValueAggregation,
+    SumAggregation,
+)
+from opentelemetry.sdk.metrics._internal.view import View
+
+__all__ = [
+    "Aggregation",
+    "DefaultAggregation",
+    "DropAggregation",
+    "ExplicitBucketHistogramAggregation",
+    "ExponentialBucketHistogramAggregation",
+    "LastValueAggregation",
+    "SumAggregation",
+    "View",
+]
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/py.typed b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/py.typed
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/py.typed
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/resources/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/resources/__init__.py
new file mode 100644
index 00000000..752b9067
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/resources/__init__.py
@@ -0,0 +1,541 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+This package implements `OpenTelemetry Resources
+<https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/sdk.md#resource-sdk>`_:
+
+    *A Resource is an immutable representation of the entity producing
+    telemetry. For example, a process producing telemetry that is running in
+    a container on Kubernetes has a Pod name, it is in a namespace and
+    possibly is part of a Deployment which also has a name. All three of
+    these attributes can be included in the Resource.*
+
+Resource objects are created with `Resource.create`, which accepts attributes
+(key-values). Resources should NOT be created via their constructor, except by
+`ResourceDetector` instances, which cannot use `Resource.create` without
+causing an infinite loop. Working with `Resource` objects should only be done
+via the Resource API methods. Resource attributes can also be passed at
+process invocation in the :envvar:`OTEL_RESOURCE_ATTRIBUTES` environment
+variable. You should register your resource with the
+`opentelemetry.sdk.trace.TracerProvider` by passing it into the constructor.
+The `Resource` passed to a provider is available to the exporter, which can
+send on this information as it sees fit.
+
+.. code-block:: python
+
+    trace.set_tracer_provider(
+        TracerProvider(
+            resource=Resource.create({
+                "service.name": "shoppingcart",
+                "service.instance.id": "instance-12",
+            }),
+        ),
+    )
+    print(trace.get_tracer_provider().resource.attributes)
+
+    {'telemetry.sdk.language': 'python',
+    'telemetry.sdk.name': 'opentelemetry',
+    'telemetry.sdk.version': '0.13.dev0',
+    'service.name': 'shoppingcart',
+    'service.instance.id': 'instance-12'}
+
+Note that the OpenTelemetry project documents certain `"standard attributes"
+<https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/semantic_conventions/README.md>`_
+that have prescribed semantic meanings, for example ``service.name`` in the
+above example.
+"""
+
+import abc
+import concurrent.futures
+import logging
+import os
+import platform
+import socket
+import sys
+import typing
+from json import dumps
+from os import environ
+from types import ModuleType
+from typing import List, MutableMapping, Optional, cast
+from urllib import parse
+
+from opentelemetry.attributes import BoundedAttributes
+from opentelemetry.sdk.environment_variables import (
+    OTEL_EXPERIMENTAL_RESOURCE_DETECTORS,
+    OTEL_RESOURCE_ATTRIBUTES,
+    OTEL_SERVICE_NAME,
+)
+from opentelemetry.semconv.resource import ResourceAttributes
+from opentelemetry.util._importlib_metadata import entry_points, version
+from opentelemetry.util.types import AttributeValue
+
+psutil: Optional[ModuleType] = None
+
+try:
+    import psutil as psutil_module
+
+    psutil = psutil_module
+except ImportError:
+    pass
+
+LabelValue = AttributeValue
+Attributes = typing.Mapping[str, LabelValue]
+logger = logging.getLogger(__name__)
+
+CLOUD_PROVIDER = ResourceAttributes.CLOUD_PROVIDER
+CLOUD_ACCOUNT_ID = ResourceAttributes.CLOUD_ACCOUNT_ID
+CLOUD_REGION = ResourceAttributes.CLOUD_REGION
+CLOUD_AVAILABILITY_ZONE = ResourceAttributes.CLOUD_AVAILABILITY_ZONE
+CONTAINER_NAME = ResourceAttributes.CONTAINER_NAME
+CONTAINER_ID = ResourceAttributes.CONTAINER_ID
+CONTAINER_IMAGE_NAME = ResourceAttributes.CONTAINER_IMAGE_NAME
+CONTAINER_IMAGE_TAG = ResourceAttributes.CONTAINER_IMAGE_TAG
+DEPLOYMENT_ENVIRONMENT = ResourceAttributes.DEPLOYMENT_ENVIRONMENT
+FAAS_NAME = ResourceAttributes.FAAS_NAME
+FAAS_ID = ResourceAttributes.FAAS_ID
+FAAS_VERSION = ResourceAttributes.FAAS_VERSION
+FAAS_INSTANCE = ResourceAttributes.FAAS_INSTANCE
+HOST_NAME = ResourceAttributes.HOST_NAME
+HOST_ARCH = ResourceAttributes.HOST_ARCH
+HOST_TYPE = ResourceAttributes.HOST_TYPE
+HOST_IMAGE_NAME = ResourceAttributes.HOST_IMAGE_NAME
+HOST_IMAGE_ID = ResourceAttributes.HOST_IMAGE_ID
+HOST_IMAGE_VERSION = ResourceAttributes.HOST_IMAGE_VERSION
+KUBERNETES_CLUSTER_NAME = ResourceAttributes.K8S_CLUSTER_NAME
+KUBERNETES_NAMESPACE_NAME = ResourceAttributes.K8S_NAMESPACE_NAME
+KUBERNETES_POD_UID = ResourceAttributes.K8S_POD_UID
+KUBERNETES_POD_NAME = ResourceAttributes.K8S_POD_NAME
+KUBERNETES_CONTAINER_NAME = ResourceAttributes.K8S_CONTAINER_NAME
+KUBERNETES_REPLICA_SET_UID = ResourceAttributes.K8S_REPLICASET_UID
+KUBERNETES_REPLICA_SET_NAME = ResourceAttributes.K8S_REPLICASET_NAME
+KUBERNETES_DEPLOYMENT_UID = ResourceAttributes.K8S_DEPLOYMENT_UID
+KUBERNETES_DEPLOYMENT_NAME = ResourceAttributes.K8S_DEPLOYMENT_NAME
+KUBERNETES_STATEFUL_SET_UID = ResourceAttributes.K8S_STATEFULSET_UID
+KUBERNETES_STATEFUL_SET_NAME = ResourceAttributes.K8S_STATEFULSET_NAME
+KUBERNETES_DAEMON_SET_UID = ResourceAttributes.K8S_DAEMONSET_UID
+KUBERNETES_DAEMON_SET_NAME = ResourceAttributes.K8S_DAEMONSET_NAME
+KUBERNETES_JOB_UID = ResourceAttributes.K8S_JOB_UID
+KUBERNETES_JOB_NAME = ResourceAttributes.K8S_JOB_NAME
+KUBERNETES_CRON_JOB_UID = ResourceAttributes.K8S_CRONJOB_UID
+KUBERNETES_CRON_JOB_NAME = ResourceAttributes.K8S_CRONJOB_NAME
+OS_DESCRIPTION = ResourceAttributes.OS_DESCRIPTION
+OS_TYPE = ResourceAttributes.OS_TYPE
+OS_VERSION = ResourceAttributes.OS_VERSION
+PROCESS_PID = ResourceAttributes.PROCESS_PID
+PROCESS_PARENT_PID = ResourceAttributes.PROCESS_PARENT_PID
+PROCESS_EXECUTABLE_NAME = ResourceAttributes.PROCESS_EXECUTABLE_NAME
+PROCESS_EXECUTABLE_PATH = ResourceAttributes.PROCESS_EXECUTABLE_PATH
+PROCESS_COMMAND = ResourceAttributes.PROCESS_COMMAND
+PROCESS_COMMAND_LINE = ResourceAttributes.PROCESS_COMMAND_LINE
+PROCESS_COMMAND_ARGS = ResourceAttributes.PROCESS_COMMAND_ARGS
+PROCESS_OWNER = ResourceAttributes.PROCESS_OWNER
+PROCESS_RUNTIME_NAME = ResourceAttributes.PROCESS_RUNTIME_NAME
+PROCESS_RUNTIME_VERSION = ResourceAttributes.PROCESS_RUNTIME_VERSION
+PROCESS_RUNTIME_DESCRIPTION = ResourceAttributes.PROCESS_RUNTIME_DESCRIPTION
+SERVICE_NAME = ResourceAttributes.SERVICE_NAME
+SERVICE_NAMESPACE = ResourceAttributes.SERVICE_NAMESPACE
+SERVICE_INSTANCE_ID = ResourceAttributes.SERVICE_INSTANCE_ID
+SERVICE_VERSION = ResourceAttributes.SERVICE_VERSION
+TELEMETRY_SDK_NAME = ResourceAttributes.TELEMETRY_SDK_NAME
+TELEMETRY_SDK_VERSION = ResourceAttributes.TELEMETRY_SDK_VERSION
+TELEMETRY_AUTO_VERSION = ResourceAttributes.TELEMETRY_AUTO_VERSION
+TELEMETRY_SDK_LANGUAGE = ResourceAttributes.TELEMETRY_SDK_LANGUAGE
+
+_OPENTELEMETRY_SDK_VERSION: str = version("opentelemetry-sdk")
+
+
+class Resource:
+    """A Resource is an immutable representation of the entity producing telemetry as Attributes."""
+
+    _attributes: BoundedAttributes
+    _schema_url: str
+
+    def __init__(
+        self, attributes: Attributes, schema_url: typing.Optional[str] = None
+    ):
+        self._attributes = BoundedAttributes(attributes=attributes)
+        if schema_url is None:
+            schema_url = ""
+        self._schema_url = schema_url
+
+    @staticmethod
+    def create(
+        attributes: typing.Optional[Attributes] = None,
+        schema_url: typing.Optional[str] = None,
+    ) -> "Resource":
+        """Creates a new `Resource` from attributes.
+
+        `ResourceDetector` instances should not call this method.
+
+        Args:
+            attributes: Optional zero or more key-value pairs.
+            schema_url: Optional URL pointing to the schema
+
+        Returns:
+            The newly-created Resource.
+        """
+
+        if not attributes:
+            attributes = {}
+
+        otel_experimental_resource_detectors = {"otel"}.union(
+            {
+                otel_experimental_resource_detector.strip()
+                for otel_experimental_resource_detector in environ.get(
+                    OTEL_EXPERIMENTAL_RESOURCE_DETECTORS, ""
+                ).split(",")
+                if otel_experimental_resource_detector
+            }
+        )
+
+        resource_detectors: List[ResourceDetector] = []
+
+        resource_detector: str
+        for resource_detector in otel_experimental_resource_detectors:
+            try:
+                resource_detectors.append(
+                    next(
+                        iter(
+                            entry_points(
+                                group="opentelemetry_resource_detector",
+                                name=resource_detector.strip(),
+                            )  # type: ignore
+                        )
+                    ).load()()
+                )
+            except Exception:  # pylint: disable=broad-exception-caught
+                logger.exception(
+                    "Failed to load resource detector '%s', skipping",
+                    resource_detector,
+                )
+                continue
+        resource = get_aggregated_resources(
+            resource_detectors, _DEFAULT_RESOURCE
+        ).merge(Resource(attributes, schema_url))
+
+        if not resource.attributes.get(SERVICE_NAME, None):
+            default_service_name = "unknown_service"
+            process_executable_name = cast(
+                Optional[str],
+                resource.attributes.get(PROCESS_EXECUTABLE_NAME, None),
+            )
+            if process_executable_name:
+                default_service_name += ":" + process_executable_name
+            resource = resource.merge(
+                Resource({SERVICE_NAME: default_service_name}, schema_url)
+            )
+        return resource
+
+    @staticmethod
+    def get_empty() -> "Resource":
+        return _EMPTY_RESOURCE
+
+    @property
+    def attributes(self) -> Attributes:
+        return self._attributes
+
+    @property
+    def schema_url(self) -> str:
+        return self._schema_url
+
+    def merge(self, other: "Resource") -> "Resource":
+        """Merges this resource and an updating resource into a new `Resource`.
+
+        If a key exists on both the old and updating resource, the value of the
+        updating resource will override the old resource value.
+
+        The updating resource's `schema_url` will be used only if the old
+        `schema_url` is empty. Attempting to merge two resources with
+        different, non-empty values for `schema_url` will result in an error
+        and return the old resource.
+
+        Args:
+            other: The other resource to be merged.
+
+        Returns:
+            The newly-created Resource.
+        """
+        merged_attributes = self.attributes.copy()  # type: ignore
+        merged_attributes.update(other.attributes)  # type: ignore
+
+        if self.schema_url == "":
+            schema_url = other.schema_url
+        elif other.schema_url == "":
+            schema_url = self.schema_url
+        elif self.schema_url == other.schema_url:
+            schema_url = other.schema_url
+        else:
+            logger.error(
+                "Failed to merge resources: The two schemas %s and %s are incompatible",
+                self.schema_url,
+                other.schema_url,
+            )
+            return self
+        return Resource(merged_attributes, schema_url)  # type: ignore
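+
+    # A hedged merge example (comments only; the schema URL is hypothetical):
+    #
+    #     a = Resource({"service.name": "a"}, "https://example.test/1.0")
+    #     b = Resource({"service.name": "b"})
+    #     a.merge(b).attributes["service.name"]  # "b": the updater wins
+    #     a.merge(b).schema_url                  # kept from `a`, since `b`'s
+    #                                            # schema_url is empty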
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, Resource):
+            return False
+        return (
+            self._attributes == other._attributes
+            and self._schema_url == other._schema_url
+        )
+
+    def __hash__(self) -> int:
+        return hash(
+            f"{dumps(self._attributes.copy(), sort_keys=True)}|{self._schema_url}"  # type: ignore
+        )
+
+    def to_json(self, indent: Optional[int] = 4) -> str:
+        attributes: MutableMapping[str, AttributeValue] = dict(
+            self._attributes
+        )
+        return dumps(
+            {
+                "attributes": attributes,  # type: ignore
+                "schema_url": self._schema_url,
+            },
+            indent=indent,
+        )
+
+
+_EMPTY_RESOURCE = Resource({})
+_DEFAULT_RESOURCE = Resource(
+    {
+        TELEMETRY_SDK_LANGUAGE: "python",
+        TELEMETRY_SDK_NAME: "opentelemetry",
+        TELEMETRY_SDK_VERSION: _OPENTELEMETRY_SDK_VERSION,
+    }
+)
+
+
+class ResourceDetector(abc.ABC):
+    def __init__(self, raise_on_error: bool = False) -> None:
+        self.raise_on_error = raise_on_error
+
+    @abc.abstractmethod
+    def detect(self) -> "Resource":
+        """Don't call `Resource.create` here to avoid an infinite loop, instead instantiate `Resource` directly"""
+        raise NotImplementedError()
+
+
+class OTELResourceDetector(ResourceDetector):
+    # pylint: disable=no-self-use
+    def detect(self) -> "Resource":
+        env_resources_items = environ.get(OTEL_RESOURCE_ATTRIBUTES)
+        env_resource_map = {}
+
+        if env_resources_items:
+            for item in env_resources_items.split(","):
+                try:
+                    key, value = item.split("=", maxsplit=1)
+                except ValueError as exc:
+                    logger.warning(
+                        "Invalid key value resource attribute pair %s: %s",
+                        item,
+                        exc,
+                    )
+                    continue
+                value_url_decoded = parse.unquote(value.strip())
+                env_resource_map[key.strip()] = value_url_decoded
+
+        service_name = environ.get(OTEL_SERVICE_NAME)
+        if service_name:
+            env_resource_map[SERVICE_NAME] = service_name
+        return Resource(env_resource_map)
+
+
+class ProcessResourceDetector(ResourceDetector):
+    # pylint: disable=no-self-use
+    def detect(self) -> "Resource":
+        _runtime_version = ".".join(
+            map(
+                str,
+                (
+                    sys.version_info[:3]
+                    if sys.version_info.releaselevel == "final"
+                    and not sys.version_info.serial
+                    else sys.version_info
+                ),
+            )
+        )
+        _process_pid = os.getpid()
+        _process_executable_name = sys.executable
+        _process_executable_path = os.path.dirname(_process_executable_name)
+        _process_command = sys.argv[0]
+        _process_command_line = " ".join(sys.argv)
+        _process_command_args = sys.argv
+        resource_info = {
+            PROCESS_RUNTIME_DESCRIPTION: sys.version,
+            PROCESS_RUNTIME_NAME: sys.implementation.name,
+            PROCESS_RUNTIME_VERSION: _runtime_version,
+            PROCESS_PID: _process_pid,
+            PROCESS_EXECUTABLE_NAME: _process_executable_name,
+            PROCESS_EXECUTABLE_PATH: _process_executable_path,
+            PROCESS_COMMAND: _process_command,
+            PROCESS_COMMAND_LINE: _process_command_line,
+            PROCESS_COMMAND_ARGS: _process_command_args,
+        }
+        if hasattr(os, "getppid"):
+            # pypy3 does not have getppid()
+            resource_info[PROCESS_PARENT_PID] = os.getppid()
+
+        if psutil is not None:
+            process: psutil_module.Process = psutil.Process()
+            username = process.username()
+            resource_info[PROCESS_OWNER] = username
+
+        return Resource(resource_info)  # type: ignore
+
+
+class OsResourceDetector(ResourceDetector):
+    """Detect os resources based on `Operating System conventions <https://opentelemetry.io/docs/specs/semconv/resource/os/>`_."""
+
+    def detect(self) -> "Resource":
+        """Returns a resource with with ``os.type`` and ``os.version``.
+
+        Python's platform library
+        ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+        Python's ``platform`` module does not always return what a user might
+        expect for this information. Below is a breakdown of its return
+        values on different operating systems.
+
+        .. code-block:: python
+            :caption: Linux
+
+            >>> platform.system()
+            'Linux'
+            >>> platform.release()
+            '6.5.0-35-generic'
+            >>> platform.version()
+            '#35~22.04.1-Ubuntu SMP PREEMPT_DYNAMIC Tue May  7 09:00:52 UTC 2'
+
+        .. code-block:: python
+            :caption: MacOS
+
+            >>> platform.system()
+            'Darwin'
+            >>> platform.release()
+            '23.0.0'
+            >>> platform.version()
+            'Darwin Kernel Version 23.0.0: Fri Sep 15 14:42:57 PDT 2023; root:xnu-10002.1.13~1/RELEASE_ARM64_T8112'
+
+        .. code-block:: python
+            :caption: Windows
+
+            >>> platform.system()
+            'Windows'
+            >>> platform.release()
+            '2022Server'
+            >>> platform.version()
+            '10.0.20348'
+
+        .. code-block:: python
+            :caption: FreeBSD
+
+            >>> platform.system()
+            'FreeBSD'
+            >>> platform.release()
+            '14.1-RELEASE'
+            >>> platform.version()
+            'FreeBSD 14.1-RELEASE releng/14.1-n267679-10e31f0946d8 GENERIC'
+
+        .. code-block:: python
+            :caption: Solaris
+
+            >>> platform.system()
+            'SunOS'
+            >>> platform.release()
+            '5.11'
+            >>> platform.version()
+            '11.4.0.15.0'
+
+        """
+
+        os_type = platform.system().lower()
+        os_version = platform.release()
+
+        # See docstring
+        if os_type == "windows":
+            os_version = platform.version()
+        # Align SunOS with conventions
+        elif os_type == "sunos":
+            os_type = "solaris"
+            os_version = platform.version()
+
+        return Resource(
+            {
+                OS_TYPE: os_type,
+                OS_VERSION: os_version,
+            }
+        )
+
+
+class _HostResourceDetector(ResourceDetector):
+    """
+    The HostResourceDetector detects the hostname and architecture attributes.
+    """
+
+    def detect(self) -> "Resource":
+        return Resource(
+            {
+                HOST_NAME: socket.gethostname(),
+                HOST_ARCH: platform.machine(),
+            }
+        )
+
+
+def get_aggregated_resources(
+    detectors: typing.List["ResourceDetector"],
+    initial_resource: typing.Optional[Resource] = None,
+    timeout: int = 5,
+) -> "Resource":
+    """Retrieves resources from detectors in the order that they were passed
+
+    :param detectors: List of detectors, in order of priority
+    :param initial_resource: Static resource. This has the highest priority
+    :param timeout: Number of seconds to wait for each detector to return
+    :return: The merged resource
+    """
+    detectors_merged_resource = initial_resource or Resource.create()
+
+    with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
+        futures = [executor.submit(detector.detect) for detector in detectors]
+        for detector_ind, future in enumerate(futures):
+            detector = detectors[detector_ind]
+            detected_resource: Resource = _EMPTY_RESOURCE
+            try:
+                detected_resource = future.result(timeout=timeout)
+            except concurrent.futures.TimeoutError as ex:
+                if detector.raise_on_error:
+                    raise ex
+                logger.warning(
+                    "Detector %s took longer than %s seconds, skipping",
+                    detector,
+                    timeout,
+                )
+            # pylint: disable=broad-exception-caught
+            except Exception as ex:
+                if detector.raise_on_error:
+                    raise ex
+                logger.warning(
+                    "Exception %s in detector %s, ignoring", ex, detector
+                )
+            finally:
+                detectors_merged_resource = detectors_merged_resource.merge(
+                    detected_resource
+                )
+
+    return detectors_merged_resource
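+
+
+# Usage sketch (a hedged example, assuming the detectors defined above):
+#
+#     resource = get_aggregated_resources(
+#         [OTELResourceDetector(), ProcessResourceDetector()],
+#         timeout=5,
+#     )
+#
+# Because each result is merged onto the accumulator with ``Resource.merge``,
+# later detectors win when the same attribute key is reported twice.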
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/trace/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/trace/__init__.py
new file mode 100644
index 00000000..3ac45806
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/trace/__init__.py
@@ -0,0 +1,1305 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# pylint: disable=too-many-lines
+import abc
+import atexit
+import concurrent.futures
+import json
+import logging
+import threading
+import traceback
+import typing
+from os import environ
+from time import time_ns
+from types import MappingProxyType, TracebackType
+from typing import (
+    Any,
+    Callable,
+    Dict,
+    Iterator,
+    List,
+    Mapping,
+    MutableMapping,
+    Optional,
+    Sequence,
+    Tuple,
+    Type,
+    Union,
+)
+from warnings import filterwarnings
+
+from deprecated import deprecated
+
+from opentelemetry import context as context_api
+from opentelemetry import trace as trace_api
+from opentelemetry.attributes import BoundedAttributes
+from opentelemetry.sdk import util
+from opentelemetry.sdk.environment_variables import (
+    OTEL_ATTRIBUTE_COUNT_LIMIT,
+    OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT,
+    OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT,
+    OTEL_LINK_ATTRIBUTE_COUNT_LIMIT,
+    OTEL_SDK_DISABLED,
+    OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT,
+    OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT,
+    OTEL_SPAN_EVENT_COUNT_LIMIT,
+    OTEL_SPAN_LINK_COUNT_LIMIT,
+)
+from opentelemetry.sdk.resources import Resource
+from opentelemetry.sdk.trace import sampling
+from opentelemetry.sdk.trace.id_generator import IdGenerator, RandomIdGenerator
+from opentelemetry.sdk.util import BoundedList
+from opentelemetry.sdk.util.instrumentation import (
+    InstrumentationInfo,
+    InstrumentationScope,
+)
+from opentelemetry.semconv.attributes.exception_attributes import (
+    EXCEPTION_ESCAPED,
+    EXCEPTION_MESSAGE,
+    EXCEPTION_STACKTRACE,
+    EXCEPTION_TYPE,
+)
+from opentelemetry.trace import NoOpTracer, SpanContext
+from opentelemetry.trace.status import Status, StatusCode
+from opentelemetry.util import types
+from opentelemetry.util._decorator import _agnosticcontextmanager
+
+logger = logging.getLogger(__name__)
+
+_DEFAULT_OTEL_ATTRIBUTE_COUNT_LIMIT = 128
+_DEFAULT_OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT = 128
+_DEFAULT_OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT = 128
+_DEFAULT_OTEL_LINK_ATTRIBUTE_COUNT_LIMIT = 128
+_DEFAULT_OTEL_SPAN_EVENT_COUNT_LIMIT = 128
+_DEFAULT_OTEL_SPAN_LINK_COUNT_LIMIT = 128
+
+
+_ENV_VALUE_UNSET = ""
+
+
+class SpanProcessor:
+    """Interface which allows hooks for SDK's `Span` start and end method
+    invocations.
+
+    Span processors can be registered directly using
+    :func:`TracerProvider.add_span_processor` and they are invoked
+    in the same order as they were registered.
+    """
+
+    def on_start(
+        self,
+        span: "Span",
+        parent_context: Optional[context_api.Context] = None,
+    ) -> None:
+        """Called when a :class:`opentelemetry.trace.Span` is started.
+
+        This method is called synchronously on the thread that starts the
+        span, therefore it should not block or throw an exception.
+
+        Args:
+            span: The :class:`opentelemetry.trace.Span` that just started.
+            parent_context: The parent context of the span that just started.
+        """
+
+    def on_end(self, span: "ReadableSpan") -> None:
+        """Called when a :class:`opentelemetry.trace.Span` is ended.
+
+        This method is called synchronously on the thread that ends the
+        span, therefore it should not block or throw an exception.
+
+        Args:
+            span: The :class:`opentelemetry.trace.Span` that just ended.
+        """
+
+    def shutdown(self) -> None:
+        """Called when a :class:`opentelemetry.sdk.trace.TracerProvider` is shutdown."""
+
+    def force_flush(self, timeout_millis: int = 30000) -> bool:
+        """Export all ended spans to the configured Exporter that have not yet
+        been exported.
+
+        Args:
+            timeout_millis: The maximum amount of time to wait for spans to be
+                exported.
+
+        Returns:
+            False if the timeout is exceeded, True otherwise.
+        """
+
+
+# Temporary fix until https://github.com/PyCQA/pylint/issues/4098 is resolved
+# pylint:disable=no-member
+class SynchronousMultiSpanProcessor(SpanProcessor):
+    """Implementation of class:`SpanProcessor` that forwards all received
+    events to a list of span processors sequentially.
+
+    The underlying span processors are called in sequential order as they were
+    added.
+    """
+
+    _span_processors: Tuple[SpanProcessor, ...]
+
+    def __init__(self):
+        # use a tuple to avoid race conditions when adding a new span
+        # processor while iterating over it in "on_start" and "on_end".
+        self._span_processors = ()
+        self._lock = threading.Lock()
+
+    def add_span_processor(self, span_processor: SpanProcessor) -> None:
+        """Adds a SpanProcessor to the list handled by this instance."""
+        with self._lock:
+            self._span_processors += (span_processor,)
+
+    def on_start(
+        self,
+        span: "Span",
+        parent_context: Optional[context_api.Context] = None,
+    ) -> None:
+        for sp in self._span_processors:
+            sp.on_start(span, parent_context=parent_context)
+
+    def on_end(self, span: "ReadableSpan") -> None:
+        for sp in self._span_processors:
+            sp.on_end(span)
+
+    def shutdown(self) -> None:
+        """Sequentially shuts down all underlying span processors."""
+        for sp in self._span_processors:
+            sp.shutdown()
+
+    def force_flush(self, timeout_millis: int = 30000) -> bool:
+        """Sequentially calls force_flush on all underlying
+        :class:`SpanProcessor`
+
+        Args:
+            timeout_millis: The maximum amount of time, across all span
+                processors, to wait for spans to be exported. If the first n
+                span processors exhaust the timeout, the remaining span
+                processors will be skipped.
+
+        Returns:
+            True if all span processors flushed their spans within the
+            given timeout, False otherwise.
+        """
+        deadline_ns = time_ns() + timeout_millis * 1000000
+        for sp in self._span_processors:
+            current_time_ns = time_ns()
+            if current_time_ns >= deadline_ns:
+                return False
+
+            if not sp.force_flush((deadline_ns - current_time_ns) // 1000000):
+                return False
+
+        return True
+
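+# Worked example of the deadline arithmetic above: timeout_millis * 1_000_000
+# converts milliseconds to nanoseconds, and each processor only receives the
+# remaining budget. With a 30_000 ms timeout, if the first processor takes
+# 10_000 ms, the second is called with force_flush(20_000) (approximately).
+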
+
+class ConcurrentMultiSpanProcessor(SpanProcessor):
+    """Implementation of :class:`SpanProcessor` that forwards all received
+    events to a list of span processors in parallel.
+
+    Calls to the underlying span processors are forwarded in parallel by
+    submitting them to a thread pool executor and waiting until each span
+    processor finished its work.
+
+    Args:
+        num_threads: The number of threads managed by the thread pool executor
+            and thus defining how many span processors can work in parallel.
+    """
+
+    def __init__(self, num_threads: int = 2):
+        # use a tuple to avoid race conditions when adding a new span
+        # processor while iterating over it in "on_start" and "on_end".
+        self._span_processors = ()  # type: Tuple[SpanProcessor, ...]
+        self._lock = threading.Lock()
+        self._executor = concurrent.futures.ThreadPoolExecutor(
+            max_workers=num_threads
+        )
+
+    def add_span_processor(self, span_processor: SpanProcessor) -> None:
+        """Adds a SpanProcessor to the list handled by this instance."""
+        with self._lock:
+            self._span_processors += (span_processor,)
+
+    def _submit_and_await(
+        self,
+        func: Callable[[SpanProcessor], Callable[..., None]],
+        *args: Any,
+        **kwargs: Any,
+    ):
+        futures = []
+        for sp in self._span_processors:
+            future = self._executor.submit(func(sp), *args, **kwargs)
+            futures.append(future)
+        for future in futures:
+            future.result()
+
+    def on_start(
+        self,
+        span: "Span",
+        parent_context: Optional[context_api.Context] = None,
+    ) -> None:
+        self._submit_and_await(
+            lambda sp: sp.on_start, span, parent_context=parent_context
+        )
+
+    def on_end(self, span: "ReadableSpan") -> None:
+        self._submit_and_await(lambda sp: sp.on_end, span)
+
+    def shutdown(self) -> None:
+        """Shuts down all underlying span processors in parallel."""
+        self._submit_and_await(lambda sp: sp.shutdown)
+
+    def force_flush(self, timeout_millis: int = 30000) -> bool:
+        """Calls force_flush on all underlying span processors in parallel.
+
+        Args:
+            timeout_millis: The maximum amount of time to wait for spans to be
+                exported.
+
+        Returns:
+            True if all span processors flushed their spans within the given
+            timeout, False otherwise.
+        """
+        futures = []
+        for sp in self._span_processors:  # type: SpanProcessor
+            future = self._executor.submit(sp.force_flush, timeout_millis)
+            futures.append(future)
+
+        timeout_sec = timeout_millis / 1e3
+        done_futures, not_done_futures = concurrent.futures.wait(
+            futures, timeout_sec
+        )
+        if not_done_futures:
+            return False
+
+        for future in done_futures:
+            if not future.result():
+                return False
+
+        return True
+
+
+class EventBase(abc.ABC):
+    def __init__(self, name: str, timestamp: Optional[int] = None) -> None:
+        self._name = name
+        if timestamp is None:
+            self._timestamp = time_ns()
+        else:
+            self._timestamp = timestamp
+
+    @property
+    def name(self) -> str:
+        return self._name
+
+    @property
+    def timestamp(self) -> int:
+        return self._timestamp
+
+    @property
+    @abc.abstractmethod
+    def attributes(self) -> types.Attributes:
+        pass
+
+
+class Event(EventBase):
+    """A text annotation with a set of attributes. The attributes of an event
+    are immutable.
+
+    Args:
+        name: Name of the event.
+        attributes: Attributes of the event.
+        timestamp: Timestamp of the event. If `None` it will be filled in
+            automatically.
+    """
+
+    def __init__(
+        self,
+        name: str,
+        attributes: types.Attributes = None,
+        timestamp: Optional[int] = None,
+        limit: Optional[int] = _DEFAULT_OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT,
+    ) -> None:
+        super().__init__(name, timestamp)
+        # ``limit`` is accepted for backward compatibility but is not applied
+        # here; attribute bounds are enforced by the caller (see
+        # ``Span.add_event``, which passes in ``BoundedAttributes``).
+        self._attributes = attributes
+
+    @property
+    def attributes(self) -> types.Attributes:
+        return self._attributes
+
+    @property
+    def dropped_attributes(self) -> int:
+        if isinstance(self._attributes, BoundedAttributes):
+            return self._attributes.dropped
+        return 0
+
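+# Usage sketch: events are normally created for you by ``Span.add_event``,
+# but a standalone instance looks like this (timestamp defaults to now, in
+# nanoseconds since the epoch):
+#
+#     event = Event("cache.miss", attributes={"cache.key": "user:42"})
+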
+
+def _check_span_ended(func):
+    def wrapper(self, *args, **kwargs):
+        already_ended = False
+        with self._lock:  # pylint: disable=protected-access
+            if self._end_time is None:  # pylint: disable=protected-access
+                func(self, *args, **kwargs)
+            else:
+                already_ended = True
+
+        if already_ended:
+            logger.warning("Tried calling %s on an ended span.", func.__name__)
+
+    return wrapper
+
+
+def _is_valid_link(context: SpanContext, attributes: types.Attributes) -> bool:
+    return bool(
+        context and (context.is_valid or (attributes or context.trace_state))
+    )
+
+
+class ReadableSpan:
+    """Provides read-only access to span attributes.
+
+    Users should NOT be creating these objects directly. `ReadableSpan`s are
+    created as a direct result of using the tracing pipeline via the `Tracer`.
+
+    """
+
+    def __init__(
+        self,
+        name: str,
+        context: Optional[trace_api.SpanContext] = None,
+        parent: Optional[trace_api.SpanContext] = None,
+        resource: Optional[Resource] = None,
+        attributes: types.Attributes = None,
+        events: Sequence[Event] = (),
+        links: Sequence[trace_api.Link] = (),
+        kind: trace_api.SpanKind = trace_api.SpanKind.INTERNAL,
+        instrumentation_info: Optional[InstrumentationInfo] = None,
+        status: Status = Status(StatusCode.UNSET),
+        start_time: Optional[int] = None,
+        end_time: Optional[int] = None,
+        instrumentation_scope: Optional[InstrumentationScope] = None,
+    ) -> None:
+        self._name = name
+        self._context = context
+        self._kind = kind
+        self._instrumentation_info = instrumentation_info
+        self._instrumentation_scope = instrumentation_scope
+        self._parent = parent
+        self._start_time = start_time
+        self._end_time = end_time
+        self._attributes = attributes
+        self._events = events
+        self._links = links
+        if resource is None:
+            self._resource = Resource.create({})
+        else:
+            self._resource = resource
+        self._status = status
+
+    @property
+    def dropped_attributes(self) -> int:
+        if isinstance(self._attributes, BoundedAttributes):
+            return self._attributes.dropped
+        return 0
+
+    @property
+    def dropped_events(self) -> int:
+        if isinstance(self._events, BoundedList):
+            return self._events.dropped
+        return 0
+
+    @property
+    def dropped_links(self) -> int:
+        if isinstance(self._links, BoundedList):
+            return self._links.dropped
+        return 0
+
+    @property
+    def name(self) -> str:
+        return self._name
+
+    def get_span_context(self):
+        return self._context
+
+    @property
+    def context(self):
+        return self._context
+
+    @property
+    def kind(self) -> trace_api.SpanKind:
+        return self._kind
+
+    @property
+    def parent(self) -> Optional[trace_api.SpanContext]:
+        return self._parent
+
+    @property
+    def start_time(self) -> Optional[int]:
+        return self._start_time
+
+    @property
+    def end_time(self) -> Optional[int]:
+        return self._end_time
+
+    @property
+    def status(self) -> trace_api.Status:
+        return self._status
+
+    @property
+    def attributes(self) -> types.Attributes:
+        return MappingProxyType(self._attributes or {})
+
+    @property
+    def events(self) -> Sequence[Event]:
+        return tuple(event for event in self._events)
+
+    @property
+    def links(self) -> Sequence[trace_api.Link]:
+        return tuple(link for link in self._links)
+
+    @property
+    def resource(self) -> Resource:
+        return self._resource
+
+    @property
+    @deprecated(
+        version="1.11.1", reason="You should use instrumentation_scope"
+    )
+    def instrumentation_info(self) -> Optional[InstrumentationInfo]:
+        return self._instrumentation_info
+
+    @property
+    def instrumentation_scope(self) -> Optional[InstrumentationScope]:
+        return self._instrumentation_scope
+
+    def to_json(self, indent: Optional[int] = 4):
+        parent_id = None
+        if self.parent is not None:
+            parent_id = f"0x{trace_api.format_span_id(self.parent.span_id)}"
+
+        start_time = None
+        if self._start_time:
+            start_time = util.ns_to_iso_str(self._start_time)
+
+        end_time = None
+        if self._end_time:
+            end_time = util.ns_to_iso_str(self._end_time)
+
+        status = {
+            "status_code": str(self._status.status_code.name),
+        }
+        if self._status.description:
+            status["description"] = self._status.description
+
+        f_span = {
+            "name": self._name,
+            "context": (
+                self._format_context(self._context) if self._context else None
+            ),
+            "kind": str(self.kind),
+            "parent_id": parent_id,
+            "start_time": start_time,
+            "end_time": end_time,
+            "status": status,
+            "attributes": self._format_attributes(self._attributes),
+            "events": self._format_events(self._events),
+            "links": self._format_links(self._links),
+            "resource": json.loads(self.resource.to_json()),
+        }
+
+        return json.dumps(f_span, indent=indent)
+
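+    # Usage sketch: ``to_json`` is convenient for debugging, e.g. printing
+    # every ended span from a ``SpanProcessor.on_end`` hook:
+    #
+    #     print(readable_span.to_json(indent=2))
+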
+    @staticmethod
+    def _format_context(context: SpanContext) -> Dict[str, str]:
+        return {
+            "trace_id": f"0x{trace_api.format_trace_id(context.trace_id)}",
+            "span_id": f"0x{trace_api.format_span_id(context.span_id)}",
+            "trace_state": repr(context.trace_state),
+        }
+
+    @staticmethod
+    def _format_attributes(
+        attributes: types.Attributes,
+    ) -> Optional[Dict[str, Any]]:
+        if attributes is not None and not isinstance(attributes, dict):
+            return dict(attributes)
+        return attributes
+
+    @staticmethod
+    def _format_events(events: Sequence[Event]) -> List[Dict[str, Any]]:
+        return [
+            {
+                "name": event.name,
+                "timestamp": util.ns_to_iso_str(event.timestamp),
+                "attributes": Span._format_attributes(  # pylint: disable=protected-access
+                    event.attributes
+                ),
+            }
+            for event in events
+        ]
+
+    @staticmethod
+    def _format_links(links: Sequence[trace_api.Link]) -> List[Dict[str, Any]]:
+        return [
+            {
+                "context": Span._format_context(  # pylint: disable=protected-access
+                    link.context
+                ),
+                "attributes": Span._format_attributes(  # pylint: disable=protected-access
+                    link.attributes
+                ),
+            }
+            for link in links
+        ]
+
+
+class SpanLimits:
+    """The limits that should be enforce on recorded data such as events, links, attributes etc.
+
+    This class does not enforce any limits itself. It only provides an a way read limits from env,
+    default values and from user provided arguments.
+
+    All limit arguments must be either a non-negative integer, ``None`` or ``SpanLimits.UNSET``.
+
+    - All limit arguments are optional.
+    - If a limit argument is not set, the class will try to read its value from the corresponding
+      environment variable.
+    - If the environment variable is not set, the default value, if any, will be used.
+
+    Limit precedence:
+
+    - If a model-specific limit is set, it will be used.
+    - Else if the corresponding global limit is set, it will be used.
+    - Else if the model-specific limit has a default value, the default value will be used.
+    - Else if the global limit has a default value, the default value will be used.
+
+    Args:
+        max_attributes: Maximum number of attributes that can be added to a span, event, and link.
+            Environment variable: OTEL_ATTRIBUTE_COUNT_LIMIT
+            Default: {_DEFAULT_OTEL_ATTRIBUTE_COUNT_LIMIT}
+        max_events: Maximum number of events that can be added to a Span.
+            Environment variable: OTEL_SPAN_EVENT_COUNT_LIMIT
+            Default: {_DEFAULT_OTEL_SPAN_EVENT_COUNT_LIMIT}
+        max_links: Maximum number of links that can be added to a Span.
+            Environment variable: OTEL_SPAN_LINK_COUNT_LIMIT
+            Default: {_DEFAULT_OTEL_SPAN_LINK_COUNT_LIMIT}
+        max_span_attributes: Maximum number of attributes that can be added to a Span.
+            Environment variable: OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT
+            Default: {_DEFAULT_OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT}
+        max_event_attributes: Maximum number of attributes that can be added to an Event.
+            Environment variable: OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT
+            Default: {_DEFAULT_OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT}
+        max_link_attributes: Maximum number of attributes that can be added to a Link.
+            Environment variable: OTEL_LINK_ATTRIBUTE_COUNT_LIMIT
+            Default: {_DEFAULT_OTEL_LINK_ATTRIBUTE_COUNT_LIMIT}
+        max_attribute_length: Maximum length an attribute value can have. Values longer than
+            the specified length will be truncated.
+            Environment variable: OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT
+        max_span_attribute_length: Maximum length a span attribute value can have. Values longer
+            than the specified length will be truncated.
+            Environment variable: OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT
+    """
+
+    UNSET = -1
+
+    def __init__(
+        self,
+        max_attributes: Optional[int] = None,
+        max_events: Optional[int] = None,
+        max_links: Optional[int] = None,
+        max_span_attributes: Optional[int] = None,
+        max_event_attributes: Optional[int] = None,
+        max_link_attributes: Optional[int] = None,
+        max_attribute_length: Optional[int] = None,
+        max_span_attribute_length: Optional[int] = None,
+    ):
+        # span events and links count
+        self.max_events = self._from_env_if_absent(
+            max_events,
+            OTEL_SPAN_EVENT_COUNT_LIMIT,
+            _DEFAULT_OTEL_SPAN_EVENT_COUNT_LIMIT,
+        )
+        self.max_links = self._from_env_if_absent(
+            max_links,
+            OTEL_SPAN_LINK_COUNT_LIMIT,
+            _DEFAULT_OTEL_SPAN_LINK_COUNT_LIMIT,
+        )
+
+        # attribute count
+        global_max_attributes = self._from_env_if_absent(
+            max_attributes, OTEL_ATTRIBUTE_COUNT_LIMIT
+        )
+        self.max_attributes = (
+            global_max_attributes
+            if global_max_attributes is not None
+            else _DEFAULT_OTEL_ATTRIBUTE_COUNT_LIMIT
+        )
+
+        self.max_span_attributes = self._from_env_if_absent(
+            max_span_attributes,
+            OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT,
+            (
+                global_max_attributes
+                if global_max_attributes is not None
+                else _DEFAULT_OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT
+            ),
+        )
+        self.max_event_attributes = self._from_env_if_absent(
+            max_event_attributes,
+            OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT,
+            (
+                global_max_attributes
+                if global_max_attributes is not None
+                else _DEFAULT_OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT
+            ),
+        )
+        self.max_link_attributes = self._from_env_if_absent(
+            max_link_attributes,
+            OTEL_LINK_ATTRIBUTE_COUNT_LIMIT,
+            (
+                global_max_attributes
+                if global_max_attributes is not None
+                else _DEFAULT_OTEL_LINK_ATTRIBUTE_COUNT_LIMIT
+            ),
+        )
+
+        # attribute length
+        self.max_attribute_length = self._from_env_if_absent(
+            max_attribute_length,
+            OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT,
+        )
+        self.max_span_attribute_length = self._from_env_if_absent(
+            max_span_attribute_length,
+            OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT,
+            # use global attribute length limit as default
+            self.max_attribute_length,
+        )
+
+    def __repr__(self):
+        return f"{type(self).__name__}(max_span_attributes={self.max_span_attributes}, max_events_attributes={self.max_event_attributes}, max_link_attributes={self.max_link_attributes}, max_attributes={self.max_attributes}, max_events={self.max_events}, max_links={self.max_links}, max_attribute_length={self.max_attribute_length})"
+
+    @classmethod
+    def _from_env_if_absent(
+        cls, value: Optional[int], env_var: str, default: Optional[int] = None
+    ) -> Optional[int]:
+        if value == cls.UNSET:
+            return None
+
+        err_msg = "{} must be a non-negative integer but got {}"
+
+        # if no value is provided for the limit, try to load it from env
+        if value is None:
+            # return default value if env var is not set
+            if env_var not in environ:
+                return default
+
+            str_value = environ.get(env_var, "").strip().lower()
+            if str_value == _ENV_VALUE_UNSET:
+                return None
+
+            try:
+                value = int(str_value)
+            except ValueError:
+                raise ValueError(err_msg.format(env_var, str_value))
+
+        if value < 0:
+            raise ValueError(err_msg.format(env_var, value))
+        return value
+
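+# Configuration sketch: limits can come from constructor arguments or the
+# environment, e.g. OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT=64 in the environment, or
+#
+#     limits = SpanLimits(max_events=32, max_attribute_length=256)
+#
+# Passing ``SpanLimits.UNSET`` (-1) disables a limit entirely, as the
+# ``_UnsetLimits`` instance below does for every limit.
+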
+
+_UnsetLimits = SpanLimits(
+    max_attributes=SpanLimits.UNSET,
+    max_events=SpanLimits.UNSET,
+    max_links=SpanLimits.UNSET,
+    max_span_attributes=SpanLimits.UNSET,
+    max_event_attributes=SpanLimits.UNSET,
+    max_link_attributes=SpanLimits.UNSET,
+    max_attribute_length=SpanLimits.UNSET,
+    max_span_attribute_length=SpanLimits.UNSET,
+)
+
+# Kept for backward compatibility. Please use SpanLimits instead.
+SPAN_ATTRIBUTE_COUNT_LIMIT = SpanLimits._from_env_if_absent(  # pylint: disable=protected-access
+    None,
+    OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT,
+    _DEFAULT_OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT,
+)
+
+
+class Span(trace_api.Span, ReadableSpan):
+    """See `opentelemetry.trace.Span`.
+
+    Users should create `Span` objects via the `Tracer` instead of this
+    constructor.
+
+    Args:
+        name: The name of the operation this span represents
+        context: The immutable span context
+        parent: This span's parent's `opentelemetry.trace.SpanContext`, or
+            None if this is a root span
+        sampler: The sampler used to create this span
+        trace_config: TODO
+        resource: Entity producing telemetry
+        attributes: The span's attributes to be exported
+        events: Timestamped events to be exported
+        links: Links to other spans to be exported
+        span_processor: `SpanProcessor` to invoke when starting and ending
+            this `Span`.
+        limits: `SpanLimits` instance that was passed to the `TracerProvider`
+    """
+
+    def __new__(cls, *args, **kwargs):
+        if cls is Span:
+            raise TypeError("Span must be instantiated via a tracer.")
+        return super().__new__(cls)
+
+    # pylint: disable=too-many-locals
+    def __init__(
+        self,
+        name: str,
+        context: trace_api.SpanContext,
+        parent: Optional[trace_api.SpanContext] = None,
+        sampler: Optional[sampling.Sampler] = None,
+        trace_config: None = None,  # TODO
+        resource: Optional[Resource] = None,
+        attributes: types.Attributes = None,
+        events: Optional[Sequence[Event]] = None,
+        links: Sequence[trace_api.Link] = (),
+        kind: trace_api.SpanKind = trace_api.SpanKind.INTERNAL,
+        span_processor: SpanProcessor = SpanProcessor(),
+        instrumentation_info: Optional[InstrumentationInfo] = None,
+        record_exception: bool = True,
+        set_status_on_exception: bool = True,
+        limits=_UnsetLimits,
+        instrumentation_scope: Optional[InstrumentationScope] = None,
+    ) -> None:
+        if resource is None:
+            resource = Resource.create({})
+        super().__init__(
+            name=name,
+            context=context,
+            parent=parent,
+            kind=kind,
+            resource=resource,
+            instrumentation_info=instrumentation_info,
+            instrumentation_scope=instrumentation_scope,
+        )
+        self._sampler = sampler
+        self._trace_config = trace_config
+        self._record_exception = record_exception
+        self._set_status_on_exception = set_status_on_exception
+        self._span_processor = span_processor
+        self._limits = limits
+        self._lock = threading.Lock()
+        self._attributes = BoundedAttributes(
+            self._limits.max_span_attributes,
+            attributes,
+            immutable=False,
+            max_value_len=self._limits.max_span_attribute_length,
+        )
+        self._events = self._new_events()
+        if events:
+            for event in events:
+                event._attributes = BoundedAttributes(
+                    self._limits.max_event_attributes,
+                    event.attributes,
+                    max_value_len=self._limits.max_attribute_length,
+                )
+                self._events.append(event)
+
+        self._links = self._new_links(links)
+
+    def __repr__(self):
+        return f'{type(self).__name__}(name="{self._name}", context={self._context})'
+
+    def _new_events(self):
+        return BoundedList(self._limits.max_events)
+
+    def _new_links(self, links: Sequence[trace_api.Link]):
+        if not links:
+            return BoundedList(self._limits.max_links)
+
+        valid_links = []
+        for link in links:
+            if link and _is_valid_link(link.context, link.attributes):
+                # pylint: disable=protected-access
+                link._attributes = BoundedAttributes(
+                    self._limits.max_link_attributes,
+                    link.attributes,
+                    max_value_len=self._limits.max_attribute_length,
+                )
+                valid_links.append(link)
+
+        return BoundedList.from_seq(self._limits.max_links, valid_links)
+
+    def get_span_context(self):
+        return self._context
+
+    def set_attributes(
+        self, attributes: Mapping[str, types.AttributeValue]
+    ) -> None:
+        with self._lock:
+            if self._end_time is not None:
+                logger.warning("Setting attribute on ended span.")
+                return
+
+            for key, value in attributes.items():
+                self._attributes[key] = value
+
+    def set_attribute(self, key: str, value: types.AttributeValue) -> None:
+        return self.set_attributes({key: value})
+
+    @_check_span_ended
+    def _add_event(self, event: EventBase) -> None:
+        self._events.append(event)
+
+    def add_event(
+        self,
+        name: str,
+        attributes: types.Attributes = None,
+        timestamp: Optional[int] = None,
+    ) -> None:
+        attributes = BoundedAttributes(
+            self._limits.max_event_attributes,
+            attributes,
+            max_value_len=self._limits.max_attribute_length,
+        )
+        self._add_event(
+            Event(
+                name=name,
+                attributes=attributes,
+                timestamp=timestamp,
+            )
+        )
+
+    @_check_span_ended
+    def _add_link(self, link: trace_api.Link) -> None:
+        self._links.append(link)
+
+    def add_link(
+        self,
+        context: SpanContext,
+        attributes: types.Attributes = None,
+    ) -> None:
+        if not _is_valid_link(context, attributes):
+            return
+
+        attributes = BoundedAttributes(
+            self._limits.max_link_attributes,
+            attributes,
+            max_value_len=self._limits.max_attribute_length,
+        )
+        self._add_link(
+            trace_api.Link(
+                context=context,
+                attributes=attributes,
+            )
+        )
+
+    def _readable_span(self) -> ReadableSpan:
+        return ReadableSpan(
+            name=self._name,
+            context=self._context,
+            parent=self._parent,
+            resource=self._resource,
+            attributes=self._attributes,
+            events=self._events,
+            links=self._links,
+            kind=self.kind,
+            status=self._status,
+            start_time=self._start_time,
+            end_time=self._end_time,
+            instrumentation_info=self._instrumentation_info,
+            instrumentation_scope=self._instrumentation_scope,
+        )
+
+    def start(
+        self,
+        start_time: Optional[int] = None,
+        parent_context: Optional[context_api.Context] = None,
+    ) -> None:
+        with self._lock:
+            if self._start_time is not None:
+                logger.warning("Calling start() on a started span.")
+                return
+            self._start_time = (
+                start_time if start_time is not None else time_ns()
+            )
+
+        self._span_processor.on_start(self, parent_context=parent_context)
+
+    def end(self, end_time: Optional[int] = None) -> None:
+        with self._lock:
+            if self._start_time is None:
+                raise RuntimeError("Calling end() on a not started span.")
+            if self._end_time is not None:
+                logger.warning("Calling end() on an ended span.")
+                return
+
+            self._end_time = end_time if end_time is not None else time_ns()
+
+        self._span_processor.on_end(self._readable_span())
+
+    @_check_span_ended
+    def update_name(self, name: str) -> None:
+        self._name = name
+
+    def is_recording(self) -> bool:
+        return self._end_time is None
+
+    @_check_span_ended
+    def set_status(
+        self,
+        status: typing.Union[Status, StatusCode],
+        description: typing.Optional[str] = None,
+    ) -> None:
+        # Ignore future calls if status is already set to OK
+        # Ignore calls to set to StatusCode.UNSET
+        if isinstance(status, Status):
+            if (
+                self._status
+                and self._status.status_code is StatusCode.OK
+                or status.status_code is StatusCode.UNSET
+            ):
+                return
+            if description is not None:
+                logger.warning(
+                    "Description %s ignored. Use either `Status` or `(StatusCode, Description)`",
+                    description,
+                )
+            self._status = status
+        elif isinstance(status, StatusCode):
+            if (
+                self._status
+                and self._status.status_code is StatusCode.OK
+                or status is StatusCode.UNSET
+            ):
+                return
+            self._status = Status(status, description)
+
+    def __exit__(
+        self,
+        exc_type: Optional[Type[BaseException]],
+        exc_val: Optional[BaseException],
+        exc_tb: Optional[TracebackType],
+    ) -> None:
+        """Ends context manager and calls `end` on the `Span`."""
+        if exc_val is not None and self.is_recording():
+            # Record the exception as an event
+            # pylint:disable=protected-access
+            if self._record_exception:
+                self.record_exception(exception=exc_val, escaped=True)
+            # Records status if span is used as context manager
+            # i.e. with tracer.start_span() as span:
+            if self._set_status_on_exception:
+                self.set_status(
+                    Status(
+                        status_code=StatusCode.ERROR,
+                        description=f"{exc_type.__name__}: {exc_val}",
+                    )
+                )
+
+        super().__exit__(exc_type, exc_val, exc_tb)
+
+    def record_exception(
+        self,
+        exception: BaseException,
+        attributes: types.Attributes = None,
+        timestamp: Optional[int] = None,
+        escaped: bool = False,
+    ) -> None:
+        """Records an exception as a span event."""
+        # TODO: keep only exception as first argument after baseline is 3.10
+        stacktrace = "".join(
+            traceback.format_exception(
+                type(exception), value=exception, tb=exception.__traceback__
+            )
+        )
+        module = type(exception).__module__
+        qualname = type(exception).__qualname__
+        exception_type = (
+            f"{module}.{qualname}"
+            if module and module != "builtins"
+            else qualname
+        )
+        _attributes: MutableMapping[str, types.AttributeValue] = {
+            EXCEPTION_TYPE: exception_type,
+            EXCEPTION_MESSAGE: str(exception),
+            EXCEPTION_STACKTRACE: stacktrace,
+            EXCEPTION_ESCAPED: str(escaped),
+        }
+        if attributes:
+            _attributes.update(attributes)
+        self.add_event(
+            name="exception", attributes=_attributes, timestamp=timestamp
+        )
+
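+# Usage sketch for ``Span.record_exception`` (assuming ``span`` is a
+# recording SDK span):
+#
+#     try:
+#         1 / 0
+#     except ZeroDivisionError as exc:
+#         span.record_exception(exc, attributes={"retryable": False})
+#
+# This adds an "exception" event carrying the exception type, message,
+# stacktrace and the ``exception.escaped`` flag.
+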
+
+class _Span(Span):
+    """Protected implementation of `opentelemetry.trace.Span`.
+
+    This class exists to prevent the instantiation of the `Span` class
+    by mechanisms other than the `Tracer`.
+    """
+
+
+class Tracer(trace_api.Tracer):
+    """See `opentelemetry.trace.Tracer`."""
+
+    def __init__(
+        self,
+        sampler: sampling.Sampler,
+        resource: Resource,
+        span_processor: Union[
+            SynchronousMultiSpanProcessor, ConcurrentMultiSpanProcessor
+        ],
+        id_generator: IdGenerator,
+        instrumentation_info: InstrumentationInfo,
+        span_limits: SpanLimits,
+        instrumentation_scope: InstrumentationScope,
+    ) -> None:
+        self.sampler = sampler
+        self.resource = resource
+        self.span_processor = span_processor
+        self.id_generator = id_generator
+        self.instrumentation_info = instrumentation_info
+        self._span_limits = span_limits
+        self._instrumentation_scope = instrumentation_scope
+
+    @_agnosticcontextmanager  # pylint: disable=protected-access
+    def start_as_current_span(
+        self,
+        name: str,
+        context: Optional[context_api.Context] = None,
+        kind: trace_api.SpanKind = trace_api.SpanKind.INTERNAL,
+        attributes: types.Attributes = None,
+        links: Optional[Sequence[trace_api.Link]] = (),
+        start_time: Optional[int] = None,
+        record_exception: bool = True,
+        set_status_on_exception: bool = True,
+        end_on_exit: bool = True,
+    ) -> Iterator[trace_api.Span]:
+        span = self.start_span(
+            name=name,
+            context=context,
+            kind=kind,
+            attributes=attributes,
+            links=links,
+            start_time=start_time,
+            record_exception=record_exception,
+            set_status_on_exception=set_status_on_exception,
+        )
+        with trace_api.use_span(
+            span,
+            end_on_exit=end_on_exit,
+            record_exception=record_exception,
+            set_status_on_exception=set_status_on_exception,
+        ) as span:
+            yield span
+
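+    # Usage sketch (assuming ``tracer`` was obtained from a configured
+    # ``TracerProvider``):
+    #
+    #     with tracer.start_as_current_span("handle-request") as span:
+    #         span.set_attribute("http.route", "/items")
+    #
+    # The span is ended automatically on exit since end_on_exit defaults
+    # to True.
+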
+    def start_span(  # pylint: disable=too-many-locals
+        self,
+        name: str,
+        context: Optional[context_api.Context] = None,
+        kind: trace_api.SpanKind = trace_api.SpanKind.INTERNAL,
+        attributes: types.Attributes = None,
+        links: Optional[Sequence[trace_api.Link]] = (),
+        start_time: Optional[int] = None,
+        record_exception: bool = True,
+        set_status_on_exception: bool = True,
+    ) -> trace_api.Span:
+        parent_span_context = trace_api.get_current_span(
+            context
+        ).get_span_context()
+
+        if parent_span_context is not None and not isinstance(
+            parent_span_context, trace_api.SpanContext
+        ):
+            raise TypeError(
+                "parent_span_context must be a SpanContext or None."
+            )
+
+        # is_valid determines root span
+        if parent_span_context is None or not parent_span_context.is_valid:
+            parent_span_context = None
+            trace_id = self.id_generator.generate_trace_id()
+        else:
+            trace_id = parent_span_context.trace_id
+
+        # The sampler decides whether to create a real or no-op span at the
+        # time of span creation. No-op spans do not record events, and are not
+        # exported.
+        # The sampler may also add attributes to the newly-created span, e.g.
+        # to include information about the sampling result.
+        # The sampler may also modify the parent span context's tracestate
+        sampling_result = self.sampler.should_sample(
+            context, trace_id, name, kind, attributes, links
+        )
+
+        trace_flags = (
+            trace_api.TraceFlags(trace_api.TraceFlags.SAMPLED)
+            if sampling_result.decision.is_sampled()
+            else trace_api.TraceFlags(trace_api.TraceFlags.DEFAULT)
+        )
+        span_context = trace_api.SpanContext(
+            trace_id,
+            self.id_generator.generate_span_id(),
+            is_remote=False,
+            trace_flags=trace_flags,
+            trace_state=sampling_result.trace_state,
+        )
+
+        # Only record if is_recording() is true
+        if sampling_result.decision.is_recording():
+            # pylint:disable=protected-access
+            span = _Span(
+                name=name,
+                context=span_context,
+                parent=parent_span_context,
+                sampler=self.sampler,
+                resource=self.resource,
+                attributes=sampling_result.attributes.copy(),
+                span_processor=self.span_processor,
+                kind=kind,
+                links=links,
+                instrumentation_info=self.instrumentation_info,
+                record_exception=record_exception,
+                set_status_on_exception=set_status_on_exception,
+                limits=self._span_limits,
+                instrumentation_scope=self._instrumentation_scope,
+            )
+            span.start(start_time=start_time, parent_context=context)
+        else:
+            span = trace_api.NonRecordingSpan(context=span_context)
+        return span
+
+
+class TracerProvider(trace_api.TracerProvider):
+    """See `opentelemetry.trace.TracerProvider`."""
+
+    def __init__(
+        self,
+        sampler: Optional[sampling.Sampler] = None,
+        resource: Optional[Resource] = None,
+        shutdown_on_exit: bool = True,
+        active_span_processor: Union[
+            SynchronousMultiSpanProcessor, ConcurrentMultiSpanProcessor, None
+        ] = None,
+        id_generator: Optional[IdGenerator] = None,
+        span_limits: Optional[SpanLimits] = None,
+    ) -> None:
+        self._active_span_processor = (
+            active_span_processor or SynchronousMultiSpanProcessor()
+        )
+        if id_generator is None:
+            self.id_generator = RandomIdGenerator()
+        else:
+            self.id_generator = id_generator
+        if resource is None:
+            self._resource = Resource.create({})
+        else:
+            self._resource = resource
+        if not sampler:
+            sampler = sampling._get_from_env_or_default()
+        self.sampler = sampler
+        self._span_limits = span_limits or SpanLimits()
+        disabled = environ.get(OTEL_SDK_DISABLED, "")
+        self._disabled = disabled.lower().strip() == "true"
+        self._atexit_handler = None
+
+        if shutdown_on_exit:
+            self._atexit_handler = atexit.register(self.shutdown)
+
+    @property
+    def resource(self) -> Resource:
+        return self._resource
+
+    def get_tracer(
+        self,
+        instrumenting_module_name: str,
+        instrumenting_library_version: typing.Optional[str] = None,
+        schema_url: typing.Optional[str] = None,
+        attributes: typing.Optional[types.Attributes] = None,
+    ) -> "trace_api.Tracer":
+        if self._disabled:
+            return NoOpTracer()
+        if not instrumenting_module_name:  # Reject empty strings too.
+            instrumenting_module_name = ""
+            logger.error("get_tracer called with missing module name.")
+        if instrumenting_library_version is None:
+            instrumenting_library_version = ""
+
+        filterwarnings(
+            "ignore",
+            message=(
+                r"Call to deprecated method __init__. \(You should use "
+                r"InstrumentationScope\) -- Deprecated since version 1.11.1."
+            ),
+            category=DeprecationWarning,
+            module="opentelemetry.sdk.trace",
+        )
+
+        instrumentation_info = InstrumentationInfo(
+            instrumenting_module_name,
+            instrumenting_library_version,
+            schema_url,
+        )
+
+        return Tracer(
+            self.sampler,
+            self.resource,
+            self._active_span_processor,
+            self.id_generator,
+            instrumentation_info,
+            self._span_limits,
+            InstrumentationScope(
+                instrumenting_module_name,
+                instrumenting_library_version,
+                schema_url,
+                attributes,
+            ),
+        )
+
+    def add_span_processor(self, span_processor: SpanProcessor) -> None:
+        """Registers a new :class:`SpanProcessor` for this `TracerProvider`.
+
+        The span processors are invoked in the same order they are registered.
+        """
+
+        # no lock here because add_span_processor is thread safe for both
+        # SynchronousMultiSpanProcessor and ConcurrentMultiSpanProcessor.
+        self._active_span_processor.add_span_processor(span_processor)
+
+    def shutdown(self) -> None:
+        """Shut down the span processors added to the tracer provider."""
+        self._active_span_processor.shutdown()
+        if self._atexit_handler is not None:
+            atexit.unregister(self._atexit_handler)
+            self._atexit_handler = None
+
+    def force_flush(self, timeout_millis: int = 30000) -> bool:
+        """Requests the active span processor to process all spans that have not
+        yet been processed.
+
+        By default force flush is called sequentially on all added span
+        processors. This means that span processors further back in the list
+        have less time to flush their spans.
+        To have span processors flush their spans in parallel it is possible to
+        initialize the tracer provider with an instance of
+        `ConcurrentMultiSpanProcessor` at the cost of using multiple threads.
+
+        Args:
+            timeout_millis: The maximum amount of time to wait for spans to be
+                processed.
+
+        Returns:
+            False if the timeout is exceeded, True otherwise.
+        """
+        return self._active_span_processor.force_flush(timeout_millis)
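+
+
+# End-to-end wiring sketch. ``BatchSpanProcessor`` and ``ConsoleSpanExporter``
+# live in this package's ``export`` module; ``set_tracer_provider`` is part of
+# the ``opentelemetry.trace`` API:
+#
+#     from opentelemetry import trace
+#     from opentelemetry.sdk.trace import TracerProvider
+#     from opentelemetry.sdk.trace.export import (
+#         BatchSpanProcessor,
+#         ConsoleSpanExporter,
+#     )
+#
+#     provider = TracerProvider()
+#     provider.add_span_processor(BatchSpanProcessor(ConsoleSpanExporter()))
+#     trace.set_tracer_provider(provider)
+#     tracer = trace.get_tracer(__name__)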
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/trace/export/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/trace/export/__init__.py
new file mode 100644
index 00000000..47d1769a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/trace/export/__init__.py
@@ -0,0 +1,517 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import annotations
+
+import collections
+import logging
+import os
+import sys
+import threading
+import typing
+from enum import Enum
+from os import environ, linesep
+from time import time_ns
+
+from opentelemetry.context import (
+    _SUPPRESS_INSTRUMENTATION_KEY,
+    Context,
+    attach,
+    detach,
+    set_value,
+)
+from opentelemetry.sdk.environment_variables import (
+    OTEL_BSP_EXPORT_TIMEOUT,
+    OTEL_BSP_MAX_EXPORT_BATCH_SIZE,
+    OTEL_BSP_MAX_QUEUE_SIZE,
+    OTEL_BSP_SCHEDULE_DELAY,
+)
+from opentelemetry.sdk.trace import ReadableSpan, Span, SpanProcessor
+from opentelemetry.util._once import Once
+
+_DEFAULT_SCHEDULE_DELAY_MILLIS = 5000
+_DEFAULT_MAX_EXPORT_BATCH_SIZE = 512
+_DEFAULT_EXPORT_TIMEOUT_MILLIS = 30000
+_DEFAULT_MAX_QUEUE_SIZE = 2048
+_ENV_VAR_INT_VALUE_ERROR_MESSAGE = (
+    "Unable to parse value for %s as integer. Defaulting to %s."
+)
+
+logger = logging.getLogger(__name__)
+
+
+class SpanExportResult(Enum):
+    SUCCESS = 0
+    FAILURE = 1
+
+
+class SpanExporter:
+    """Interface for exporting spans.
+
+    Interface to be implemented by services that want to export spans recorded
+    in their own format.
+
+    To export data this MUST be registered to the :class:`opentelemetry.sdk.trace.Tracer` using a
+    `SimpleSpanProcessor` or a `BatchSpanProcessor`.
+    """
+
+    def export(
+        self, spans: typing.Sequence[ReadableSpan]
+    ) -> "SpanExportResult":
+        """Exports a batch of telemetry data.
+
+        Args:
+            spans: The list of `opentelemetry.trace.Span` objects to be exported
+
+        Returns:
+            The result of the export
+        """
+
+    def shutdown(self) -> None:
+        """Shuts down the exporter.
+
+        Called when the SDK is shut down.
+        """
+
+    def force_flush(self, timeout_millis: int = 30000) -> bool:
+        """Hint to ensure that the export of any spans the exporter has received
+        prior to the call to ForceFlush SHOULD be completed as soon as possible, preferably
+        before returning from this method.
+        """
+
+
+class SimpleSpanProcessor(SpanProcessor):
+    """Simple SpanProcessor implementation.
+
+    SimpleSpanProcessor is an implementation of `SpanProcessor` that
+    passes ended spans directly to the configured `SpanExporter`.
+    """
+
+    def __init__(self, span_exporter: SpanExporter):
+        self.span_exporter = span_exporter
+
+    def on_start(
+        self, span: Span, parent_context: typing.Optional[Context] = None
+    ) -> None:
+        pass
+
+    def on_end(self, span: ReadableSpan) -> None:
+        if not span.context.trace_flags.sampled:
+            return
+        token = attach(set_value(_SUPPRESS_INSTRUMENTATION_KEY, True))
+        try:
+            self.span_exporter.export((span,))
+        # pylint: disable=broad-exception-caught
+        except Exception:
+            logger.exception("Exception while exporting Span.")
+        detach(token)
+
+    def shutdown(self) -> None:
+        self.span_exporter.shutdown()
+
+    def force_flush(self, timeout_millis: int = 30000) -> bool:
+        # pylint: disable=unused-argument
+        return True
+
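+# Usage sketch: SimpleSpanProcessor exports synchronously on every span end,
+# which suits tests and local debugging (``ConsoleSpanExporter`` is defined
+# later in this module):
+#
+#     provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))
+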
+
+class _FlushRequest:
+    """Represents a request for the BatchSpanProcessor to flush spans."""
+
+    __slots__ = ["event", "num_spans"]
+
+    def __init__(self):
+        self.event = threading.Event()
+        self.num_spans = 0
+
+
+_BSP_RESET_ONCE = Once()
+
+
+class BatchSpanProcessor(SpanProcessor):
+    """Batch span processor implementation.
+
+    `BatchSpanProcessor` is an implementation of `SpanProcessor` that
+    batches ended spans and pushes them to the configured `SpanExporter`.
+
+    `BatchSpanProcessor` is configurable with the following environment
+    variables which correspond to constructor parameters:
+
+    - :envvar:`OTEL_BSP_SCHEDULE_DELAY`
+    - :envvar:`OTEL_BSP_MAX_QUEUE_SIZE`
+    - :envvar:`OTEL_BSP_MAX_EXPORT_BATCH_SIZE`
+    - :envvar:`OTEL_BSP_EXPORT_TIMEOUT`
+    """
+
+    def __init__(
+        self,
+        span_exporter: SpanExporter,
+        max_queue_size: int | None = None,
+        schedule_delay_millis: float | None = None,
+        max_export_batch_size: int | None = None,
+        export_timeout_millis: float | None = None,
+    ):
+        if max_queue_size is None:
+            max_queue_size = BatchSpanProcessor._default_max_queue_size()
+
+        if schedule_delay_millis is None:
+            schedule_delay_millis = (
+                BatchSpanProcessor._default_schedule_delay_millis()
+            )
+
+        if max_export_batch_size is None:
+            max_export_batch_size = (
+                BatchSpanProcessor._default_max_export_batch_size()
+            )
+
+        if export_timeout_millis is None:
+            export_timeout_millis = (
+                BatchSpanProcessor._default_export_timeout_millis()
+            )
+
+        BatchSpanProcessor._validate_arguments(
+            max_queue_size, schedule_delay_millis, max_export_batch_size
+        )
+
+        self.span_exporter = span_exporter
+        self.queue = collections.deque([], max_queue_size)  # type: typing.Deque[Span]
+        self.worker_thread = threading.Thread(
+            name="OtelBatchSpanProcessor", target=self.worker, daemon=True
+        )
+        self.condition = threading.Condition(threading.Lock())
+        self._flush_request = None  # type: typing.Optional[_FlushRequest]
+        self.schedule_delay_millis = schedule_delay_millis
+        self.max_export_batch_size = max_export_batch_size
+        self.max_queue_size = max_queue_size
+        self.export_timeout_millis = export_timeout_millis
+        self.done = False
+        # flag that indicates that spans are being dropped
+        self._spans_dropped = False
+        # preallocated list to send spans to the exporter
+        self.spans_list = [None] * self.max_export_batch_size  # type: typing.List[typing.Optional[Span]]
+        self.worker_thread.start()
+        if hasattr(os, "register_at_fork"):
+            os.register_at_fork(after_in_child=self._at_fork_reinit)  # pylint: disable=protected-access
+        self._pid = os.getpid()
+
+    def on_start(
+        self, span: Span, parent_context: Context | None = None
+    ) -> None:
+        pass
+
+    def on_end(self, span: ReadableSpan) -> None:
+        if self.done:
+            logger.warning("Already shutdown, dropping span.")
+            return
+        if not span.context.trace_flags.sampled:
+            return
+        if self._pid != os.getpid():
+            _BSP_RESET_ONCE.do_once(self._at_fork_reinit)
+
+        if len(self.queue) == self.max_queue_size:
+            if not self._spans_dropped:
+                logger.warning("Queue is full, likely spans will be dropped.")
+                self._spans_dropped = True
+
+        self.queue.appendleft(span)
+
+        if len(self.queue) >= self.max_export_batch_size:
+            with self.condition:
+                self.condition.notify()
+
+    def _at_fork_reinit(self):
+        self.condition = threading.Condition(threading.Lock())
+        self.queue.clear()
+
+        # worker_thread is local to a process; after a fork, only the thread
+        # that called fork() continues to exist. A new worker thread must be
+        # started in the child process.
+        self.worker_thread = threading.Thread(
+            name="OtelBatchSpanProcessor", target=self.worker, daemon=True
+        )
+        self.worker_thread.start()
+        self._pid = os.getpid()
+
+    def worker(self):
+        timeout = self.schedule_delay_millis / 1e3
+        flush_request = None  # type: typing.Optional[_FlushRequest]
+        while not self.done:
+            with self.condition:
+                if self.done:
+                    # done flag may have changed, avoid waiting
+                    break
+                flush_request = self._get_and_unset_flush_request()
+                if (
+                    len(self.queue) < self.max_export_batch_size
+                    and flush_request is None
+                ):
+                    self.condition.wait(timeout)
+                    flush_request = self._get_and_unset_flush_request()
+                    if not self.queue:
+                        # spurious notification, let's wait again, reset timeout
+                        timeout = self.schedule_delay_millis / 1e3
+                        self._notify_flush_request_finished(flush_request)
+                        flush_request = None
+                        continue
+                    if self.done:
+                        # remaining spans will be sent when flush is called
+                        break
+
+            # subtract the duration of this export call from the next timeout
+            start = time_ns()
+            self._export(flush_request)
+            end = time_ns()
+            duration = (end - start) / 1e9
+            timeout = self.schedule_delay_millis / 1e3 - duration
+
+            self._notify_flush_request_finished(flush_request)
+            flush_request = None
+
+        # there might have been a new flush request while export was running
+        # and before the done flag switched to true
+        with self.condition:
+            shutdown_flush_request = self._get_and_unset_flush_request()
+
+        # be sure that all spans are sent
+        self._drain_queue()
+        self._notify_flush_request_finished(flush_request)
+        self._notify_flush_request_finished(shutdown_flush_request)
+
+    def _get_and_unset_flush_request(
+        self,
+    ) -> typing.Optional[_FlushRequest]:
+        """Returns the current flush request and makes it invisible to the
+        worker thread for subsequent calls.
+        """
+        flush_request = self._flush_request
+        self._flush_request = None
+        if flush_request is not None:
+            flush_request.num_spans = len(self.queue)
+        return flush_request
+
+    @staticmethod
+    def _notify_flush_request_finished(
+        flush_request: typing.Optional[_FlushRequest],
+    ):
+        """Notifies the flush initiator(s) waiting on the given request/event
+        that the flush operation has finished.
+        """
+        if flush_request is not None:
+            flush_request.event.set()
+
+    def _get_or_create_flush_request(self) -> _FlushRequest:
+        """Either returns the current active flush event or creates a new one.
+
+        The flush event will be visible to and read by the worker thread
+        before an export operation starts. Callers of a flush operation may
+        wait on the returned event to be notified when the flush/export
+        operation has finished.
+
+        This method is not thread-safe; callers must take care of
+        synchronization/locking themselves.
+        """
+        if self._flush_request is None:
+            self._flush_request = _FlushRequest()
+        return self._flush_request
+
+    def _export(self, flush_request: typing.Optional[_FlushRequest]):
+        """Exports spans considering the given flush_request.
+
+        If a flush_request is given, spans are exported in batches until the
+        number of exported spans reaches or exceeds the number of spans in
+        the flush request.
+        If no flush_request is given, at most max_export_batch_size spans
+        are exported.
+        """
+        if not flush_request:
+            self._export_batch()
+            return
+
+        num_spans = flush_request.num_spans
+        while self.queue:
+            num_exported = self._export_batch()
+            num_spans -= num_exported
+
+            if num_spans <= 0:
+                break
+
+    def _export_batch(self) -> int:
+        """Exports at most max_export_batch_size spans and returns the number of
+        exported spans.
+        """
+        idx = 0
+        # currently only a single thread acts as consumer, so queue.pop() will
+        # not raise an exception
+        while idx < self.max_export_batch_size and self.queue:
+            self.spans_list[idx] = self.queue.pop()
+            idx += 1
+        token = attach(set_value(_SUPPRESS_INSTRUMENTATION_KEY, True))
+        try:
+            # Ignore type b/c the Optional[None]+slicing is too "clever"
+            # for mypy
+            self.span_exporter.export(self.spans_list[:idx])  # type: ignore
+        except Exception:  # pylint: disable=broad-exception-caught
+            logger.exception("Exception while exporting Span batch.")
+        detach(token)
+
+        # clean up list
+        for index in range(idx):
+            self.spans_list[index] = None
+        return idx
+
+    def _drain_queue(self):
+        """Export all elements until queue is empty.
+
+        Can only be called from the worker thread context because it invokes
+        `export`, which is not thread-safe.
+        """
+        while self.queue:
+            self._export_batch()
+
+    def force_flush(self, timeout_millis: int | None = None) -> bool:
+        if timeout_millis is None:
+            timeout_millis = self.export_timeout_millis
+
+        if self.done:
+            logger.warning("Already shutdown, ignoring call to force_flush().")
+            return True
+
+        with self.condition:
+            flush_request = self._get_or_create_flush_request()
+            # signal the worker thread to flush and wait for it to finish
+            self.condition.notify_all()
+
+        # wait for the flush request to be processed
+        ret = flush_request.event.wait(timeout_millis / 1e3)
+        if not ret:
+            logger.warning("Timeout was exceeded in force_flush().")
+        return ret
+
+    def shutdown(self) -> None:
+        # signal the worker thread to finish and then wait for it
+        self.done = True
+        with self.condition:
+            self.condition.notify_all()
+        self.worker_thread.join()
+        self.span_exporter.shutdown()
+
+    @staticmethod
+    def _default_max_queue_size():
+        try:
+            return int(
+                environ.get(OTEL_BSP_MAX_QUEUE_SIZE, _DEFAULT_MAX_QUEUE_SIZE)
+            )
+        except ValueError:
+            logger.exception(
+                _ENV_VAR_INT_VALUE_ERROR_MESSAGE,
+                OTEL_BSP_MAX_QUEUE_SIZE,
+                _DEFAULT_MAX_QUEUE_SIZE,
+            )
+            return _DEFAULT_MAX_QUEUE_SIZE
+
+    @staticmethod
+    def _default_schedule_delay_millis():
+        try:
+            return int(
+                environ.get(
+                    OTEL_BSP_SCHEDULE_DELAY, _DEFAULT_SCHEDULE_DELAY_MILLIS
+                )
+            )
+        except ValueError:
+            logger.exception(
+                _ENV_VAR_INT_VALUE_ERROR_MESSAGE,
+                OTEL_BSP_SCHEDULE_DELAY,
+                _DEFAULT_SCHEDULE_DELAY_MILLIS,
+            )
+            return _DEFAULT_SCHEDULE_DELAY_MILLIS
+
+    @staticmethod
+    def _default_max_export_batch_size():
+        try:
+            return int(
+                environ.get(
+                    OTEL_BSP_MAX_EXPORT_BATCH_SIZE,
+                    _DEFAULT_MAX_EXPORT_BATCH_SIZE,
+                )
+            )
+        except ValueError:
+            logger.exception(
+                _ENV_VAR_INT_VALUE_ERROR_MESSAGE,
+                OTEL_BSP_MAX_EXPORT_BATCH_SIZE,
+                _DEFAULT_MAX_EXPORT_BATCH_SIZE,
+            )
+            return _DEFAULT_MAX_EXPORT_BATCH_SIZE
+
+    @staticmethod
+    def _default_export_timeout_millis():
+        try:
+            return int(
+                environ.get(
+                    OTEL_BSP_EXPORT_TIMEOUT, _DEFAULT_EXPORT_TIMEOUT_MILLIS
+                )
+            )
+        except ValueError:
+            logger.exception(
+                _ENV_VAR_INT_VALUE_ERROR_MESSAGE,
+                OTEL_BSP_EXPORT_TIMEOUT,
+                _DEFAULT_EXPORT_TIMEOUT_MILLIS,
+            )
+            return _DEFAULT_EXPORT_TIMEOUT_MILLIS
+
+    @staticmethod
+    def _validate_arguments(
+        max_queue_size, schedule_delay_millis, max_export_batch_size
+    ):
+        if max_queue_size <= 0:
+            raise ValueError("max_queue_size must be a positive integer.")
+
+        if schedule_delay_millis <= 0:
+            raise ValueError("schedule_delay_millis must be positive.")
+
+        if max_export_batch_size <= 0:
+            raise ValueError(
+                "max_export_batch_size must be a positive integer."
+            )
+
+        if max_export_batch_size > max_queue_size:
+            raise ValueError(
+                "max_export_batch_size must be less than or equal to max_queue_size."
+            )
+
+
+class ConsoleSpanExporter(SpanExporter):
+    """Implementation of :class:`SpanExporter` that prints spans to the
+    console.
+
+    This class can be used for diagnostic purposes. It prints the exported
+    spans to the console (STDOUT by default).
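+
+    A sketch with a custom one-line formatter (``span.name`` is a
+    `ReadableSpan` attribute; the default formatter emits
+    ``span.to_json()``):
+
+    .. code:: python
+
+        exporter = ConsoleSpanExporter(
+            formatter=lambda span: span.name + "\n"
+        )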
+    """
+
+    def __init__(
+        self,
+        service_name: str | None = None,
+        out: typing.IO = sys.stdout,
+        formatter: typing.Callable[
+            [ReadableSpan], str
+        ] = lambda span: span.to_json() + linesep,
+    ):
+        self.out = out
+        self.formatter = formatter
+        self.service_name = service_name
+
+    def export(self, spans: typing.Sequence[ReadableSpan]) -> SpanExportResult:
+        for span in spans:
+            self.out.write(self.formatter(span))
+        self.out.flush()
+        return SpanExportResult.SUCCESS
+
+    def force_flush(self, timeout_millis: int = 30000) -> bool:
+        return True
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/trace/export/in_memory_span_exporter.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/trace/export/in_memory_span_exporter.py
new file mode 100644
index 00000000..c28ecfd2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/trace/export/in_memory_span_exporter.py
@@ -0,0 +1,61 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import threading
+import typing
+
+from opentelemetry.sdk.trace import ReadableSpan
+from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
+
+
+class InMemorySpanExporter(SpanExporter):
+    """Implementation of :class:`.SpanExporter` that stores spans in memory.
+
+    This class can be used for testing purposes. It stores the exported spans
+    in a list in memory that can be retrieved using the
+    :func:`.get_finished_spans` method.
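+
+    A test sketch, assuming a `TracerProvider` named ``provider`` and the
+    `SimpleSpanProcessor` from this SDK:
+
+    .. code:: python
+
+        exporter = InMemorySpanExporter()
+        provider.add_span_processor(SimpleSpanProcessor(exporter))
+
+        with provider.get_tracer(__name__).start_as_current_span("test"):
+            pass
+
+        assert len(exporter.get_finished_spans()) == 1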
+    """
+
+    def __init__(self) -> None:
+        self._finished_spans: typing.List[ReadableSpan] = []
+        self._stopped = False
+        self._lock = threading.Lock()
+
+    def clear(self) -> None:
+        """Clear list of collected spans."""
+        with self._lock:
+            self._finished_spans.clear()
+
+    def get_finished_spans(self) -> typing.Tuple[ReadableSpan, ...]:
+        """Get list of collected spans."""
+        with self._lock:
+            return tuple(self._finished_spans)
+
+    def export(self, spans: typing.Sequence[ReadableSpan]) -> SpanExportResult:
+        """Stores a list of spans in memory."""
+        if self._stopped:
+            return SpanExportResult.FAILURE
+        with self._lock:
+            self._finished_spans.extend(spans)
+        return SpanExportResult.SUCCESS
+
+    def shutdown(self) -> None:
+        """Shut downs the exporter.
+
+        Calls to export after the exporter has been shut down will fail.
+        """
+        self._stopped = True
+
+    def force_flush(self, timeout_millis: int = 30000) -> bool:
+        return True
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/trace/id_generator.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/trace/id_generator.py
new file mode 100644
index 00000000..cd1f89bc
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/trace/id_generator.py
@@ -0,0 +1,60 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import abc
+import random
+
+from opentelemetry import trace
+
+
+class IdGenerator(abc.ABC):
+    @abc.abstractmethod
+    def generate_span_id(self) -> int:
+        """Get a new span ID.
+
+        Returns:
+            A 64-bit int for use as a span ID
+        """
+
+    @abc.abstractmethod
+    def generate_trace_id(self) -> int:
+        """Get a new trace ID.
+
+        Implementations should at least make the 64 least significant bits
+        uniformly random. Samplers like the `TraceIdRatioBased` sampler rely on
+        this randomness to make sampling decisions.
+
+        See `the specification on TraceIdRatioBased <https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/sdk.md#traceidratiobased>`_.
+
+        Returns:
+            A 128-bit int for use as a trace ID
+        """
+
+
+class RandomIdGenerator(IdGenerator):
+    """The default ID generator for TracerProvider which randomly generates all
+    bits when generating IDs.
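+
+    A usage sketch (the SDK `TracerProvider` accepts an ``id_generator``
+    argument):
+
+    .. code:: python
+
+        from opentelemetry.sdk.trace import TracerProvider
+
+        provider = TracerProvider(id_generator=RandomIdGenerator())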
+    """
+
+    def generate_span_id(self) -> int:
+        span_id = random.getrandbits(64)
+        while span_id == trace.INVALID_SPAN_ID:
+            span_id = random.getrandbits(64)
+        return span_id
+
+    def generate_trace_id(self) -> int:
+        trace_id = random.getrandbits(128)
+        while trace_id == trace.INVALID_TRACE_ID:
+            trace_id = random.getrandbits(128)
+        return trace_id
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/trace/sampling.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/trace/sampling.py
new file mode 100644
index 00000000..fb6990a0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/trace/sampling.py
@@ -0,0 +1,453 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+For general information about sampling, see `the specification <https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/sdk.md#sampling>`_.
+
+OpenTelemetry provides two types of samplers:
+
+- `StaticSampler`
+- `TraceIdRatioBased`
+
+A `StaticSampler` always returns the same sampling decision regardless of the conditions. Both possible `StaticSampler` instances are already provided:
+
+- Always sample spans: ALWAYS_ON
+- Never sample spans: ALWAYS_OFF
+
+A `TraceIdRatioBased` sampler makes a probabilistic sampling decision based on the given sampling rate.
+
+If the span being sampled has a parent, `ParentBased` will respect the parent delegate sampler. Otherwise, it returns the sampling result from the given root sampler.
+
+Currently, sampling decisions are always made during the creation of the span. However, this might not always be the case in the future (see `OTEP #115 <https://github.com/open-telemetry/oteps/pull/115>`_).
+
+Custom samplers can be created by subclassing `Sampler` and implementing `Sampler.should_sample` as well as `Sampler.get_description`.
+
+Samplers are able to modify the `opentelemetry.trace.span.TraceState` of the parent of the span being created. For custom samplers, it is suggested that `Sampler.should_sample` use the
+parent span context's `opentelemetry.trace.span.TraceState` and pass it into the `SamplingResult`, rather than relying on the explicit trace_state argument of `Sampler.should_sample`.
+
+To use a sampler, pass it into the tracer provider constructor. For example:
+
+.. code:: python
+
+    from opentelemetry import trace
+    from opentelemetry.sdk.trace import TracerProvider
+    from opentelemetry.sdk.trace.export import (
+        ConsoleSpanExporter,
+        SimpleSpanProcessor,
+    )
+    from opentelemetry.sdk.trace.sampling import TraceIdRatioBased
+
+    # sample 1 in every 1000 traces
+    sampler = TraceIdRatioBased(1/1000)
+
+    # set the sampler onto the global tracer provider
+    trace.set_tracer_provider(TracerProvider(sampler=sampler))
+
+    # set up an exporter for sampled spans
+    trace.get_tracer_provider().add_span_processor(
+        SimpleSpanProcessor(ConsoleSpanExporter())
+    )
+
+    # created spans will now be sampled by the TraceIdRatioBased sampler
+    with trace.get_tracer(__name__).start_as_current_span("Test Span"):
+        ...
+
+The tracer sampler can also be configured via environment variables ``OTEL_TRACES_SAMPLER`` and ``OTEL_TRACES_SAMPLER_ARG`` (only if applicable).
+The list of built-in values for ``OTEL_TRACES_SAMPLER`` are:
+
+    * always_on - Sampler that always samples spans, regardless of the parent span's sampling decision.
+    * always_off - Sampler that never samples spans, regardless of the parent span's sampling decision.
+    * traceidratio - Sampler that samples probabilistically based on rate.
+    * parentbased_always_on - (default) Sampler that respects its parent span's sampling decision, but otherwise always samples.
+    * parentbased_always_off - Sampler that respects its parent span's sampling decision, but otherwise never samples.
+    * parentbased_traceidratio - Sampler that respects its parent span's sampling decision, but otherwise samples probabilistically based on rate.
+
+Sampling probability can be set with ``OTEL_TRACES_SAMPLER_ARG`` if the sampler is traceidratio or parentbased_traceidratio. The rate must be in the range [0.0, 1.0]. When not provided, the rate defaults to
+1.0 (the maximum possible rate).
+
+The previous example, configured with environment variables instead. Make sure to set ``OTEL_TRACES_SAMPLER=traceidratio`` and ``OTEL_TRACES_SAMPLER_ARG=0.001``.
+
+.. code:: python
+
+    from opentelemetry import trace
+    from opentelemetry.sdk.trace import TracerProvider
+    from opentelemetry.sdk.trace.export import (
+        ConsoleSpanExporter,
+        SimpleSpanProcessor,
+    )
+
+    trace.set_tracer_provider(TracerProvider())
+
+    # set up an exporter for sampled spans
+    trace.get_tracer_provider().add_span_processor(
+        SimpleSpanProcessor(ConsoleSpanExporter())
+    )
+
+    # created spans will now be sampled by the TraceIdRatioBased sampler with rate 1/1000.
+    with trace.get_tracer(__name__).start_as_current_span("Test Span"):
+        ...
+
+When utilizing a configurator, you can configure a custom sampler. To create a configurable custom sampler, create an entry point for the custom sampler
+factory method or function under the entry point group ``opentelemetry_traces_sampler``. The custom sampler factory method must be of type ``Callable[[str], Sampler]``, taking a single string argument and
+returning a Sampler object. The single input will come from the string value of the ``OTEL_TRACES_SAMPLER_ARG`` environment variable. If ``OTEL_TRACES_SAMPLER_ARG`` is not configured, the input will
+be an empty string. For example:
+
+.. code:: python
+
+    setup(
+        ...
+        entry_points={
+            ...
+            "opentelemetry_traces_sampler": [
+                "custom_sampler_name = path.to.sampler.factory.method:CustomSamplerFactory.get_sampler"
+            ]
+        }
+    )
+    # ...
+    class CustomRatioSampler(Sampler):
+        def __init__(self, rate):
+            # ...
+    # ...
+    class CustomSamplerFactory:
+        @staticmethod
+        def get_sampler(sampler_argument):
+            try:
+                rate = float(sampler_argument)
+                return CustomRatioSampler(rate)
+            except ValueError:  # In case the argument is an empty string.
+                return CustomRatioSampler(0.5)
+
+In order to configure your application with a custom sampler's entry point, set the ``OTEL_TRACES_SAMPLER`` environment variable to the key name of the entry point. For example, to configure the
+above sampler, set ``OTEL_TRACES_SAMPLER=custom_sampler_name`` and ``OTEL_TRACES_SAMPLER_ARG=0.5``.
+"""
+
+import abc
+import enum
+import os
+from logging import getLogger
+from types import MappingProxyType
+from typing import Optional, Sequence
+
+# pylint: disable=unused-import
+from opentelemetry.context import Context
+from opentelemetry.sdk.environment_variables import (
+    OTEL_TRACES_SAMPLER,
+    OTEL_TRACES_SAMPLER_ARG,
+)
+from opentelemetry.trace import Link, SpanKind, get_current_span
+from opentelemetry.trace.span import TraceState
+from opentelemetry.util.types import Attributes
+
+_logger = getLogger(__name__)
+
+
+class Decision(enum.Enum):
+    # IsRecording() == false, span will not be recorded and all events and attributes will be dropped.
+    DROP = 0
+    # IsRecording() == true, but Sampled flag MUST NOT be set.
+    RECORD_ONLY = 1
+    # IsRecording() == true AND Sampled flag MUST be set.
+    RECORD_AND_SAMPLE = 2
+
+    def is_recording(self):
+        return self in (Decision.RECORD_ONLY, Decision.RECORD_AND_SAMPLE)
+
+    def is_sampled(self):
+        return self is Decision.RECORD_AND_SAMPLE
+
+
+class SamplingResult:
+    """A sampling result as applied to a newly-created Span.
+
+    Args:
+        decision: A sampling decision based on whether the span is recorded
+            and on the sampled flag in the trace flags of the span context.
+        attributes: Attributes to add to the `opentelemetry.trace.Span`.
+        trace_state: The tracestate used for the `opentelemetry.trace.Span`.
+            Could possibly have been modified by the sampler.
+    """
+
+    def __repr__(self) -> str:
+        return f"{type(self).__name__}({str(self.decision)}, attributes={str(self.attributes)})"
+
+    def __init__(
+        self,
+        decision: Decision,
+        attributes: "Attributes" = None,
+        trace_state: Optional["TraceState"] = None,
+    ) -> None:
+        self.decision = decision
+        if attributes is None:
+            self.attributes = MappingProxyType({})
+        else:
+            self.attributes = MappingProxyType(attributes)
+        self.trace_state = trace_state
+
+
+class Sampler(abc.ABC):
+    @abc.abstractmethod
+    def should_sample(
+        self,
+        parent_context: Optional["Context"],
+        trace_id: int,
+        name: str,
+        kind: Optional[SpanKind] = None,
+        attributes: Attributes = None,
+        links: Optional[Sequence["Link"]] = None,
+        trace_state: Optional["TraceState"] = None,
+    ) -> "SamplingResult":
+        pass
+
+    @abc.abstractmethod
+    def get_description(self) -> str:
+        pass
+
+
+class StaticSampler(Sampler):
+    """Sampler that always returns the same decision."""
+
+    def __init__(self, decision: "Decision") -> None:
+        self._decision = decision
+
+    def should_sample(
+        self,
+        parent_context: Optional["Context"],
+        trace_id: int,
+        name: str,
+        kind: Optional[SpanKind] = None,
+        attributes: Attributes = None,
+        links: Optional[Sequence["Link"]] = None,
+        trace_state: Optional["TraceState"] = None,
+    ) -> "SamplingResult":
+        if self._decision is Decision.DROP:
+            attributes = None
+        return SamplingResult(
+            self._decision,
+            attributes,
+            _get_parent_trace_state(parent_context),
+        )
+
+    def get_description(self) -> str:
+        if self._decision is Decision.DROP:
+            return "AlwaysOffSampler"
+        return "AlwaysOnSampler"
+
+
+ALWAYS_OFF = StaticSampler(Decision.DROP)
+"""Sampler that never samples spans, regardless of the parent span's sampling decision."""
+
+ALWAYS_ON = StaticSampler(Decision.RECORD_AND_SAMPLE)
+"""Sampler that always samples spans, regardless of the parent span's sampling decision."""
+
+
+class TraceIdRatioBased(Sampler):
+    """
+    Sampler that makes sampling decisions probabilistically based on `rate`.
+
+    Args:
+        rate: Probability (between 0 and 1) that a span will be sampled
+    """
+
+    def __init__(self, rate: float):
+        if rate < 0.0 or rate > 1.0:
+            raise ValueError("Probability must be in range [0.0, 1.0].")
+        self._rate = rate
+        self._bound = self.get_bound_for_rate(self._rate)
+
+    # For compatibility with 64 bit trace IDs, the sampler checks the 64
+    # low-order bits of the trace ID to decide whether to sample a given trace.
+    TRACE_ID_LIMIT = (1 << 64) - 1
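+    # For example, rate=0.25 gives bound == round(0.25 * 2**64) == 2**62,
+    # so a trace is sampled when (trace_id & TRACE_ID_LIMIT) < 2**62.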
+
+    @classmethod
+    def get_bound_for_rate(cls, rate: float) -> int:
+        return round(rate * (cls.TRACE_ID_LIMIT + 1))
+
+    @property
+    def rate(self) -> float:
+        return self._rate
+
+    @property
+    def bound(self) -> int:
+        return self._bound
+
+    def should_sample(
+        self,
+        parent_context: Optional["Context"],
+        trace_id: int,
+        name: str,
+        kind: Optional[SpanKind] = None,
+        attributes: Attributes = None,
+        links: Optional[Sequence["Link"]] = None,
+        trace_state: Optional["TraceState"] = None,
+    ) -> "SamplingResult":
+        decision = Decision.DROP
+        if trace_id & self.TRACE_ID_LIMIT < self.bound:
+            decision = Decision.RECORD_AND_SAMPLE
+        if decision is Decision.DROP:
+            attributes = None
+        return SamplingResult(
+            decision,
+            attributes,
+            _get_parent_trace_state(parent_context),
+        )
+
+    def get_description(self) -> str:
+        return f"TraceIdRatioBased{{{self._rate}}}"
+
+
+class ParentBased(Sampler):
+    """
+    If a parent is set, applies the respective delegate sampler.
+    Otherwise, uses the root provided at initialization to make a
+    decision.
+
+    Args:
+        root: Sampler called for spans with no parent (root spans).
+        remote_parent_sampled: Sampler called for a remote sampled parent.
+        remote_parent_not_sampled: Sampler called for a remote parent that is
+            not sampled.
+        local_parent_sampled: Sampler called for a local sampled parent.
+        local_parent_not_sampled: Sampler called for a local parent that is
+            not sampled.
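+
+    A composition sketch: honor a sampled parent, but sample new roots
+    probabilistically with `TraceIdRatioBased` from this module:
+
+    .. code:: python
+
+        sampler = ParentBased(root=TraceIdRatioBased(0.1))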
+    """
+
+    def __init__(
+        self,
+        root: Sampler,
+        remote_parent_sampled: Sampler = ALWAYS_ON,
+        remote_parent_not_sampled: Sampler = ALWAYS_OFF,
+        local_parent_sampled: Sampler = ALWAYS_ON,
+        local_parent_not_sampled: Sampler = ALWAYS_OFF,
+    ):
+        self._root = root
+        self._remote_parent_sampled = remote_parent_sampled
+        self._remote_parent_not_sampled = remote_parent_not_sampled
+        self._local_parent_sampled = local_parent_sampled
+        self._local_parent_not_sampled = local_parent_not_sampled
+
+    def should_sample(
+        self,
+        parent_context: Optional["Context"],
+        trace_id: int,
+        name: str,
+        kind: Optional[SpanKind] = None,
+        attributes: Attributes = None,
+        links: Optional[Sequence["Link"]] = None,
+        trace_state: Optional["TraceState"] = None,
+    ) -> "SamplingResult":
+        parent_span_context = get_current_span(
+            parent_context
+        ).get_span_context()
+        # default to the root sampler
+        sampler = self._root
+        # respect the sampling and remote flag of the parent if present
+        if parent_span_context is not None and parent_span_context.is_valid:
+            if parent_span_context.is_remote:
+                if parent_span_context.trace_flags.sampled:
+                    sampler = self._remote_parent_sampled
+                else:
+                    sampler = self._remote_parent_not_sampled
+            else:
+                if parent_span_context.trace_flags.sampled:
+                    sampler = self._local_parent_sampled
+                else:
+                    sampler = self._local_parent_not_sampled
+
+        return sampler.should_sample(
+            parent_context=parent_context,
+            trace_id=trace_id,
+            name=name,
+            kind=kind,
+            attributes=attributes,
+            links=links,
+        )
+
+    def get_description(self):
+        return f"ParentBased{{root:{self._root.get_description()},remoteParentSampled:{self._remote_parent_sampled.get_description()},remoteParentNotSampled:{self._remote_parent_not_sampled.get_description()},localParentSampled:{self._local_parent_sampled.get_description()},localParentNotSampled:{self._local_parent_not_sampled.get_description()}}}"
+
+
+DEFAULT_OFF = ParentBased(ALWAYS_OFF)
+"""Sampler that respects its parent span's sampling decision, but otherwise never samples."""
+
+DEFAULT_ON = ParentBased(ALWAYS_ON)
+"""Sampler that respects its parent span's sampling decision, but otherwise always samples."""
+
+
+class ParentBasedTraceIdRatio(ParentBased):
+    """
+    Sampler that respects its parent span's sampling decision, but otherwise
+    samples probabilistically based on `rate`.
+    """
+
+    def __init__(self, rate: float):
+        root = TraceIdRatioBased(rate=rate)
+        super().__init__(root=root)
+
+
+class _AlwaysOff(StaticSampler):
+    def __init__(self, _):
+        super().__init__(Decision.DROP)
+
+
+class _AlwaysOn(StaticSampler):
+    def __init__(self, _):
+        super().__init__(Decision.RECORD_AND_SAMPLE)
+
+
+class _ParentBasedAlwaysOff(ParentBased):
+    def __init__(self, _):
+        super().__init__(ALWAYS_OFF)
+
+
+class _ParentBasedAlwaysOn(ParentBased):
+    def __init__(self, _):
+        super().__init__(ALWAYS_ON)
+
+
+_KNOWN_SAMPLERS = {
+    "always_on": ALWAYS_ON,
+    "always_off": ALWAYS_OFF,
+    "parentbased_always_on": DEFAULT_ON,
+    "parentbased_always_off": DEFAULT_OFF,
+    "traceidratio": TraceIdRatioBased,
+    "parentbased_traceidratio": ParentBasedTraceIdRatio,
+}
+
+
+def _get_from_env_or_default() -> Sampler:
+    trace_sampler = os.getenv(
+        OTEL_TRACES_SAMPLER, "parentbased_always_on"
+    ).lower()
+    if trace_sampler not in _KNOWN_SAMPLERS:
+        _logger.warning("Couldn't recognize sampler %s.", trace_sampler)
+        trace_sampler = "parentbased_always_on"
+
+    if trace_sampler in ("traceidratio", "parentbased_traceidratio"):
+        try:
+            rate = float(os.getenv(OTEL_TRACES_SAMPLER_ARG))
+        except (ValueError, TypeError):
+            _logger.warning("Could not convert TRACES_SAMPLER_ARG to float.")
+            rate = 1.0
+        return _KNOWN_SAMPLERS[trace_sampler](rate)
+
+    return _KNOWN_SAMPLERS[trace_sampler]
+
+
+def _get_parent_trace_state(
+    parent_context: Optional[Context],
+) -> Optional["TraceState"]:
+    parent_span_context = get_current_span(parent_context).get_span_context()
+    if parent_span_context is None or not parent_span_context.is_valid:
+        return None
+    return parent_span_context.trace_state
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/util/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/util/__init__.py
new file mode 100644
index 00000000..68f10ddc
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/util/__init__.py
@@ -0,0 +1,152 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import threading
+from collections import deque
+from collections.abc import MutableMapping, Sequence
+from typing import Optional
+
+from deprecated import deprecated
+
+
+def ns_to_iso_str(nanoseconds):
+    """Get an ISO 8601 string from time_ns value."""
+    ts = datetime.datetime.fromtimestamp(
+        nanoseconds / 1e9, tz=datetime.timezone.utc
+    )
+    return ts.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
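+
+
+# For example, ns_to_iso_str(1_000_000_000) returns
+# "1970-01-01T00:00:01.000000Z".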
+
+
+def get_dict_as_key(labels):
+    """Converts a dict to be used as a unique key"""
+    return tuple(
+        sorted(
+            map(
+                lambda kv: (
+                    (kv[0], tuple(kv[1])) if isinstance(kv[1], list) else kv
+                ),
+                labels.items(),
+            )
+        )
+    )
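+
+
+# For example, get_dict_as_key({"b": "x", "a": [1, 2]}) returns
+# (("a", (1, 2)), ("b", "x")): items are sorted by key and list values
+# become tuples, so the result is hashable.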
+
+
+class BoundedList(Sequence):
+    """An append only list with a fixed max size.
+
+    Calls to `append` and `extend` will drop the oldest elements if there is
+    not enough room.
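+
+    An illustrative sketch of the drop behavior:
+
+    .. code:: python
+
+        blist = BoundedList(2)
+        blist.extend([1, 2, 3])  # the oldest element is dropped
+        assert list(blist) == [2, 3]
+        assert blist.dropped == 1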
+    """
+
+    def __init__(self, maxlen: Optional[int]):
+        self.dropped = 0
+        self._dq = deque(maxlen=maxlen)  # type: deque
+        self._lock = threading.Lock()
+
+    def __repr__(self):
+        return f"{type(self).__name__}({list(self._dq)}, maxlen={self._dq.maxlen})"
+
+    def __getitem__(self, index):
+        return self._dq[index]
+
+    def __len__(self):
+        return len(self._dq)
+
+    def __iter__(self):
+        with self._lock:
+            return iter(deque(self._dq))
+
+    def append(self, item):
+        with self._lock:
+            if (
+                self._dq.maxlen is not None
+                and len(self._dq) == self._dq.maxlen
+            ):
+                self.dropped += 1
+            self._dq.append(item)
+
+    def extend(self, seq):
+        with self._lock:
+            if self._dq.maxlen is not None:
+                to_drop = len(seq) + len(self._dq) - self._dq.maxlen
+                if to_drop > 0:
+                    self.dropped += to_drop
+            self._dq.extend(seq)
+
+    @classmethod
+    def from_seq(cls, maxlen, seq):
+        seq = tuple(seq)
+        bounded_list = cls(maxlen)
+        bounded_list.extend(seq)
+        return bounded_list
+
+
+@deprecated(version="1.4.0")  # type: ignore
+class BoundedDict(MutableMapping):
+    """An ordered dict with a fixed max capacity.
+
+    Oldest elements are dropped when the dict is full and a new element is
+    added.
+    """
+
+    def __init__(self, maxlen: Optional[int]):
+        if maxlen is not None:
+            if not isinstance(maxlen, int):
+                raise ValueError("maxlen must be an integer")
+            if maxlen < 0:
+                raise ValueError("maxlen must be non-negative")
+        self.maxlen = maxlen
+        self.dropped = 0
+        self._dict = {}  # type: dict
+        self._lock = threading.Lock()  # type: threading.Lock
+
+    def __repr__(self):
+        return (
+            f"{type(self).__name__}({dict(self._dict)}, maxlen={self.maxlen})"
+        )
+
+    def __getitem__(self, key):
+        return self._dict[key]
+
+    def __setitem__(self, key, value):
+        with self._lock:
+            if self.maxlen is not None and self.maxlen == 0:
+                self.dropped += 1
+                return
+
+            if key in self._dict:
+                del self._dict[key]
+            elif self.maxlen is not None and len(self._dict) == self.maxlen:
+                del self._dict[next(iter(self._dict.keys()))]
+                self.dropped += 1
+            self._dict[key] = value
+
+    def __delitem__(self, key):
+        del self._dict[key]
+
+    def __iter__(self):
+        with self._lock:
+            return iter(self._dict.copy())
+
+    def __len__(self):
+        return len(self._dict)
+
+    @classmethod
+    def from_map(cls, maxlen, mapping):
+        mapping = dict(mapping)
+        bounded_dict = cls(maxlen)
+        for key, value in mapping.items():
+            bounded_dict[key] = value
+        return bounded_dict
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/util/__init__.pyi b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/util/__init__.pyi
new file mode 100644
index 00000000..55042fcf
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/util/__init__.pyi
@@ -0,0 +1,74 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import (
+    Iterable,
+    Iterator,
+    Mapping,
+    MutableMapping,
+    Sequence,
+    TypeVar,
+    overload,
+)
+
+from opentelemetry.util.types import AttributesAsKey, AttributeValue
+
+_T = TypeVar("_T")
+_KT = TypeVar("_KT")
+_VT = TypeVar("_VT")
+
+def ns_to_iso_str(nanoseconds: int) -> str: ...
+def get_dict_as_key(
+    labels: Mapping[str, AttributeValue],
+) -> AttributesAsKey: ...
+
+# pylint: disable=no-self-use
+class BoundedList(Sequence[_T]):
+    """An append only list with a fixed max size.
+
+    Calls to `append` and `extend` will drop the oldest elements if there is
+    not enough room.
+    """
+
+    dropped: int
+    def __init__(self, maxlen: int): ...
+    def insert(self, index: int, value: _T) -> None: ...
+    @overload
+    def __getitem__(self, i: int) -> _T: ...
+    @overload
+    def __getitem__(self, s: slice) -> Sequence[_T]: ...
+    def __len__(self) -> int: ...
+    def append(self, item: _T) -> None: ...
+    def extend(self, seq: Sequence[_T]) -> None: ...
+    @classmethod
+    def from_seq(cls, maxlen: int, seq: Iterable[_T]) -> BoundedList[_T]: ...  # pylint: disable=undefined-variable
+
+class BoundedDict(MutableMapping[_KT, _VT]):
+    """An ordered dict with a fixed max capacity.
+
+    Oldest elements are dropped when the dict is full and a new element is
+    added.
+    """
+
+    dropped: int
+    def __init__(self, maxlen: int): ...
+    def __getitem__(self, k: _KT) -> _VT: ...
+    def __setitem__(self, k: _KT, v: _VT) -> None: ...
+    def __delitem__(self, v: _KT) -> None: ...
+    def __iter__(self) -> Iterator[_KT]: ...
+    def __len__(self) -> int: ...
+    @classmethod
+    def from_map(
+        cls, maxlen: int, mapping: Mapping[_KT, _VT]
+    ) -> BoundedDict[_KT, _VT]: ...  # pylint: disable=undefined-variable
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/util/instrumentation.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/util/instrumentation.py
new file mode 100644
index 00000000..6b45bf2a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/util/instrumentation.py
@@ -0,0 +1,167 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from json import dumps
+from typing import Optional
+
+from deprecated import deprecated
+
+from opentelemetry.attributes import BoundedAttributes
+from opentelemetry.util.types import Attributes
+
+
+class InstrumentationInfo:
+    """Immutable information about an instrumentation library module.
+
+    See `opentelemetry.trace.TracerProvider.get_tracer` for the meaning of these
+    properties.
+    """
+
+    __slots__ = ("_name", "_version", "_schema_url")
+
+    @deprecated(version="1.11.1", reason="You should use InstrumentationScope")
+    def __init__(
+        self,
+        name: str,
+        version: Optional[str] = None,
+        schema_url: Optional[str] = None,
+    ):
+        self._name = name
+        self._version = version
+        if schema_url is None:
+            schema_url = ""
+        self._schema_url = schema_url
+
+    def __repr__(self):
+        return f"{type(self).__name__}({self._name}, {self._version}, {self._schema_url})"
+
+    def __hash__(self):
+        return hash((self._name, self._version, self._schema_url))
+
+    def __eq__(self, value):
+        return type(value) is type(self) and (
+            self._name,
+            self._version,
+            self._schema_url,
+        ) == (value._name, value._version, value._schema_url)
+
+    def __lt__(self, value):
+        if type(value) is not type(self):
+            return NotImplemented
+        return (self._name, self._version, self._schema_url) < (
+            value._name,
+            value._version,
+            value._schema_url,
+        )
+
+    @property
+    def schema_url(self) -> Optional[str]:
+        return self._schema_url
+
+    @property
+    def version(self) -> Optional[str]:
+        return self._version
+
+    @property
+    def name(self) -> str:
+        return self._name
+
+
+class InstrumentationScope:
+    """A logical unit of the application code with which the emitted telemetry can be
+    associated.
+
+    See `opentelemetry.trace.TracerProvider.get_tracer` for the meaning of these
+    properties.
+    """
+
+    __slots__ = ("_name", "_version", "_schema_url", "_attributes")
+
+    def __init__(
+        self,
+        name: str,
+        version: Optional[str] = None,
+        schema_url: Optional[str] = None,
+        attributes: Optional[Attributes] = None,
+    ) -> None:
+        self._name = name
+        self._version = version
+        if schema_url is None:
+            schema_url = ""
+        self._schema_url = schema_url
+        self._attributes = BoundedAttributes(attributes=attributes)
+
+    def __repr__(self) -> str:
+        return f"{type(self).__name__}({self._name}, {self._version}, {self._schema_url}, {self._attributes})"
+
+    def __hash__(self) -> int:
+        return hash((self._name, self._version, self._schema_url))
+
+    def __eq__(self, value: object) -> bool:
+        if not isinstance(value, InstrumentationScope):
+            return NotImplemented
+        return (
+            self._name,
+            self._version,
+            self._schema_url,
+            self._attributes,
+        ) == (
+            value._name,
+            value._version,
+            value._schema_url,
+            value._attributes,
+        )
+
+    def __lt__(self, value: object) -> bool:
+        if not isinstance(value, InstrumentationScope):
+            return NotImplemented
+        return (
+            self._name,
+            self._version,
+            self._schema_url,
+            self._attributes,
+        ) < (
+            value._name,
+            value._version,
+            value._schema_url,
+            value._attributes,
+        )
+
+    @property
+    def schema_url(self) -> Optional[str]:
+        return self._schema_url
+
+    @property
+    def version(self) -> Optional[str]:
+        return self._version
+
+    @property
+    def name(self) -> str:
+        return self._name
+
+    @property
+    def attributes(self) -> Attributes:
+        return self._attributes
+
+    def to_json(self, indent: Optional[int] = 4) -> str:
+        return dumps(
+            {
+                "name": self._name,
+                "version": self._version,
+                "schema_url": self._schema_url,
+                "attributes": (
+                    dict(self._attributes) if bool(self._attributes) else None
+                ),
+            },
+            indent=indent,
+        )
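+
+
+# For example, with no attributes set,
+# InstrumentationScope("my.library", "1.2.3").to_json(indent=None) returns:
+# {"name": "my.library", "version": "1.2.3", "schema_url": "", "attributes": null}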
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/version/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/version/__init__.py
new file mode 100644
index 00000000..09125bac
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/version/__init__.py
@@ -0,0 +1,15 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__version__ = "1.31.1"