diff options
Diffstat (limited to '.venv/lib/python3.12/site-packages/opentelemetry')
261 files changed, 41530 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/_events/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/_events/__init__.py new file mode 100644 index 00000000..e1e6a675 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/_events/__init__.py @@ -0,0 +1,221 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from abc import ABC, abstractmethod +from logging import getLogger +from os import environ +from typing import Any, Optional, cast + +from opentelemetry._logs import LogRecord +from opentelemetry._logs.severity import SeverityNumber +from opentelemetry.environment_variables import ( + _OTEL_PYTHON_EVENT_LOGGER_PROVIDER, +) +from opentelemetry.trace.span import TraceFlags +from opentelemetry.util._once import Once +from opentelemetry.util._providers import _load_provider +from opentelemetry.util.types import Attributes + +_logger = getLogger(__name__) + + +class Event(LogRecord): + def __init__( + self, + name: str, + timestamp: Optional[int] = None, + trace_id: Optional[int] = None, + span_id: Optional[int] = None, + trace_flags: Optional["TraceFlags"] = None, + body: Optional[Any] = None, + severity_number: Optional[SeverityNumber] = None, + attributes: Optional[Attributes] = None, + ): + attributes = attributes or {} + event_attributes = {**attributes, "event.name": name} + super().__init__( + timestamp=timestamp, + trace_id=trace_id, + span_id=span_id, + 
trace_flags=trace_flags, + body=body, # type: ignore + severity_number=severity_number, + attributes=event_attributes, + ) + self.name = name + + +class EventLogger(ABC): + def __init__( + self, + name: str, + version: Optional[str] = None, + schema_url: Optional[str] = None, + attributes: Optional[Attributes] = None, + ): + self._name = name + self._version = version + self._schema_url = schema_url + self._attributes = attributes + + @abstractmethod + def emit(self, event: "Event") -> None: + """Emits a :class:`Event` representing an event.""" + + +class NoOpEventLogger(EventLogger): + def emit(self, event: Event) -> None: + pass + + +class ProxyEventLogger(EventLogger): + def __init__( + self, + name: str, + version: Optional[str] = None, + schema_url: Optional[str] = None, + attributes: Optional[Attributes] = None, + ): + super().__init__( + name=name, + version=version, + schema_url=schema_url, + attributes=attributes, + ) + self._real_event_logger: Optional[EventLogger] = None + self._noop_event_logger = NoOpEventLogger(name) + + @property + def _event_logger(self) -> EventLogger: + if self._real_event_logger: + return self._real_event_logger + + if _EVENT_LOGGER_PROVIDER: + self._real_event_logger = _EVENT_LOGGER_PROVIDER.get_event_logger( + self._name, + self._version, + self._schema_url, + self._attributes, + ) + return self._real_event_logger + return self._noop_event_logger + + def emit(self, event: Event) -> None: + self._event_logger.emit(event) + + +class EventLoggerProvider(ABC): + @abstractmethod + def get_event_logger( + self, + name: str, + version: Optional[str] = None, + schema_url: Optional[str] = None, + attributes: Optional[Attributes] = None, + ) -> EventLogger: + """Returns an EventLoggerProvider for use.""" + + +class NoOpEventLoggerProvider(EventLoggerProvider): + def get_event_logger( + self, + name: str, + version: Optional[str] = None, + schema_url: Optional[str] = None, + attributes: Optional[Attributes] = None, + ) -> EventLogger: + 
return NoOpEventLogger( + name, version=version, schema_url=schema_url, attributes=attributes + ) + + +class ProxyEventLoggerProvider(EventLoggerProvider): + def get_event_logger( + self, + name: str, + version: Optional[str] = None, + schema_url: Optional[str] = None, + attributes: Optional[Attributes] = None, + ) -> EventLogger: + if _EVENT_LOGGER_PROVIDER: + return _EVENT_LOGGER_PROVIDER.get_event_logger( + name, + version=version, + schema_url=schema_url, + attributes=attributes, + ) + return ProxyEventLogger( + name, + version=version, + schema_url=schema_url, + attributes=attributes, + ) + + +_EVENT_LOGGER_PROVIDER_SET_ONCE = Once() +_EVENT_LOGGER_PROVIDER: Optional[EventLoggerProvider] = None +_PROXY_EVENT_LOGGER_PROVIDER = ProxyEventLoggerProvider() + + +def get_event_logger_provider() -> EventLoggerProvider: + global _EVENT_LOGGER_PROVIDER # pylint: disable=global-variable-not-assigned + if _EVENT_LOGGER_PROVIDER is None: + if _OTEL_PYTHON_EVENT_LOGGER_PROVIDER not in environ: + return _PROXY_EVENT_LOGGER_PROVIDER + + event_logger_provider: EventLoggerProvider = _load_provider( # type: ignore + _OTEL_PYTHON_EVENT_LOGGER_PROVIDER, "event_logger_provider" + ) + + _set_event_logger_provider(event_logger_provider, log=False) + + return cast("EventLoggerProvider", _EVENT_LOGGER_PROVIDER) + + +def _set_event_logger_provider( + event_logger_provider: EventLoggerProvider, log: bool +) -> None: + def set_elp() -> None: + global _EVENT_LOGGER_PROVIDER # pylint: disable=global-statement + _EVENT_LOGGER_PROVIDER = event_logger_provider + + did_set = _EVENT_LOGGER_PROVIDER_SET_ONCE.do_once(set_elp) + + if log and not did_set: + _logger.warning( + "Overriding of current EventLoggerProvider is not allowed" + ) + + +def set_event_logger_provider( + event_logger_provider: EventLoggerProvider, +) -> None: + _set_event_logger_provider(event_logger_provider, log=True) + + +def get_event_logger( + name: str, + version: Optional[str] = None, + schema_url: Optional[str] = None, 
+ attributes: Optional[Attributes] = None, + event_logger_provider: Optional[EventLoggerProvider] = None, +) -> "EventLogger": + if event_logger_provider is None: + event_logger_provider = get_event_logger_provider() + return event_logger_provider.get_event_logger( + name, + version, + schema_url, + attributes, + ) diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/_events/py.typed b/.venv/lib/python3.12/site-packages/opentelemetry/_events/py.typed new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/_events/py.typed diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/_logs/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/_logs/__init__.py new file mode 100644 index 00000000..aaf29e5f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/_logs/__init__.py @@ -0,0 +1,59 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +The OpenTelemetry logging API describes the classes used to generate logs and events. + +The :class:`.LoggerProvider` provides users access to the :class:`.Logger`. + +This module provides abstract (i.e. unimplemented) classes required for +logging, and a concrete no-op implementation :class:`.NoOpLogger` that allows applications +to use the API package alone without a supporting implementation. 
+ +To get a logger, you need to provide the package name from which you are +calling the logging APIs to OpenTelemetry by calling `LoggerProvider.get_logger` +with the calling module name and the version of your package. + +The following code shows how to obtain a logger using the global :class:`.LoggerProvider`:: + + from opentelemetry._logs import get_logger + + logger = get_logger("example-logger") + +.. versionadded:: 1.15.0 +""" + +from opentelemetry._logs._internal import ( + Logger, + LoggerProvider, + LogRecord, + NoOpLogger, + NoOpLoggerProvider, + get_logger, + get_logger_provider, + set_logger_provider, +) +from opentelemetry._logs.severity import SeverityNumber, std_to_otel + +__all__ = [ + "Logger", + "LoggerProvider", + "LogRecord", + "NoOpLogger", + "NoOpLoggerProvider", + "get_logger", + "get_logger_provider", + "set_logger_provider", + "SeverityNumber", + "std_to_otel", +] diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/_logs/_internal/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/_logs/_internal/__init__.py new file mode 100644 index 00000000..f20bd850 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/_logs/_internal/__init__.py @@ -0,0 +1,292 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +The OpenTelemetry logging API describes the classes used to generate logs and events. + +The :class:`.LoggerProvider` provides users access to the :class:`.Logger`. 
+ +This module provides abstract (i.e. unimplemented) classes required for +logging, and a concrete no-op implementation :class:`.NoOpLogger` that allows applications +to use the API package alone without a supporting implementation. + +To get a logger, you need to provide the package name from which you are +calling the logging APIs to OpenTelemetry by calling `LoggerProvider.get_logger` +with the calling module name and the version of your package. + +The following code shows how to obtain a logger using the global :class:`.LoggerProvider`:: + + from opentelemetry._logs import get_logger + + logger = get_logger("example-logger") + +.. versionadded:: 1.15.0 +""" + +from abc import ABC, abstractmethod +from logging import getLogger +from os import environ +from time import time_ns +from typing import Any, Optional, cast + +from opentelemetry._logs.severity import SeverityNumber +from opentelemetry.environment_variables import _OTEL_PYTHON_LOGGER_PROVIDER +from opentelemetry.trace.span import TraceFlags +from opentelemetry.util._once import Once +from opentelemetry.util._providers import _load_provider +from opentelemetry.util.types import Attributes + +_logger = getLogger(__name__) + + +class LogRecord(ABC): + """A LogRecord instance represents an event being logged. + + LogRecord instances are created and emitted via `Logger` + every time something is logged. They contain all the information + pertinent to the event being logged. 
+ """ + + def __init__( + self, + timestamp: Optional[int] = None, + observed_timestamp: Optional[int] = None, + trace_id: Optional[int] = None, + span_id: Optional[int] = None, + trace_flags: Optional["TraceFlags"] = None, + severity_text: Optional[str] = None, + severity_number: Optional[SeverityNumber] = None, + body: Optional[Any] = None, + attributes: Optional["Attributes"] = None, + ): + self.timestamp = timestamp + if observed_timestamp is None: + observed_timestamp = time_ns() + self.observed_timestamp = observed_timestamp + self.trace_id = trace_id + self.span_id = span_id + self.trace_flags = trace_flags + self.severity_text = severity_text + self.severity_number = severity_number + self.body = body # type: ignore + self.attributes = attributes + + +class Logger(ABC): + """Handles emitting events and logs via `LogRecord`.""" + + def __init__( + self, + name: str, + version: Optional[str] = None, + schema_url: Optional[str] = None, + attributes: Optional[Attributes] = None, + ) -> None: + super().__init__() + self._name = name + self._version = version + self._schema_url = schema_url + self._attributes = attributes + + @abstractmethod + def emit(self, record: "LogRecord") -> None: + """Emits a :class:`LogRecord` representing a log to the processing pipeline.""" + + +class NoOpLogger(Logger): + """The default Logger used when no Logger implementation is available. + + All operations are no-op. 
+ """ + + def emit(self, record: "LogRecord") -> None: + pass + + +class ProxyLogger(Logger): + def __init__( # pylint: disable=super-init-not-called + self, + name: str, + version: Optional[str] = None, + schema_url: Optional[str] = None, + attributes: Optional[Attributes] = None, + ): + self._name = name + self._version = version + self._schema_url = schema_url + self._attributes = attributes + self._real_logger: Optional[Logger] = None + self._noop_logger = NoOpLogger(name) + + @property + def _logger(self) -> Logger: + if self._real_logger: + return self._real_logger + + if _LOGGER_PROVIDER: + self._real_logger = _LOGGER_PROVIDER.get_logger( + self._name, + self._version, + self._schema_url, + self._attributes, + ) + return self._real_logger + return self._noop_logger + + def emit(self, record: LogRecord) -> None: + self._logger.emit(record) + + +class LoggerProvider(ABC): + """ + LoggerProvider is the entry point of the API. It provides access to Logger instances. + """ + + @abstractmethod + def get_logger( + self, + name: str, + version: Optional[str] = None, + schema_url: Optional[str] = None, + attributes: Optional[Attributes] = None, + ) -> Logger: + """Returns a `Logger` for use by the given instrumentation library. + + For any two calls it is undefined whether the same or different + `Logger` instances are returned, even for different library names. + + This function may return different `Logger` types (e.g. a no-op logger + vs. a functional logger). + + Args: + name: The name of the instrumenting module. + ``__name__`` may not be used as this can result in + different logger names if the loggers are in different files. + It is better to use a fixed string that can be imported where + needed and used consistently as the name of the logger. + + This should *not* be the name of the module that is + instrumented but the name of the module doing the instrumentation. + E.g., instead of ``"requests"``, use + ``"opentelemetry.instrumentation.requests"``. 
+ + version: Optional. The version string of the + instrumenting library. Usually this should be the same as + ``importlib.metadata.version(instrumenting_library_name)``. + + schema_url: Optional. Specifies the Schema URL of the emitted telemetry. + """ + + +class NoOpLoggerProvider(LoggerProvider): + """The default LoggerProvider used when no LoggerProvider implementation is available.""" + + def get_logger( + self, + name: str, + version: Optional[str] = None, + schema_url: Optional[str] = None, + attributes: Optional[Attributes] = None, + ) -> Logger: + """Returns a NoOpLogger.""" + return NoOpLogger( + name, version=version, schema_url=schema_url, attributes=attributes + ) + + +class ProxyLoggerProvider(LoggerProvider): + def get_logger( + self, + name: str, + version: Optional[str] = None, + schema_url: Optional[str] = None, + attributes: Optional[Attributes] = None, + ) -> Logger: + if _LOGGER_PROVIDER: + return _LOGGER_PROVIDER.get_logger( + name, + version=version, + schema_url=schema_url, + attributes=attributes, + ) + return ProxyLogger( + name, + version=version, + schema_url=schema_url, + attributes=attributes, + ) + + +_LOGGER_PROVIDER_SET_ONCE = Once() +_LOGGER_PROVIDER: Optional[LoggerProvider] = None +_PROXY_LOGGER_PROVIDER = ProxyLoggerProvider() + + +def get_logger_provider() -> LoggerProvider: + """Gets the current global :class:`~.LoggerProvider` object.""" + global _LOGGER_PROVIDER # pylint: disable=global-variable-not-assigned + if _LOGGER_PROVIDER is None: + if _OTEL_PYTHON_LOGGER_PROVIDER not in environ: + return _PROXY_LOGGER_PROVIDER + + logger_provider: LoggerProvider = _load_provider( # type: ignore + _OTEL_PYTHON_LOGGER_PROVIDER, "logger_provider" + ) + _set_logger_provider(logger_provider, log=False) + + # _LOGGER_PROVIDER will have been set by one thread + return cast("LoggerProvider", _LOGGER_PROVIDER) + + +def _set_logger_provider(logger_provider: LoggerProvider, log: bool) -> None: + def set_lp() -> None: + global _LOGGER_PROVIDER 
# pylint: disable=global-statement + _LOGGER_PROVIDER = logger_provider + + did_set = _LOGGER_PROVIDER_SET_ONCE.do_once(set_lp) + + if log and not did_set: + _logger.warning("Overriding of current LoggerProvider is not allowed") + + +def set_logger_provider(logger_provider: LoggerProvider) -> None: + """Sets the current global :class:`~.LoggerProvider` object. + + This can only be done once, a warning will be logged if any further attempt + is made. + """ + _set_logger_provider(logger_provider, log=True) + + +def get_logger( + instrumenting_module_name: str, + instrumenting_library_version: str = "", + logger_provider: Optional[LoggerProvider] = None, + schema_url: Optional[str] = None, + attributes: Optional[Attributes] = None, +) -> "Logger": + """Returns a `Logger` for use within a python process. + + This function is a convenience wrapper for + opentelemetry.sdk._logs.LoggerProvider.get_logger. + + If logger_provider param is omitted the current configured one is used. + """ + if logger_provider is None: + logger_provider = get_logger_provider() + return logger_provider.get_logger( + instrumenting_module_name, + instrumenting_library_version, + schema_url, + attributes, + ) diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/_logs/py.typed b/.venv/lib/python3.12/site-packages/opentelemetry/_logs/py.typed new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/_logs/py.typed diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/_logs/severity/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/_logs/severity/__init__.py new file mode 100644 index 00000000..1daaa19f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/_logs/severity/__init__.py @@ -0,0 +1,115 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import enum + + +class SeverityNumber(enum.Enum): + """Numerical value of severity. + + Smaller numerical values correspond to less severe events + (such as debug events), larger numerical values correspond + to more severe events (such as errors and critical events). + + See the `Log Data Model`_ spec for more info and how to map the + severity from source format to OTLP Model. + + .. _Log Data Model: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md#field-severitynumber + """ + + UNSPECIFIED = 0 + TRACE = 1 + TRACE2 = 2 + TRACE3 = 3 + TRACE4 = 4 + DEBUG = 5 + DEBUG2 = 6 + DEBUG3 = 7 + DEBUG4 = 8 + INFO = 9 + INFO2 = 10 + INFO3 = 11 + INFO4 = 12 + WARN = 13 + WARN2 = 14 + WARN3 = 15 + WARN4 = 16 + ERROR = 17 + ERROR2 = 18 + ERROR3 = 19 + ERROR4 = 20 + FATAL = 21 + FATAL2 = 22 + FATAL3 = 23 + FATAL4 = 24 + + +_STD_TO_OTEL = { + 10: SeverityNumber.DEBUG, + 11: SeverityNumber.DEBUG2, + 12: SeverityNumber.DEBUG3, + 13: SeverityNumber.DEBUG4, + 14: SeverityNumber.DEBUG4, + 15: SeverityNumber.DEBUG4, + 16: SeverityNumber.DEBUG4, + 17: SeverityNumber.DEBUG4, + 18: SeverityNumber.DEBUG4, + 19: SeverityNumber.DEBUG4, + 20: SeverityNumber.INFO, + 21: SeverityNumber.INFO2, + 22: SeverityNumber.INFO3, + 23: SeverityNumber.INFO4, + 24: SeverityNumber.INFO4, + 25: SeverityNumber.INFO4, + 26: SeverityNumber.INFO4, + 27: SeverityNumber.INFO4, + 28: SeverityNumber.INFO4, + 29: SeverityNumber.INFO4, + 30: SeverityNumber.WARN, + 31: SeverityNumber.WARN2, + 32: SeverityNumber.WARN3, + 33: SeverityNumber.WARN4, + 34: 
SeverityNumber.WARN4, + 35: SeverityNumber.WARN4, + 36: SeverityNumber.WARN4, + 37: SeverityNumber.WARN4, + 38: SeverityNumber.WARN4, + 39: SeverityNumber.WARN4, + 40: SeverityNumber.ERROR, + 41: SeverityNumber.ERROR2, + 42: SeverityNumber.ERROR3, + 43: SeverityNumber.ERROR4, + 44: SeverityNumber.ERROR4, + 45: SeverityNumber.ERROR4, + 46: SeverityNumber.ERROR4, + 47: SeverityNumber.ERROR4, + 48: SeverityNumber.ERROR4, + 49: SeverityNumber.ERROR4, + 50: SeverityNumber.FATAL, + 51: SeverityNumber.FATAL2, + 52: SeverityNumber.FATAL3, + 53: SeverityNumber.FATAL4, +} + + +def std_to_otel(levelno: int) -> SeverityNumber: + """ + Map python log levelno as defined in https://docs.python.org/3/library/logging.html#logging-levels + to OTel log severity number as defined here: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md#field-severitynumber + """ + if levelno < 10: + return SeverityNumber.UNSPECIFIED + if levelno > 53: + return SeverityNumber.FATAL4 + return _STD_TO_OTEL[levelno] diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/attributes/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/attributes/__init__.py new file mode 100644 index 00000000..49795298 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/attributes/__init__.py @@ -0,0 +1,204 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging +import threading +from collections import OrderedDict +from collections.abc import MutableMapping +from typing import Optional, Sequence, Tuple, Union + +from opentelemetry.util import types + +# bytes are accepted as a user supplied value for attributes but +# decoded to strings internally. +_VALID_ATTR_VALUE_TYPES = (bool, str, bytes, int, float) + + +_logger = logging.getLogger(__name__) + + +def _clean_attribute( + key: str, value: types.AttributeValue, max_len: Optional[int] +) -> Optional[Union[types.AttributeValue, Tuple[Union[str, int, float], ...]]]: + """Checks if attribute value is valid and cleans it if required. + + The function returns the cleaned value or None if the value is not valid. + + An attribute value is valid if it is either: + - A primitive type: string, boolean, double precision floating + point (IEEE 754-1985) or integer. + - An array of primitive type values. The array MUST be homogeneous, + i.e. it MUST NOT contain values of different types. + + An attribute needs cleansing if: + - Its length is greater than the maximum allowed length. + - It needs to be encoded/decoded e.g, bytes to strings. + """ + + if not (key and isinstance(key, str)): + _logger.warning("invalid key `%s`. must be non-empty string.", key) + return None + + if isinstance(value, _VALID_ATTR_VALUE_TYPES): + return _clean_attribute_value(value, max_len) + + if isinstance(value, Sequence): + sequence_first_valid_type = None + cleaned_seq = [] + + for element in value: + element = _clean_attribute_value(element, max_len) # type: ignore + if element is None: + cleaned_seq.append(element) + continue + + element_type = type(element) + # Reject attribute value if sequence contains a value with an incompatible type. + if element_type not in _VALID_ATTR_VALUE_TYPES: + _logger.warning( + "Invalid type %s in attribute '%s' value sequence. 
Expected one of " + "%s or None", + element_type.__name__, + key, + [ + valid_type.__name__ + for valid_type in _VALID_ATTR_VALUE_TYPES + ], + ) + return None + + # The type of the sequence must be homogeneous. The first non-None + # element determines the type of the sequence + if sequence_first_valid_type is None: + sequence_first_valid_type = element_type + # use equality instead of isinstance as isinstance(True, int) evaluates to True + elif element_type != sequence_first_valid_type: + _logger.warning( + "Attribute %r mixes types %s and %s in attribute value sequence", + key, + sequence_first_valid_type.__name__, + type(element).__name__, + ) + return None + + cleaned_seq.append(element) + + # Freeze mutable sequences defensively + return tuple(cleaned_seq) + + _logger.warning( + "Invalid type %s for attribute '%s' value. Expected one of %s or a " + "sequence of those types", + type(value).__name__, + key, + [valid_type.__name__ for valid_type in _VALID_ATTR_VALUE_TYPES], + ) + return None + + +def _clean_attribute_value( + value: types.AttributeValue, limit: Optional[int] +) -> Optional[types.AttributeValue]: + if value is None: + return None + + if isinstance(value, bytes): + try: + value = value.decode() + except UnicodeDecodeError: + _logger.warning("Byte attribute could not be decoded.") + return None + + if limit is not None and isinstance(value, str): + value = value[:limit] + return value + + +class BoundedAttributes(MutableMapping): # type: ignore + """An ordered dict with a fixed max capacity. + + Oldest elements are dropped when the dict is full and a new element is + added. 
+ """ + + def __init__( + self, + maxlen: Optional[int] = None, + attributes: types.Attributes = None, + immutable: bool = True, + max_value_len: Optional[int] = None, + ): + if maxlen is not None: + if not isinstance(maxlen, int) or maxlen < 0: + raise ValueError( + "maxlen must be valid int greater or equal to 0" + ) + self.maxlen = maxlen + self.dropped = 0 + self.max_value_len = max_value_len + # OrderedDict is not used until the maxlen is reached for efficiency. + + self._dict: Union[ + MutableMapping[str, types.AttributeValue], + OrderedDict[str, types.AttributeValue], + ] = {} + self._lock = threading.RLock() + if attributes: + for key, value in attributes.items(): + self[key] = value + self._immutable = immutable + + def __repr__(self) -> str: + return f"{dict(self._dict)}" + + def __getitem__(self, key: str) -> types.AttributeValue: + return self._dict[key] + + def __setitem__(self, key: str, value: types.AttributeValue) -> None: + if getattr(self, "_immutable", False): # type: ignore + raise TypeError + with self._lock: + if self.maxlen is not None and self.maxlen == 0: + self.dropped += 1 + return + + value = _clean_attribute(key, value, self.max_value_len) # type: ignore + if value is not None: + if key in self._dict: + del self._dict[key] + elif ( + self.maxlen is not None and len(self._dict) == self.maxlen + ): + if not isinstance(self._dict, OrderedDict): + self._dict = OrderedDict(self._dict) + self._dict.popitem(last=False) # type: ignore + self.dropped += 1 + + self._dict[key] = value # type: ignore + + def __delitem__(self, key: str) -> None: + if getattr(self, "_immutable", False): # type: ignore + raise TypeError + with self._lock: + del self._dict[key] + + def __iter__(self): # type: ignore + with self._lock: + return iter(self._dict.copy()) # type: ignore + + def __len__(self) -> int: + return len(self._dict) + + def copy(self): # type: ignore + return self._dict.copy() # type: ignore diff --git 
a/.venv/lib/python3.12/site-packages/opentelemetry/attributes/py.typed b/.venv/lib/python3.12/site-packages/opentelemetry/attributes/py.typed new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/attributes/py.typed diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/baggage/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/baggage/__init__.py new file mode 100644 index 00000000..9a740200 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/baggage/__init__.py @@ -0,0 +1,132 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from logging import getLogger +from re import compile +from types import MappingProxyType +from typing import Mapping, Optional + +from opentelemetry.context import create_key, get_value, set_value +from opentelemetry.context.context import Context +from opentelemetry.util.re import ( + _BAGGAGE_PROPERTY_FORMAT, + _KEY_FORMAT, + _VALUE_FORMAT, +) + +_BAGGAGE_KEY = create_key("baggage") +_logger = getLogger(__name__) + +_KEY_PATTERN = compile(_KEY_FORMAT) +_VALUE_PATTERN = compile(_VALUE_FORMAT) +_PROPERT_PATTERN = compile(_BAGGAGE_PROPERTY_FORMAT) + + +def get_all( + context: Optional[Context] = None, +) -> Mapping[str, object]: + """Returns the name/value pairs in the Baggage + + Args: + context: The Context to use. 
If not set, uses current Context + + Returns: + The name/value pairs in the Baggage + """ + baggage = get_value(_BAGGAGE_KEY, context=context) + if isinstance(baggage, dict): + return MappingProxyType(baggage) + return MappingProxyType({}) + + +def get_baggage( + name: str, context: Optional[Context] = None +) -> Optional[object]: + """Provides access to the value for a name/value pair in the + Baggage + + Args: + name: The name of the value to retrieve + context: The Context to use. If not set, uses current Context + + Returns: + The value associated with the given name, or null if the given name is + not present. + """ + return get_all(context=context).get(name) + + +def set_baggage( + name: str, value: object, context: Optional[Context] = None +) -> Context: + """Sets a value in the Baggage + + Args: + name: The name of the value to set + value: The value to set + context: The Context to use. If not set, uses current Context + + Returns: + A Context with the value updated + """ + baggage = dict(get_all(context=context)) + baggage[name] = value + return set_value(_BAGGAGE_KEY, baggage, context=context) + + +def remove_baggage(name: str, context: Optional[Context] = None) -> Context: + """Removes a value from the Baggage + + Args: + name: The name of the value to remove + context: The Context to use. If not set, uses current Context + + Returns: + A Context with the name/value removed + """ + baggage = dict(get_all(context=context)) + baggage.pop(name, None) + + return set_value(_BAGGAGE_KEY, baggage, context=context) + + +def clear(context: Optional[Context] = None) -> Context: + """Removes all values from the Baggage + + Args: + context: The Context to use. 
If not set, uses current Context + + Returns: + A Context with all baggage entries removed + """ + return set_value(_BAGGAGE_KEY, {}, context=context) + + +def _is_valid_key(name: str) -> bool: + return _KEY_PATTERN.fullmatch(str(name)) is not None + + +def _is_valid_value(value: object) -> bool: + parts = str(value).split(";") + is_valid_value = _VALUE_PATTERN.fullmatch(parts[0]) is not None + if len(parts) > 1: # one or more properties metadata + for property in parts[1:]: + if _PROPERT_PATTERN.fullmatch(property) is None: + is_valid_value = False + break + return is_valid_value + + +def _is_valid_pair(key: str, value: str) -> bool: + return _is_valid_key(key) and _is_valid_value(value) diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/baggage/propagation/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/baggage/propagation/__init__.py new file mode 100644 index 00000000..49fb378e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/baggage/propagation/__init__.py @@ -0,0 +1,146 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
#
from logging import getLogger
from re import split
from typing import Iterable, List, Mapping, Optional, Set
from urllib.parse import quote_plus, unquote_plus

from opentelemetry.baggage import _is_valid_pair, get_all, set_baggage
from opentelemetry.context import get_current
from opentelemetry.context.context import Context
from opentelemetry.propagators import textmap
from opentelemetry.util.re import _DELIMITER_PATTERN

_logger = getLogger(__name__)


class W3CBaggagePropagator(textmap.TextMapPropagator):
    """Extracts and injects Baggage which is used to annotate telemetry."""

    # Limits taken from the W3C Baggage specification.
    _MAX_HEADER_LENGTH = 8192
    _MAX_PAIR_LENGTH = 4096
    _MAX_PAIRS = 180
    _BAGGAGE_HEADER_NAME = "baggage"

    def extract(
        self,
        carrier: textmap.CarrierT,
        context: Optional[Context] = None,
        getter: textmap.Getter[textmap.CarrierT] = textmap.default_getter,
    ) -> Context:
        """Extract Baggage from the carrier.

        See
        `opentelemetry.propagators.textmap.TextMapPropagator.extract`
        """

        if context is None:
            context = get_current()

        header = _extract_first_element(
            getter.get(carrier, self._BAGGAGE_HEADER_NAME)
        )

        # No header at all: nothing to merge into the context.
        if not header:
            return context

        # Oversized headers are dropped wholesale per the spec limits.
        if len(header) > self._MAX_HEADER_LENGTH:
            _logger.warning(
                "Baggage header `%s` exceeded the maximum number of bytes per baggage-string",
                header,
            )
            return context

        members: List[str] = split(_DELIMITER_PATTERN, header)
        # Budget of list-members we are willing to accept; the header is
        # still processed (up to the cap) even when it has too many.
        remaining_slots = self._MAX_PAIRS

        if len(members) > self._MAX_PAIRS:
            _logger.warning(
                "Baggage header `%s` exceeded the maximum number of list-members",
                header,
            )

        for member in members:
            # Skip (but keep processing) members that violate per-pair limits
            # or do not parse as `name=value`.
            if len(member) > self._MAX_PAIR_LENGTH:
                _logger.warning(
                    "Baggage entry `%s` exceeded the maximum number of bytes per list-member",
                    member,
                )
                continue
            if not member:  # empty string
                continue
            try:
                name, value = member.split("=", 1)
            except Exception:  # pylint: disable=broad-exception-caught
                _logger.warning(
                    "Baggage list-member `%s` doesn't match the format", member
                )
                continue

            if not _is_valid_pair(name, value):
                _logger.warning("Invalid baggage entry: `%s`", member)
                continue

            context = set_baggage(
                unquote_plus(name).strip(),
                unquote_plus(value).strip(),
                context=context,
            )
            remaining_slots -= 1
            if remaining_slots == 0:
                break

        return context

    def inject(
        self,
        carrier: textmap.CarrierT,
        context: Optional[Context] = None,
        setter: textmap.Setter[textmap.CarrierT] = textmap.default_setter,
    ) -> None:
        """Injects Baggage into the carrier.

        See
        `opentelemetry.propagators.textmap.TextMapPropagator.inject`
        """
        entries = get_all(context=context)
        # An empty baggage produces no header at all.
        if not entries:
            return

        setter.set(
            carrier, self._BAGGAGE_HEADER_NAME, _format_baggage(entries)
        )

    @property
    def fields(self) -> Set[str]:
        """Returns a set with the fields set in `inject`."""
        return {self._BAGGAGE_HEADER_NAME}


def _format_baggage(baggage_entries: Mapping[str, object]) -> str:
    # Percent-encode each side of every pair and join per the W3C grammar.
    return ",".join(
        f"{quote_plus(str(key))}={quote_plus(str(value))}"
        for key, value in baggage_entries.items()
    )


def _extract_first_element(
    items: Optional[Iterable[textmap.CarrierT]],
) -> Optional[textmap.CarrierT]:
    # None carrier value -> no header; otherwise take the first item, if any.
    return None if items is None else next(iter(items), None)


# ---- vendored-diff boundary: new (empty) file
# b/.venv/lib/python3.12/site-packages/opentelemetry/baggage/py.typed ----
# ---- vendored-diff boundary: new file
# b/.venv/lib/python3.12/site-packages/opentelemetry/context/__init__.py ----
#
Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import logging +import typing +from contextvars import Token +from os import environ +from uuid import uuid4 + +# pylint: disable=wrong-import-position +from opentelemetry.context.context import Context, _RuntimeContext # noqa +from opentelemetry.environment_variables import OTEL_PYTHON_CONTEXT +from opentelemetry.util._importlib_metadata import entry_points + +logger = logging.getLogger(__name__) + + +def _load_runtime_context() -> _RuntimeContext: + """Initialize the RuntimeContext + + Returns: + An instance of RuntimeContext. 
+ """ + + # FIXME use a better implementation of a configuration manager + # to avoid having to get configuration values straight from + # environment variables + default_context = "contextvars_context" + + configured_context = environ.get(OTEL_PYTHON_CONTEXT, default_context) # type: str + + try: + return next( # type: ignore + iter( # type: ignore + entry_points( # type: ignore + group="opentelemetry_context", + name=configured_context, + ) + ) + ).load()() + except Exception: # pylint: disable=broad-exception-caught + logger.exception( + "Failed to load context: %s, fallback to %s", + configured_context, + default_context, + ) + return next( # type: ignore + iter( # type: ignore + entry_points( # type: ignore + group="opentelemetry_context", + name=default_context, + ) + ) + ).load()() + + +_RUNTIME_CONTEXT = _load_runtime_context() + + +def create_key(keyname: str) -> str: + """To allow cross-cutting concern to control access to their local state, + the RuntimeContext API provides a function which takes a keyname as input, + and returns a unique key. + Args: + keyname: The key name is for debugging purposes and is not required to be unique. + Returns: + A unique string representing the newly created key. + """ + return keyname + "-" + str(uuid4()) + + +def get_value(key: str, context: typing.Optional[Context] = None) -> "object": + """To access the local state of a concern, the RuntimeContext API + provides a function which takes a context and a key as input, + and returns a value. + + Args: + key: The key of the value to retrieve. + context: The context from which to retrieve the value, if None, the current context is used. + + Returns: + The value associated with the key. 
+ """ + return context.get(key) if context is not None else get_current().get(key) + + +def set_value( + key: str, value: "object", context: typing.Optional[Context] = None +) -> Context: + """To record the local state of a cross-cutting concern, the + RuntimeContext API provides a function which takes a context, a + key, and a value as input, and returns an updated context + which contains the new value. + + Args: + key: The key of the entry to set. + value: The value of the entry to set. + context: The context to copy, if None, the current context is used. + + Returns: + A new `Context` containing the value set. + """ + if context is None: + context = get_current() + new_values = context.copy() + new_values[key] = value + return Context(new_values) + + +def get_current() -> Context: + """To access the context associated with program execution, + the Context API provides a function which takes no arguments + and returns a Context. + + Returns: + The current `Context` object. + """ + return _RUNTIME_CONTEXT.get_current() + + +def attach(context: Context) -> Token[Context]: + """Associates a Context with the caller's current execution unit. Returns + a token that can be used to restore the previous Context. + + Args: + context: The Context to set as current. + + Returns: + A token that can be used with `detach` to reset the context. + """ + return _RUNTIME_CONTEXT.attach(context) + + +def detach(token: Token[Context]) -> None: + """Resets the Context associated with the caller's current execution unit + to the value it had before attaching a specified Context. + + Args: + token: The Token that was returned by a previous call to attach a Context. + """ + try: + _RUNTIME_CONTEXT.detach(token) + except Exception: # pylint: disable=broad-exception-caught + logger.exception("Failed to detach context") + + +# FIXME This is a temporary location for the suppress instrumentation key. 
+# Once the decision around how to suppress instrumentation is made in the +# spec, this key should be moved accordingly. +_SUPPRESS_INSTRUMENTATION_KEY = create_key("suppress_instrumentation") +_SUPPRESS_HTTP_INSTRUMENTATION_KEY = create_key( + "suppress_http_instrumentation" +) + +__all__ = [ + "Context", + "attach", + "create_key", + "detach", + "get_current", + "get_value", + "set_value", +] diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/context/context.py b/.venv/lib/python3.12/site-packages/opentelemetry/context/context.py new file mode 100644 index 00000000..c1ef9cfb --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/context/context.py @@ -0,0 +1,56 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import typing +from abc import ABC, abstractmethod +from contextvars import Token + + +class Context(typing.Dict[str, object]): + def __setitem__(self, key: str, value: object) -> None: + raise ValueError + + +class _RuntimeContext(ABC): + """The RuntimeContext interface provides a wrapper for the different + mechanisms that are used to propagate context in Python. + Implementations can be made available via entry_points and + selected through environment variables. + """ + + @abstractmethod + def attach(self, context: Context) -> Token[Context]: + """Sets the current `Context` object. Returns a + token that can be used to reset to the previous `Context`. 
+ + Args: + context: The Context to set. + """ + + @abstractmethod + def get_current(self) -> Context: + """Returns the current `Context` object.""" + + @abstractmethod + def detach(self, token: Token[Context]) -> None: + """Resets Context to a previous value + + Args: + token: A reference to a previous Context. + """ + + +__all__ = ["Context"] diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/context/contextvars_context.py b/.venv/lib/python3.12/site-packages/opentelemetry/context/contextvars_context.py new file mode 100644 index 00000000..dceee263 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/context/contextvars_context.py @@ -0,0 +1,56 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from contextvars import ContextVar, Token + +from opentelemetry.context.context import Context, _RuntimeContext + + +class ContextVarsRuntimeContext(_RuntimeContext): + """An implementation of the RuntimeContext interface which wraps ContextVar under + the hood. This is the preferred implementation for usage with Python 3.5+ + """ + + _CONTEXT_KEY = "current_context" + + def __init__(self) -> None: + self._current_context = ContextVar( + self._CONTEXT_KEY, default=Context() + ) + + def attach(self, context: Context) -> Token[Context]: + """Sets the current `Context` object. Returns a + token that can be used to reset to the previous `Context`. 
+ + Args: + context: The Context to set. + """ + return self._current_context.set(context) + + def get_current(self) -> Context: + """Returns the current `Context` object.""" + return self._current_context.get() + + def detach(self, token: Token[Context]) -> None: + """Resets Context to a previous value + + Args: + token: A reference to a previous Context. + """ + self._current_context.reset(token) + + +__all__ = ["ContextVarsRuntimeContext"] diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/context/py.typed b/.venv/lib/python3.12/site-packages/opentelemetry/context/py.typed new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/context/py.typed diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/environment_variables/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/environment_variables/__init__.py new file mode 100644 index 00000000..bd8ed1cb --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/environment_variables/__init__.py @@ -0,0 +1,88 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +OTEL_LOGS_EXPORTER = "OTEL_LOGS_EXPORTER" +""" +.. envvar:: OTEL_LOGS_EXPORTER + +""" + +OTEL_METRICS_EXPORTER = "OTEL_METRICS_EXPORTER" +""" +.. envvar:: OTEL_METRICS_EXPORTER + +Specifies which exporter is used for metrics. 
See `General SDK Configuration +<https://opentelemetry.io/docs/concepts/sdk-configuration/general-sdk-configuration/#otel_metrics_exporter>`_. + +**Default value:** ``"otlp"`` + +**Example:** + +``export OTEL_METRICS_EXPORTER="prometheus"`` + +Accepted values for ``OTEL_METRICS_EXPORTER`` are: + +- ``"otlp"`` +- ``"prometheus"`` +- ``"none"``: No automatically configured exporter for metrics. + +.. note:: + + Exporter packages may add entry points for group ``opentelemetry_metrics_exporter`` which + can then be used with this environment variable by name. The entry point should point to + either a `opentelemetry.sdk.metrics.export.MetricExporter` (push exporter) or + `opentelemetry.sdk.metrics.export.MetricReader` (pull exporter) subclass; it must be + constructable without any required arguments. This mechanism is considered experimental and + may change in subsequent releases. +""" + +OTEL_PROPAGATORS = "OTEL_PROPAGATORS" +""" +.. envvar:: OTEL_PROPAGATORS +""" + +OTEL_PYTHON_CONTEXT = "OTEL_PYTHON_CONTEXT" +""" +.. envvar:: OTEL_PYTHON_CONTEXT +""" + +OTEL_PYTHON_ID_GENERATOR = "OTEL_PYTHON_ID_GENERATOR" +""" +.. envvar:: OTEL_PYTHON_ID_GENERATOR +""" + +OTEL_TRACES_EXPORTER = "OTEL_TRACES_EXPORTER" +""" +.. envvar:: OTEL_TRACES_EXPORTER +""" + +OTEL_PYTHON_TRACER_PROVIDER = "OTEL_PYTHON_TRACER_PROVIDER" +""" +.. envvar:: OTEL_PYTHON_TRACER_PROVIDER +""" + +OTEL_PYTHON_METER_PROVIDER = "OTEL_PYTHON_METER_PROVIDER" +""" +.. envvar:: OTEL_PYTHON_METER_PROVIDER +""" + +_OTEL_PYTHON_LOGGER_PROVIDER = "OTEL_PYTHON_LOGGER_PROVIDER" +""" +.. envvar:: OTEL_PYTHON_LOGGER_PROVIDER +""" + +_OTEL_PYTHON_EVENT_LOGGER_PROVIDER = "OTEL_PYTHON_EVENT_LOGGER_PROVIDER" +""" +.. 
envvar:: OTEL_PYTHON_EVENT_LOGGER_PROVIDER +""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/environment_variables/py.typed b/.venv/lib/python3.12/site-packages/opentelemetry/environment_variables/py.typed new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/environment_variables/py.typed diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/_semconv.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/_semconv.py new file mode 100644 index 00000000..091c8765 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/_semconv.py @@ -0,0 +1,435 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import threading +from enum import Enum + +from opentelemetry.instrumentation.utils import http_status_to_status_code +from opentelemetry.semconv.attributes.client_attributes import ( + CLIENT_ADDRESS, + CLIENT_PORT, +) +from opentelemetry.semconv.attributes.error_attributes import ERROR_TYPE +from opentelemetry.semconv.attributes.http_attributes import ( + HTTP_REQUEST_METHOD, + HTTP_REQUEST_METHOD_ORIGINAL, + HTTP_RESPONSE_STATUS_CODE, + HTTP_ROUTE, +) +from opentelemetry.semconv.attributes.network_attributes import ( + NETWORK_PROTOCOL_VERSION, +) +from opentelemetry.semconv.attributes.server_attributes import ( + SERVER_ADDRESS, + SERVER_PORT, +) +from opentelemetry.semconv.attributes.url_attributes import ( + URL_FULL, + URL_PATH, + URL_QUERY, + URL_SCHEME, +) +from opentelemetry.semconv.attributes.user_agent_attributes import ( + USER_AGENT_ORIGINAL, +) +from opentelemetry.semconv.trace import SpanAttributes +from opentelemetry.trace.status import Status, StatusCode + +# These lists represent attributes for metrics that are currently supported + +_client_duration_attrs_old = [ + SpanAttributes.HTTP_STATUS_CODE, + SpanAttributes.HTTP_HOST, + SpanAttributes.NET_PEER_PORT, + SpanAttributes.NET_PEER_NAME, + SpanAttributes.HTTP_METHOD, + SpanAttributes.HTTP_FLAVOR, + SpanAttributes.HTTP_SCHEME, +] + +_client_duration_attrs_new = [ + ERROR_TYPE, + HTTP_REQUEST_METHOD, + HTTP_RESPONSE_STATUS_CODE, + NETWORK_PROTOCOL_VERSION, + SERVER_ADDRESS, + SERVER_PORT, + # TODO: Support opt-in for scheme in new semconv + # URL_SCHEME, +] + +_server_duration_attrs_old = [ + SpanAttributes.HTTP_METHOD, + SpanAttributes.HTTP_HOST, + SpanAttributes.HTTP_SCHEME, + SpanAttributes.HTTP_STATUS_CODE, + SpanAttributes.HTTP_FLAVOR, + SpanAttributes.HTTP_SERVER_NAME, + SpanAttributes.NET_HOST_NAME, + SpanAttributes.NET_HOST_PORT, +] + +_server_duration_attrs_new = [ + ERROR_TYPE, + HTTP_REQUEST_METHOD, + HTTP_RESPONSE_STATUS_CODE, + HTTP_ROUTE, + NETWORK_PROTOCOL_VERSION, + 
URL_SCHEME, +] + +_server_active_requests_count_attrs_old = [ + SpanAttributes.HTTP_METHOD, + SpanAttributes.HTTP_HOST, + SpanAttributes.HTTP_SCHEME, + SpanAttributes.HTTP_FLAVOR, + SpanAttributes.HTTP_SERVER_NAME, +] + +_server_active_requests_count_attrs_new = [ + HTTP_REQUEST_METHOD, + URL_SCHEME, + # TODO: Support SERVER_ADDRESS AND SERVER_PORT +] + +OTEL_SEMCONV_STABILITY_OPT_IN = "OTEL_SEMCONV_STABILITY_OPT_IN" + + +class _OpenTelemetryStabilitySignalType: + HTTP = "http" + DATABASE = "database" + + +class _StabilityMode(Enum): + DEFAULT = "default" + HTTP = "http" + HTTP_DUP = "http/dup" + DATABASE = "database" + DATABASE_DUP = "database/dup" + + +def _report_new(mode: _StabilityMode): + return mode != _StabilityMode.DEFAULT + + +def _report_old(mode: _StabilityMode): + return mode not in (_StabilityMode.HTTP, _StabilityMode.DATABASE) + + +class _OpenTelemetrySemanticConventionStability: + _initialized = False + _lock = threading.Lock() + _OTEL_SEMCONV_STABILITY_SIGNAL_MAPPING = {} + + @classmethod + def _initialize(cls): + with cls._lock: + if cls._initialized: + return + + # Users can pass in comma delimited string for opt-in options + # Only values for http and database stability are supported for now + opt_in = os.environ.get(OTEL_SEMCONV_STABILITY_OPT_IN) + + if not opt_in: + # early return in case of default + cls._OTEL_SEMCONV_STABILITY_SIGNAL_MAPPING = { + _OpenTelemetryStabilitySignalType.HTTP: _StabilityMode.DEFAULT, + _OpenTelemetryStabilitySignalType.DATABASE: _StabilityMode.DEFAULT, + } + cls._initialized = True + return + + opt_in_list = [s.strip() for s in opt_in.split(",")] + + cls._OTEL_SEMCONV_STABILITY_SIGNAL_MAPPING[ + _OpenTelemetryStabilitySignalType.HTTP + ] = cls._filter_mode( + opt_in_list, _StabilityMode.HTTP, _StabilityMode.HTTP_DUP + ) + + cls._OTEL_SEMCONV_STABILITY_SIGNAL_MAPPING[ + _OpenTelemetryStabilitySignalType.DATABASE + ] = cls._filter_mode( + opt_in_list, + _StabilityMode.DATABASE, + _StabilityMode.DATABASE_DUP, + ) + + 
cls._initialized = True + + @staticmethod + def _filter_mode(opt_in_list, stable_mode, dup_mode): + # Process semconv stability opt-in + # http/dup,database/dup has higher precedence over http,database + if dup_mode.value in opt_in_list: + return dup_mode + + return ( + stable_mode + if stable_mode.value in opt_in_list + else _StabilityMode.DEFAULT + ) + + @classmethod + def _get_opentelemetry_stability_opt_in_mode( + cls, signal_type: _OpenTelemetryStabilitySignalType + ) -> _StabilityMode: + # Get OpenTelemetry opt-in mode based off of signal type (http, messaging, etc.) + return cls._OTEL_SEMCONV_STABILITY_SIGNAL_MAPPING.get( + signal_type, _StabilityMode.DEFAULT + ) + + +def _filter_semconv_duration_attrs( + attrs, + old_attrs, + new_attrs, + sem_conv_opt_in_mode=_StabilityMode.DEFAULT, +): + filtered_attrs = {} + # duration is two different metrics depending on sem_conv_opt_in_mode, so no DUP attributes + allowed_attributes = ( + new_attrs if sem_conv_opt_in_mode == _StabilityMode.HTTP else old_attrs + ) + for key, val in attrs.items(): + if key in allowed_attributes: + filtered_attrs[key] = val + return filtered_attrs + + +def _filter_semconv_active_request_count_attr( + attrs, + old_attrs, + new_attrs, + sem_conv_opt_in_mode=_StabilityMode.DEFAULT, +): + filtered_attrs = {} + if _report_old(sem_conv_opt_in_mode): + for key, val in attrs.items(): + if key in old_attrs: + filtered_attrs[key] = val + if _report_new(sem_conv_opt_in_mode): + for key, val in attrs.items(): + if key in new_attrs: + filtered_attrs[key] = val + return filtered_attrs + + +def set_string_attribute(result, key, value): + if value: + result[key] = value + + +def set_int_attribute(result, key, value): + if value: + try: + result[key] = int(value) + except ValueError: + return + + +def _set_http_method(result, original, normalized, sem_conv_opt_in_mode): + original = original.strip() + normalized = normalized.strip() + # See 
https://github.com/open-telemetry/semantic-conventions/blob/main/docs/http/http-spans.md#common-attributes + # Method is case sensitive. "http.request.method_original" should not be sanitized or automatically capitalized. + if original != normalized and _report_new(sem_conv_opt_in_mode): + set_string_attribute(result, HTTP_REQUEST_METHOD_ORIGINAL, original) + + if _report_old(sem_conv_opt_in_mode): + set_string_attribute(result, SpanAttributes.HTTP_METHOD, normalized) + if _report_new(sem_conv_opt_in_mode): + set_string_attribute(result, HTTP_REQUEST_METHOD, normalized) + + +def _set_http_status_code(result, code, sem_conv_opt_in_mode): + if _report_old(sem_conv_opt_in_mode): + set_int_attribute(result, SpanAttributes.HTTP_STATUS_CODE, code) + if _report_new(sem_conv_opt_in_mode): + set_int_attribute(result, HTTP_RESPONSE_STATUS_CODE, code) + + +def _set_http_url(result, url, sem_conv_opt_in_mode): + if _report_old(sem_conv_opt_in_mode): + set_string_attribute(result, SpanAttributes.HTTP_URL, url) + if _report_new(sem_conv_opt_in_mode): + set_string_attribute(result, URL_FULL, url) + + +def _set_http_scheme(result, scheme, sem_conv_opt_in_mode): + if _report_old(sem_conv_opt_in_mode): + set_string_attribute(result, SpanAttributes.HTTP_SCHEME, scheme) + if _report_new(sem_conv_opt_in_mode): + set_string_attribute(result, URL_SCHEME, scheme) + + +def _set_http_flavor_version(result, version, sem_conv_opt_in_mode): + if _report_old(sem_conv_opt_in_mode): + set_string_attribute(result, SpanAttributes.HTTP_FLAVOR, version) + if _report_new(sem_conv_opt_in_mode): + set_string_attribute(result, NETWORK_PROTOCOL_VERSION, version) + + +def _set_http_user_agent(result, user_agent, sem_conv_opt_in_mode): + if _report_old(sem_conv_opt_in_mode): + set_string_attribute( + result, SpanAttributes.HTTP_USER_AGENT, user_agent + ) + if _report_new(sem_conv_opt_in_mode): + set_string_attribute(result, USER_AGENT_ORIGINAL, user_agent) + + +# Client + + +def 
_set_http_host_client(result, host, sem_conv_opt_in_mode): + if _report_old(sem_conv_opt_in_mode): + set_string_attribute(result, SpanAttributes.HTTP_HOST, host) + if _report_new(sem_conv_opt_in_mode): + set_string_attribute(result, SERVER_ADDRESS, host) + + +def _set_http_net_peer_name_client(result, peer_name, sem_conv_opt_in_mode): + if _report_old(sem_conv_opt_in_mode): + set_string_attribute(result, SpanAttributes.NET_PEER_NAME, peer_name) + if _report_new(sem_conv_opt_in_mode): + set_string_attribute(result, SERVER_ADDRESS, peer_name) + + +def _set_http_peer_port_client(result, port, sem_conv_opt_in_mode): + if _report_old(sem_conv_opt_in_mode): + set_int_attribute(result, SpanAttributes.NET_PEER_PORT, port) + if _report_new(sem_conv_opt_in_mode): + set_int_attribute(result, SERVER_PORT, port) + + +def _set_http_network_protocol_version(result, version, sem_conv_opt_in_mode): + if _report_old(sem_conv_opt_in_mode): + set_string_attribute(result, SpanAttributes.HTTP_FLAVOR, version) + if _report_new(sem_conv_opt_in_mode): + set_string_attribute(result, NETWORK_PROTOCOL_VERSION, version) + + +# Server + + +def _set_http_net_host(result, host, sem_conv_opt_in_mode): + if _report_old(sem_conv_opt_in_mode): + set_string_attribute(result, SpanAttributes.NET_HOST_NAME, host) + if _report_new(sem_conv_opt_in_mode): + set_string_attribute(result, SERVER_ADDRESS, host) + + +def _set_http_net_host_port(result, port, sem_conv_opt_in_mode): + if _report_old(sem_conv_opt_in_mode): + set_int_attribute(result, SpanAttributes.NET_HOST_PORT, port) + if _report_new(sem_conv_opt_in_mode): + set_int_attribute(result, SERVER_PORT, port) + + +def _set_http_target(result, target, path, query, sem_conv_opt_in_mode): + if _report_old(sem_conv_opt_in_mode): + set_string_attribute(result, SpanAttributes.HTTP_TARGET, target) + if _report_new(sem_conv_opt_in_mode): + if path: + set_string_attribute(result, URL_PATH, path) + if query: + set_string_attribute(result, URL_QUERY, query) + + 
+def _set_http_host_server(result, host, sem_conv_opt_in_mode): + if _report_old(sem_conv_opt_in_mode): + set_string_attribute(result, SpanAttributes.HTTP_HOST, host) + if _report_new(sem_conv_opt_in_mode): + set_string_attribute(result, CLIENT_ADDRESS, host) + + +# net.peer.ip -> net.sock.peer.addr +# https://github.com/open-telemetry/semantic-conventions/blob/40db676ca0e735aa84f242b5a0fb14e49438b69b/schemas/1.15.0#L18 +# net.sock.peer.addr -> client.socket.address for server spans (TODO) AND client.address if missing +# https://github.com/open-telemetry/semantic-conventions/blob/v1.21.0/CHANGELOG.md#v1210-2023-07-13 +# https://github.com/open-telemetry/semantic-conventions/blob/main/docs/non-normative/http-migration.md#common-attributes-across-http-client-and-server-spans +def _set_http_peer_ip_server(result, ip, sem_conv_opt_in_mode): + if _report_old(sem_conv_opt_in_mode): + set_string_attribute(result, SpanAttributes.NET_PEER_IP, ip) + if _report_new(sem_conv_opt_in_mode): + # Only populate if not already populated + if not result.get(CLIENT_ADDRESS): + set_string_attribute(result, CLIENT_ADDRESS, ip) + + +def _set_http_peer_port_server(result, port, sem_conv_opt_in_mode): + if _report_old(sem_conv_opt_in_mode): + set_int_attribute(result, SpanAttributes.NET_PEER_PORT, port) + if _report_new(sem_conv_opt_in_mode): + set_int_attribute(result, CLIENT_PORT, port) + + +def _set_http_net_peer_name_server(result, name, sem_conv_opt_in_mode): + if _report_old(sem_conv_opt_in_mode): + set_string_attribute(result, SpanAttributes.NET_PEER_NAME, name) + if _report_new(sem_conv_opt_in_mode): + set_string_attribute(result, CLIENT_ADDRESS, name) + + +def _set_status( + span, + metrics_attributes: dict, + status_code: int, + status_code_str: str, + server_span: bool = True, + sem_conv_opt_in_mode: _StabilityMode = _StabilityMode.DEFAULT, +): + if status_code < 0: + if _report_new(sem_conv_opt_in_mode): + metrics_attributes[ERROR_TYPE] = status_code_str + if 
span.is_recording(): + if _report_new(sem_conv_opt_in_mode): + span.set_attribute(ERROR_TYPE, status_code_str) + span.set_status( + Status( + StatusCode.ERROR, + "Non-integer HTTP status: " + status_code_str, + ) + ) + else: + status = http_status_to_status_code( + status_code, server_span=server_span + ) + + if _report_old(sem_conv_opt_in_mode): + if span.is_recording(): + span.set_attribute( + SpanAttributes.HTTP_STATUS_CODE, status_code + ) + metrics_attributes[SpanAttributes.HTTP_STATUS_CODE] = status_code + if _report_new(sem_conv_opt_in_mode): + if span.is_recording(): + span.set_attribute(HTTP_RESPONSE_STATUS_CODE, status_code) + metrics_attributes[HTTP_RESPONSE_STATUS_CODE] = status_code + if status == StatusCode.ERROR: + if span.is_recording(): + span.set_attribute(ERROR_TYPE, status_code_str) + metrics_attributes[ERROR_TYPE] = status_code_str + if span.is_recording(): + span.set_status(Status(status)) + + +# Get schema version based off of opt-in mode +def _get_schema_url(mode: _StabilityMode) -> str: + if mode is _StabilityMode.DEFAULT: + return "https://opentelemetry.io/schemas/1.11.0" + return SpanAttributes.SCHEMA_URL diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/asgi/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/asgi/__init__.py new file mode 100644 index 00000000..b0600951 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/asgi/__init__.py @@ -0,0 +1,991 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# pylint: disable=too-many-locals + +""" +The opentelemetry-instrumentation-asgi package provides an ASGI middleware that can be used +on any ASGI framework (such as Django-channels / Quart) to track request timing through OpenTelemetry. + +Usage (Quart) +------------- + +.. code-block:: python + + from quart import Quart + from opentelemetry.instrumentation.asgi import OpenTelemetryMiddleware + + app = Quart(__name__) + app.asgi_app = OpenTelemetryMiddleware(app.asgi_app) + + @app.route("/") + async def hello(): + return "Hello!" + + if __name__ == "__main__": + app.run(debug=True) + + +Usage (Django 3.0) +------------------ + +Modify the application's ``asgi.py`` file as shown below. + +.. code-block:: python + + import os + from django.core.asgi import get_asgi_application + from opentelemetry.instrumentation.asgi import OpenTelemetryMiddleware + + os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'asgi_example.settings') + + application = get_asgi_application() + application = OpenTelemetryMiddleware(application) + + +Usage (Raw ASGI) +---------------- + +.. code-block:: python + + from opentelemetry.instrumentation.asgi import OpenTelemetryMiddleware + + app = ... # An ASGI application. + app = OpenTelemetryMiddleware(app) + + +Configuration +------------- + +Request/Response hooks +********************** + +This instrumentation supports request and response hooks. These are functions that get called +right after a span is created for a request and right before the span is finished for the response. + +- The server request hook is passed a server span and ASGI scope object for every incoming request. +- The client request hook is called with the internal span and an ASGI scope when the method ``receive`` is called. +- The client response hook is called with the internal span and an ASGI event when the method ``send`` is called. + +For example, + +.. 
code-block:: python
+
+    def server_request_hook(span: Span, scope: dict[str, Any]):
+        if span and span.is_recording():
+            span.set_attribute("custom_user_attribute_from_request_hook", "some-value")
+
+    def client_request_hook(span: Span, scope: dict[str, Any], message: dict[str, Any]):
+        if span and span.is_recording():
+            span.set_attribute("custom_user_attribute_from_client_request_hook", "some-value")
+
+    def client_response_hook(span: Span, scope: dict[str, Any], message: dict[str, Any]):
+        if span and span.is_recording():
+            span.set_attribute("custom_user_attribute_from_response_hook", "some-value")
+
+    OpenTelemetryMiddleware(application, server_request_hook=server_request_hook, client_request_hook=client_request_hook, client_response_hook=client_response_hook)
+
+Capture HTTP request and response headers
+*****************************************
+You can configure the agent to capture specified HTTP headers as span attributes, according to the
+`semantic convention <https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md#http-request-and-response-headers>`_.
+
+Request headers
+***************
+To capture HTTP request headers as span attributes, set the environment variable
+``OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST`` to a comma delimited list of HTTP header names.
+
+For example,
+::
+
+    export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST="content-type,custom_request_header"
+
+will extract ``content-type`` and ``custom_request_header`` from the request headers and add them as span attributes.
+
+Request header names in ASGI are case-insensitive. So, giving the header name as ``CUStom-Header`` in the environment
+variable will capture the header named ``custom-header``.
+
+Regular expressions may also be used to match multiple headers that correspond to the given pattern.
For example: +:: + + export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST="Accept.*,X-.*" + +Would match all request headers that start with ``Accept`` and ``X-``. + +To capture all request headers, set ``OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST`` to ``".*"``. +:: + + export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST=".*" + +The name of the added span attribute will follow the format ``http.request.header.<header_name>`` where ``<header_name>`` +is the normalized HTTP header name (lowercase, with ``-`` replaced by ``_``). The value of the attribute will be a +list containing the header values. + +For example: +``http.request.header.custom_request_header = ["<value1>", "<value2>"]`` + +Response headers +**************** +To capture HTTP response headers as span attributes, set the environment variable +``OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE`` to a comma delimited list of HTTP header names. + +For example, +:: + + export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE="content-type,custom_response_header" + +will extract ``content-type`` and ``custom_response_header`` from the response headers and add them as span attributes. + +Response header names in ASGI are case-insensitive. So, giving the header name as ``CUStom-Header`` in the environment +variable will capture the header named ``custom-header``. + +Regular expressions may also be used to match multiple headers that correspond to the given pattern. For example: +:: + + export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE="Content.*,X-.*" + +Would match all response headers that start with ``Content`` and ``X-``. + +To capture all response headers, set ``OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE`` to ``".*"``. 
+:: + + export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE=".*" + +The name of the added span attribute will follow the format ``http.response.header.<header_name>`` where ``<header_name>`` +is the normalized HTTP header name (lowercase, with ``-`` replaced by ``_``). The value of the attribute will be a +list containing the header values. + +For example: +``http.response.header.custom_response_header = ["<value1>", "<value2>"]`` + +Sanitizing headers +****************** +In order to prevent storing sensitive data such as personally identifiable information (PII), session keys, passwords, +etc, set the environment variable ``OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS`` +to a comma delimited list of HTTP header names to be sanitized. Regexes may be used, and all header names will be +matched in a case-insensitive manner. + +For example, +:: + + export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS=".*session.*,set-cookie" + +will replace the value of headers such as ``session-id`` and ``set-cookie`` with ``[REDACTED]`` in the span. + +Note: + The environment variable names used to capture HTTP headers are still experimental, and thus are subject to change. 
+ +API +--- +""" + +from __future__ import annotations + +import typing +import urllib +from collections import defaultdict +from functools import wraps +from timeit import default_timer +from typing import Any, Awaitable, Callable, DefaultDict, Tuple + +from asgiref.compatibility import guarantee_single_callable + +from opentelemetry import context, trace +from opentelemetry.instrumentation._semconv import ( + _filter_semconv_active_request_count_attr, + _filter_semconv_duration_attrs, + _get_schema_url, + _OpenTelemetrySemanticConventionStability, + _OpenTelemetryStabilitySignalType, + _report_new, + _report_old, + _server_active_requests_count_attrs_new, + _server_active_requests_count_attrs_old, + _server_duration_attrs_new, + _server_duration_attrs_old, + _set_http_flavor_version, + _set_http_host_server, + _set_http_method, + _set_http_net_host_port, + _set_http_peer_ip_server, + _set_http_peer_port_server, + _set_http_scheme, + _set_http_target, + _set_http_url, + _set_http_user_agent, + _set_status, + _StabilityMode, +) +from opentelemetry.instrumentation.asgi.types import ( + ClientRequestHook, + ClientResponseHook, + ServerRequestHook, +) +from opentelemetry.instrumentation.asgi.version import __version__ # noqa +from opentelemetry.instrumentation.propagators import ( + get_global_response_propagator, +) +from opentelemetry.instrumentation.utils import _start_internal_or_server_span +from opentelemetry.metrics import get_meter +from opentelemetry.propagators.textmap import Getter, Setter +from opentelemetry.semconv._incubating.metrics.http_metrics import ( + create_http_server_active_requests, + create_http_server_request_body_size, + create_http_server_response_body_size, +) +from opentelemetry.semconv.metrics import MetricInstruments +from opentelemetry.semconv.metrics.http_metrics import ( + HTTP_SERVER_REQUEST_DURATION, +) +from opentelemetry.semconv.trace import SpanAttributes +from opentelemetry.trace import set_span_in_context +from 
class ASGIGetter(Getter[dict]):
    """Propagation getter that reads values out of an ASGI scope's headers.

    ASGI carries headers as an iterable of ``(key, value)`` byte pairs; both
    sides are decoded before comparison, and keys are matched
    case-insensitively.
    """

    def get(
        self, carrier: dict, key: str
    ) -> typing.Optional[typing.List[str]]:
        """Getter implementation to retrieve a HTTP header value from the ASGI
        scope.

        Args:
            carrier: ASGI scope object
            key: header name in scope
        Returns:
            A list with a single string with the header value if it exists,
            else None.
        """
        headers = carrier.get("headers")
        if not headers:
            return None

        # ASGI header keys are in lower case
        key = key.lower()
        # Collect every value whose (decoded) key matches; HTTP allows a
        # header to appear multiple times.
        decoded = [
            _decode_header_item(_value)
            for (_key, _value) in headers
            if _decode_header_item(_key).lower() == key
        ]
        if not decoded:
            return None
        return decoded

    def keys(self, carrier: dict) -> typing.List[str]:
        """Return all (decoded) header names present in the ASGI scope."""
        headers = carrier.get("headers") or []
        return [_decode_header_item(_key) for (_key, _value) in headers]


asgi_getter = ASGIGetter()


class ASGISetter(Setter[dict]):
    """Propagation setter that appends response headers onto an ASGI message."""

    def set(self, carrier: dict, key: str, value: str) -> None:  # pylint: disable=no-self-use
        """Sets response header values on an ASGI scope according to `the spec <https://asgi.readthedocs.io/en/latest/specs/www.html#response-start-send-event>`_.

        Args:
            carrier: ASGI scope object
            key: response header name to set
            value: response header value
        Returns:
            None
        """
        headers = carrier.get("headers")
        if not headers:
            headers = []
            carrier["headers"] = headers

        # Appends (rather than replaces), so an already-present header with
        # the same name is kept alongside the new value.
        headers.append([key.lower().encode(), value.encode()])


asgi_setter = ASGISetter()


# pylint: disable=too-many-branches
def collect_request_attributes(
    scope, sem_conv_opt_in_mode=_StabilityMode.DEFAULT
):
    """Collects HTTP request attributes from the ASGI scope and returns a
    dictionary to be used as span creation attributes.

    Which attribute keys are emitted (old, new, or both semantic
    conventions) is controlled by ``sem_conv_opt_in_mode`` via the
    ``_set_http_*`` helpers.
    """
    server_host, port, http_url = get_host_port_url_tuple(scope)
    query_string = scope.get("query_string")
    if query_string and http_url:
        # ASGI delivers the query string as raw bytes.
        if isinstance(query_string, bytes):
            query_string = query_string.decode("utf8")
        http_url += "?" + urllib.parse.unquote(query_string)
    result = {}

    scheme = scope.get("scheme")
    if scheme:
        _set_http_scheme(result, scheme, sem_conv_opt_in_mode)
    if server_host:
        _set_http_host_server(result, server_host, sem_conv_opt_in_mode)
    if port:
        _set_http_net_host_port(result, port, sem_conv_opt_in_mode)
    flavor = scope.get("http_version")
    if flavor:
        _set_http_flavor_version(result, flavor, sem_conv_opt_in_mode)
    path = scope.get("path")
    if path:
        _set_http_target(
            result, path, path, query_string, sem_conv_opt_in_mode
        )
    if http_url:
        # Full URL is an old-convention-only attribute here; credentials are
        # stripped before recording.
        if _report_old(sem_conv_opt_in_mode):
            _set_http_url(
                result,
                remove_url_credentials(http_url),
                _StabilityMode.DEFAULT,
            )
    http_method = scope.get("method", "")
    if http_method:
        _set_http_method(
            result,
            http_method,
            sanitize_method(http_method),
            sem_conv_opt_in_mode,
        )

    http_host_value_list = asgi_getter.get(scope, "host")
    if http_host_value_list:
        if _report_old(sem_conv_opt_in_mode):
            result[SpanAttributes.HTTP_SERVER_NAME] = ",".join(
                http_host_value_list
            )
    http_user_agent = asgi_getter.get(scope, "user-agent")
    if http_user_agent:
        # Only the first User-Agent value is recorded.
        _set_http_user_agent(result, http_user_agent[0], sem_conv_opt_in_mode)

    if "client" in scope and scope["client"] is not None:
        _set_http_peer_ip_server(
            result, scope.get("client")[0], sem_conv_opt_in_mode
        )
        _set_http_peer_port_server(
            result, scope.get("client")[1], sem_conv_opt_in_mode
        )

    # remove None values
    result = {k: v for k, v in result.items() if v is not None}

    return result
def collect_custom_headers_attributes(
    scope_or_response_message: dict[str, Any],
    sanitize: SanitizeValue,
    header_regexes: list[str],
    normalize_names: Callable[[str], str],
) -> dict[str, list[str]]:
    """
    Returns custom HTTP request or response headers to be added into SERVER span as span attributes.

    Refer specifications:
     - https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md#http-request-and-response-headers
    """
    # Multi-valued headers are grouped into a list per header name.
    headers: DefaultDict[str, list[str]] = defaultdict(list)
    raw_headers = scope_or_response_message.get("headers")
    if raw_headers:
        for key, value in raw_headers:
            # Decode headers before processing.
            headers[_decode_header_item(key)].append(
                _decode_header_item(value)
            )

    # Filtering to the configured regexes, redaction of sensitive values, and
    # attribute-name normalization are all delegated to the sanitizer.
    return sanitize.sanitize_header_values(
        headers,
        header_regexes,
        normalize_names,
    )


def get_host_port_url_tuple(scope):
    """Returns (host, port, full_url) tuple.

    Falls back to ``("0.0.0.0", 80)`` when the scope has no ``server`` entry;
    the port is appended to the host portion of the URL only when it is not 80.
    """
    server = scope.get("server") or ["0.0.0.0", 80]
    port = server[1]
    server_host = server[0] + (":" + str(port) if str(port) != "80" else "")
    # using the scope path is enough, see:
    # - https://asgi.readthedocs.io/en/latest/specs/www.html#http-connection-scope (see: root_path and path)
    # - https://asgi.readthedocs.io/en/latest/specs/www.html#wsgi-compatibility (see: PATH_INFO)
    # PATH_INFO can be derived by stripping root_path from path
    # -> that means that the path should contain the root_path already, so prefixing it again is not necessary
    # - https://wsgi.readthedocs.io/en/latest/definitions.html#envvar-PATH_INFO
    full_path = scope.get("path", "")
    http_url = scope.get("scheme", "http") + "://" + server_host + full_path
    return server_host, port, http_url


def set_status_code(
    span,
    status_code,
    metric_attributes=None,
    sem_conv_opt_in_mode=_StabilityMode.DEFAULT,
):
    """Adds HTTP response attributes to span using the status_code argument."""
    status_code_str = str(status_code)

    try:
        status_code = int(status_code)
    except ValueError:
        # A non-integer status is recorded as -1; _set_status treats it as an
        # error case (see the Non-integer branch there).
        status_code = -1
    if metric_attributes is None:
        metric_attributes = {}
    _set_status(
        span,
        metric_attributes,
        status_code,
        status_code_str,
        server_span=True,
        sem_conv_opt_in_mode=sem_conv_opt_in_mode,
    )


def get_default_span_details(scope: dict) -> Tuple[str, dict]:
    """
    Default span name is the HTTP method and URL path, or just the method.
    https://github.com/open-telemetry/opentelemetry-specification/pull/3165
    https://opentelemetry.io/docs/reference/specification/trace/semantic_conventions/http/#name

    Args:
        scope: the ASGI scope dictionary
    Returns:
        a tuple of the span name, and any attributes to attach to the span.
    """
    path = scope.get("path", "").strip()
    method = sanitize_method(scope.get("method", "").strip())
    # sanitize_method maps unknown methods to the "_OTHER" sentinel; the span
    # name uses the generic "HTTP" in that case.
    if method == "_OTHER":
        method = "HTTP"
    if method and path:  # http
        return f"{method} {path}", {}
    if path:  # websocket
        return path, {}
    return method, {}  # http with no path
def _collect_target_attribute(
    scope: typing.Dict[str, typing.Any],
) -> typing.Optional[str]:
    """
    Returns the target path as defined by the Semantic Conventions.

    This value is suitable to use in metrics as it should replace concrete
    values with a parameterized name. Example: /api/users/{user_id}

    Refer to the specification
    https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/semantic_conventions/http-metrics.md#parameterized-attributes

    Note: this function requires specific code for each framework, as there's no
    standard attribute to use.
    """
    # FastAPI
    root_path = scope.get("root_path", "")

    # FastAPI/Starlette store the matched route object in the scope; its
    # path_format is the parameterized template (e.g. "/items/{id}").
    route = scope.get("route")
    path_format = getattr(route, "path_format", None)
    if path_format:
        return f"{root_path}{path_format}"

    return None


class OpenTelemetryMiddleware:
    """The ASGI application middleware.

    This class is an ASGI middleware that starts and annotates spans for any
    requests it is invoked with.

    Args:
        app: The ASGI application callable to forward requests to.
        default_span_details: Callback which should return a string and a tuple, representing the desired default span name and a
                      dictionary with any additional span attributes to set.
                      Optional: Defaults to get_default_span_details.
        server_request_hook: Optional callback which is called with the server span and ASGI
                      scope object for every incoming request.
        client_request_hook: Optional callback which is called with the internal span, and ASGI
                      scope and event which are sent as dictionaries for when the method receive is called.
        client_response_hook: Optional callback which is called with the internal span, and ASGI
                      scope and event which are sent as dictionaries for when the method send is called.
        tracer_provider: The optional tracer provider to use. If omitted
            the current globally configured one is used.
        meter_provider: The optional meter provider to use. If omitted
            the current globally configured one is used.
        exclude_spans: Optionally exclude HTTP `send` and/or `receive` spans from the trace.
    """

    # pylint: disable=too-many-branches
    def __init__(
        self,
        app,
        excluded_urls=None,
        default_span_details=None,
        server_request_hook: ServerRequestHook = None,
        client_request_hook: ClientRequestHook = None,
        client_response_hook: ClientResponseHook = None,
        tracer_provider=None,
        meter_provider=None,
        tracer=None,
        meter=None,
        http_capture_headers_server_request: list[str] | None = None,
        http_capture_headers_server_response: list[str] | None = None,
        http_capture_headers_sanitize_fields: list[str] | None = None,
        exclude_spans: list[typing.Literal["receive", "send"]] | None = None,
    ):
        # initialize semantic conventions opt-in if needed
        _OpenTelemetrySemanticConventionStability._initialize()
        sem_conv_opt_in_mode = _OpenTelemetrySemanticConventionStability._get_opentelemetry_stability_opt_in_mode(
            _OpenTelemetryStabilitySignalType.HTTP,
        )
        self.app = guarantee_single_callable(app)
        self.tracer = (
            trace.get_tracer(
                __name__,
                __version__,
                tracer_provider,
                schema_url=_get_schema_url(sem_conv_opt_in_mode),
            )
            if tracer is None
            else tracer
        )
        self.meter = (
            get_meter(
                __name__,
                __version__,
                meter_provider,
                schema_url=_get_schema_url(sem_conv_opt_in_mode),
            )
            if meter is None
            else meter
        )
        # Old-convention instruments are created only when _report_old is
        # true for the mode, new-convention ones only when _report_new is;
        # in "duplicate" modes both sets exist side by side.
        self.duration_histogram_old = None
        if _report_old(sem_conv_opt_in_mode):
            self.duration_histogram_old = self.meter.create_histogram(
                name=MetricInstruments.HTTP_SERVER_DURATION,
                unit="ms",
                description="Measures the duration of inbound HTTP requests.",
            )
        self.duration_histogram_new = None
        if _report_new(sem_conv_opt_in_mode):
            self.duration_histogram_new = self.meter.create_histogram(
                name=HTTP_SERVER_REQUEST_DURATION,
                description="Duration of HTTP server requests.",
                unit="s",
            )
        self.server_response_size_histogram = None
        if _report_old(sem_conv_opt_in_mode):
            self.server_response_size_histogram = self.meter.create_histogram(
                name=MetricInstruments.HTTP_SERVER_RESPONSE_SIZE,
                unit="By",
                description="measures the size of HTTP response messages (compressed).",
            )
        self.server_response_body_size_histogram = None
        if _report_new(sem_conv_opt_in_mode):
            self.server_response_body_size_histogram = (
                create_http_server_response_body_size(self.meter)
            )
        self.server_request_size_histogram = None
        if _report_old(sem_conv_opt_in_mode):
            self.server_request_size_histogram = self.meter.create_histogram(
                name=MetricInstruments.HTTP_SERVER_REQUEST_SIZE,
                unit="By",
                description="Measures the size of HTTP request messages (compressed).",
            )
        self.server_request_body_size_histogram = None
        if _report_new(sem_conv_opt_in_mode):
            self.server_request_body_size_histogram = (
                create_http_server_request_body_size(self.meter)
            )
        self.active_requests_counter = create_http_server_active_requests(
            self.meter
        )
        self.excluded_urls = excluded_urls
        self.default_span_details = (
            default_span_details or get_default_span_details
        )
        self.server_request_hook = server_request_hook
        self.client_request_hook = client_request_hook
        self.client_response_hook = client_response_hook
        # NOTE(review): content_length_header is instance state written from
        # otel_send and read in __call__'s finally block; with a single
        # middleware instance serving concurrent requests this looks like it
        # can cross-contaminate between requests — confirm upstream intent.
        self.content_length_header = None
        self._sem_conv_opt_in_mode = sem_conv_opt_in_mode

        # Environment variables as constructor parameters
        # (explicit constructor arguments win over the corresponding
        # OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_* environment variables)
        self.http_capture_headers_server_request = (
            http_capture_headers_server_request
            or (
                get_custom_headers(
                    OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST
                )
            )
            or None
        )
        self.http_capture_headers_server_response = (
            http_capture_headers_server_response
            or (
                get_custom_headers(
                    OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE
                )
            )
            or None
        )
        self.http_capture_headers_sanitize_fields = SanitizeValue(
            http_capture_headers_sanitize_fields
            or (
                get_custom_headers(
                    OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS
                )
            )
            or []
        )
        self.exclude_receive_span = (
            "receive" in exclude_spans if exclude_spans else False
        )
        self.exclude_send_span = (
            "send" in exclude_spans if exclude_spans else False
        )

    # pylint: disable=too-many-statements
    async def __call__(
        self,
        scope: dict[str, Any],
        receive: Callable[[], Awaitable[dict[str, Any]]],
        send: Callable[[dict[str, Any]], Awaitable[None]],
    ) -> None:
        """The ASGI application

        Args:
            scope: An ASGI environment.
            receive: An awaitable callable yielding dictionaries
            send: An awaitable callable taking a single dictionary as argument.
        """
        start = default_timer()
        # Pass lifespan (and any other non-http/websocket) events straight
        # through without instrumentation.
        if scope["type"] not in ("http", "websocket"):
            return await self.app(scope, receive, send)

        _, _, url = get_host_port_url_tuple(scope)
        if self.excluded_urls and self.excluded_urls.url_disabled(url):
            return await self.app(scope, receive, send)

        span_name, additional_attributes = self.default_span_details(scope)

        attributes = collect_request_attributes(
            scope, self._sem_conv_opt_in_mode
        )
        attributes.update(additional_attributes)
        span, token = _start_internal_or_server_span(
            tracer=self.tracer,
            span_name=span_name,
            start_time=None,
            context_carrier=scope,
            context_getter=asgi_getter,
            attributes=attributes,
        )
        active_requests_count_attrs = _parse_active_request_count_attrs(
            attributes,
            self._sem_conv_opt_in_mode,
        )

        if scope["type"] == "http":
            self.active_requests_counter.add(1, active_requests_count_attrs)
        try:
            # end_on_exit=False: the server span is ended by otel_send when
            # the final body/trailers message goes out, not when this scope
            # exits.
            with trace.use_span(span, end_on_exit=False) as current_span:
                if current_span.is_recording():
                    for key, value in attributes.items():
                        current_span.set_attribute(key, value)

                    if current_span.kind == trace.SpanKind.SERVER:
                        custom_attributes = (
                            collect_custom_headers_attributes(
                                scope,
                                self.http_capture_headers_sanitize_fields,
                                self.http_capture_headers_server_request,
                                normalise_request_header_name,
                            )
                            if self.http_capture_headers_server_request
                            else {}
                        )
                        if len(custom_attributes) > 0:
                            current_span.set_attributes(custom_attributes)

                if callable(self.server_request_hook):
                    self.server_request_hook(current_span, scope)

                otel_receive = self._get_otel_receive(
                    span_name, scope, receive
                )

                otel_send = self._get_otel_send(
                    current_span,
                    span_name,
                    scope,
                    send,
                    attributes,
                )

                await self.app(scope, otel_receive, otel_send)
        finally:
            # Metrics are recorded even if the wrapped app raised.
            if scope["type"] == "http":
                target = _collect_target_attribute(scope)
                if target:
                    path, query = _parse_url_query(target)
                    _set_http_target(
                        attributes,
                        target,
                        path,
                        query,
                        self._sem_conv_opt_in_mode,
                    )
                duration_s = default_timer() - start
                duration_attrs_old = _parse_duration_attrs(
                    attributes, _StabilityMode.DEFAULT
                )
                if target:
                    duration_attrs_old[SpanAttributes.HTTP_TARGET] = target
                duration_attrs_new = _parse_duration_attrs(
                    attributes, _StabilityMode.HTTP
                )
                # Old convention records milliseconds, new records seconds.
                if self.duration_histogram_old:
                    self.duration_histogram_old.record(
                        max(round(duration_s * 1000), 0), duration_attrs_old
                    )
                if self.duration_histogram_new:
                    self.duration_histogram_new.record(
                        max(duration_s, 0), duration_attrs_new
                    )
                self.active_requests_counter.add(
                    -1, active_requests_count_attrs
                )
                if self.content_length_header:
                    if self.server_response_size_histogram:
                        self.server_response_size_histogram.record(
                            self.content_length_header, duration_attrs_old
                        )
                    if self.server_response_body_size_histogram:
                        self.server_response_body_size_histogram.record(
                            self.content_length_header, duration_attrs_new
                        )

                request_size = asgi_getter.get(scope, "content-length")
                if request_size:
                    try:
                        request_size_amount = int(request_size[0])
                    except ValueError:
                        pass
                    else:
                        if self.server_request_size_histogram:
                            self.server_request_size_histogram.record(
                                request_size_amount, duration_attrs_old
                            )
                        if self.server_request_body_size_histogram:
                            self.server_request_body_size_histogram.record(
                                request_size_amount, duration_attrs_new
                            )
            if token:
                context.detach(token)
            # Safety net: end the span here only if otel_send has not already
            # ended it (is_recording() is false once a span has ended).
            if span.is_recording():
                span.end()

    # pylint: enable=too-many-branches
    def _get_otel_receive(self, server_span_name, scope, receive):
        """Wrap ``receive`` so each inbound ASGI event gets its own span
        (unless ``receive`` spans were excluded at construction)."""
        if self.exclude_receive_span:
            return receive

        @wraps(receive)
        async def otel_receive():
            with self.tracer.start_as_current_span(
                " ".join((server_span_name, scope["type"], "receive"))
            ) as receive_span:
                message = await receive()
                if callable(self.client_request_hook):
                    self.client_request_hook(receive_span, scope, message)
                if receive_span.is_recording():
                    if message["type"] == "websocket.receive":
                        set_status_code(
                            receive_span,
                            200,
                            None,
                            self._sem_conv_opt_in_mode,
                        )
                    receive_span.set_attribute(
                        "asgi.event.type", message["type"]
                    )
            return message

        return otel_receive

    def _set_send_span(
        self,
        server_span_name,
        scope,
        send,
        message,
        status_code,
        expecting_trailers,
    ):
        """Set send span attributes and status code.

        Returns the (possibly updated) ``expecting_trailers`` flag, taken
        from the ``trailers`` key of an ``http.response.start`` message.
        """
        with self.tracer.start_as_current_span(
            " ".join((server_span_name, scope["type"], "send"))
        ) as send_span:
            if callable(self.client_response_hook):
                self.client_response_hook(send_span, scope, message)

            if send_span.is_recording():
                if message["type"] == "http.response.start":
                    expecting_trailers = message.get("trailers", False)
                send_span.set_attribute("asgi.event.type", message["type"])

                if status_code:
                    set_status_code(
                        send_span,
                        status_code,
                        None,
                        self._sem_conv_opt_in_mode,
                    )
        return expecting_trailers

    def _set_server_span(
        self, server_span, message, status_code, duration_attrs
    ):
        """Set server span attributes and status code."""
        if (
            server_span.is_recording()
            and server_span.kind == trace.SpanKind.SERVER
            and "headers" in message
        ):
            custom_response_attributes = (
                collect_custom_headers_attributes(
                    message,
                    self.http_capture_headers_sanitize_fields,
                    self.http_capture_headers_server_response,
                    normalise_response_header_name,
                )
                if self.http_capture_headers_server_response
                else {}
            )
            if len(custom_response_attributes) > 0:
                server_span.set_attributes(custom_response_attributes)

        if status_code:
            set_status_code(
                server_span,
                status_code,
                duration_attrs,
                self._sem_conv_opt_in_mode,
            )

    def _get_otel_send(
        self,
        server_span,
        server_span_name,
        scope,
        send,
        duration_attrs,
    ):
        """Wrap ``send`` to annotate spans per outbound event and to end the
        server span once the final body/trailers message is sent."""
        expecting_trailers = False

        @wraps(send)
        async def otel_send(message: dict[str, Any]):
            nonlocal expecting_trailers

            status_code = None
            if message["type"] == "http.response.start":
                status_code = message["status"]
            elif message["type"] == "websocket.send":
                status_code = 200

            if not self.exclude_send_span:
                expecting_trailers = self._set_send_span(
                    server_span_name,
                    scope,
                    send,
                    message,
                    status_code,
                    expecting_trailers,
                )

            self._set_server_span(
                server_span, message, status_code, duration_attrs
            )

            propagator = get_global_response_propagator()
            if propagator:
                propagator.inject(
                    message,
                    context=set_span_in_context(
                        server_span, trace.context_api.Context()
                    ),
                    setter=asgi_setter,
                )

            content_length = asgi_getter.get(message, "content-length")
            if content_length:
                try:
                    self.content_length_header = int(content_length[0])
                except ValueError:
                    pass

            await send(message)

            # End the server span on the last message of the response: the
            # final body chunk when no trailers were announced, or the final
            # trailers chunk otherwise.
            # pylint: disable=too-many-boolean-expressions
            if (
                not expecting_trailers
                and message["type"] == "http.response.body"
                and not message.get("more_body", False)
            ) or (
                expecting_trailers
                and message["type"] == "http.response.trailers"
                and not message.get("more_trailers", False)
            ):
                server_span.end()

        return otel_send


def _parse_duration_attrs(
    req_attrs, sem_conv_opt_in_mode=_StabilityMode.DEFAULT
):
    """Filter request attributes down to those allowed on duration metrics
    for the given semconv mode."""
    return _filter_semconv_duration_attrs(
        req_attrs,
        _server_duration_attrs_old,
        _server_duration_attrs_new,
        sem_conv_opt_in_mode,
    )


def _parse_active_request_count_attrs(
    req_attrs, sem_conv_opt_in_mode=_StabilityMode.DEFAULT
):
    """Filter request attributes down to those allowed on the active-requests
    counter for the given semconv mode."""
    return _filter_semconv_active_request_count_attr(
        req_attrs,
        _server_active_requests_count_attrs_old,
        _server_active_requests_count_attrs_new,
        sem_conv_opt_in_mode,
    )


def _decode_header_item(value):
    """Decode one ASGI header key/value (bytes) to str, tolerating non-UTF-8."""
    try:
        return value.decode("utf-8")
    except ValueError:
        # ASGI header encoding specs, see:
        # - https://asgi.readthedocs.io/en/latest/specs/www.html#wsgi-encoding-differences (see: WSGI encoding differences)
        # - https://docs.python.org/3/library/codecs.html#text-encodings (see: Text Encodings)
        return value.decode("unicode_escape")
# Pip requirement specifier(s) for the library this package instruments;
# presumably consumed by opentelemetry-bootstrap to decide installability —
# TODO confirm against the bootstrap tooling.
_instruments = ("asgiref ~= 3.0",)

# This instrumentation emits metrics in addition to traces.
_supports_metrics = True

# HTTP semantic-convention stability status of this instrumentation.
_semconv_status = "migration"
# ASGI scope and event-message mappings (see the ASGI specification).
_Scope = Dict[str, Any]
_Message = Dict[str, Any]

ServerRequestHook = Optional[Callable[[Span, _Scope], None]]
"""
Incoming request callback type.

Args:
    - Server span
    - ASGI scope as a mapping
"""

ClientRequestHook = Optional[Callable[[Span, _Scope, _Message], None]]
"""
Receive callback type.

Args:
    - Internal span
    - ASGI scope as a mapping
    - ASGI event as a mapping
"""

ClientResponseHook = Optional[Callable[[Span, _Scope, _Message], None]]
"""
Send callback type.

Args:
    - Internal span
    - ASGI scope as a mapping
    - ASGI event as a mapping
"""
from argparse import REMAINDER, ArgumentParser
from logging import getLogger
from os import environ, execl, getcwd
from os.path import abspath, dirname, pathsep
from re import sub
from shutil import which

from opentelemetry.instrumentation.auto_instrumentation._load import (
    _load_configurators,
    _load_distro,
    _load_instrumentors,
)
from opentelemetry.instrumentation.utils import _python_path_without_directory
from opentelemetry.instrumentation.version import __version__
from opentelemetry.util._importlib_metadata import entry_points

_logger = getLogger(__name__)


def run() -> None:
    """Console entry point for ``opentelemetry-instrument``.

    Builds a CLI whose optional arguments mirror the ``OTEL_*`` environment
    variables advertised by installed packages (via the
    ``opentelemetry_environment_variables`` entry-point group), exports any
    values given on the command line into the environment, prepends this
    package's directory to PYTHONPATH so the child interpreter imports the
    bundled ``sitecustomize`` module, and finally replaces the current
    process with the target command via ``execl``.
    """
    parser = ArgumentParser(
        description="""
        opentelemetry-instrument automatically instruments a Python
        program and its dependencies and then runs the program.
        """,
        epilog="""
        Optional arguments (except for --help and --version) for opentelemetry-instrument
        directly correspond with OpenTelemetry environment variables. The
        corresponding optional argument is formed by removing the OTEL_ or
        OTEL_PYTHON_ prefix from the environment variable and lower casing the
        rest. For example, the optional argument --attribute_value_length_limit
        corresponds with the environment variable
        OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT.

        These optional arguments will override the current value of the
        corresponding environment variable during the execution of the command.
        """,
    )

    # Maps the derived CLI argument name (prefix stripped, lower-cased) back
    # to its OTEL_* environment variable so values can be exported below.
    argument_otel_environment_variable = {}

    for entry_point in entry_points(
        group="opentelemetry_environment_variables"
    ):
        environment_variable_module = entry_point.load()

        for attribute in dir(environment_variable_module):
            if attribute.startswith("OTEL_"):
                argument = sub(r"OTEL_(PYTHON_)?", "", attribute).lower()

                parser.add_argument(
                    f"--{argument}",
                    required=False,
                )
                argument_otel_environment_variable[argument] = attribute

    parser.add_argument(
        "--version",
        help="print version information",
        action="version",
        version="%(prog)s " + __version__,
    )
    parser.add_argument("command", help="Your Python application.")
    parser.add_argument(
        "command_args",
        help="Arguments for your application.",
        nargs=REMAINDER,
    )

    args = parser.parse_args()

    # Command-line values win over pre-existing environment settings for the
    # duration of the child process.
    for argument, otel_environment_variable in (
        argument_otel_environment_variable
    ).items():
        value = getattr(args, argument)
        if value is not None:
            environ[otel_environment_variable] = value

    python_path = environ.get("PYTHONPATH")

    if not python_path:
        python_path = []

    else:
        python_path = python_path.split(pathsep)

    cwd_path = getcwd()

    # This is being added to support applications that are being run from their
    # own executable, like Django.
    # FIXME investigate if there is another way to achieve this
    if cwd_path not in python_path:
        python_path.insert(0, cwd_path)

    filedir_path = dirname(abspath(__file__))

    # Ensure this package's directory (which ships sitecustomize.py) appears
    # exactly once, at the front of PYTHONPATH.
    python_path = [path for path in python_path if path != filedir_path]

    python_path.insert(0, filedir_path)

    environ["PYTHONPATH"] = pathsep.join(python_path)

    executable = which(args.command)
    if executable is None:
        # which() returns None when the command cannot be resolved; without
        # this guard execl(None, ...) fails with an unhelpful TypeError.
        parser.error(f"Cannot find command: {args.command}")
    execl(executable, executable, *args.command_args)


def initialize():
    """Setup auto-instrumentation, called by the sitecustomize module"""
    # prevents auto-instrumentation of subprocesses if code execs another python process
    if "PYTHONPATH" in environ:
        environ["PYTHONPATH"] = _python_path_without_directory(
            environ["PYTHONPATH"], dirname(abspath(__file__)), pathsep
        )

    try:
        distro = _load_distro()
        distro.configure()
        _load_configurators()
        _load_instrumentors(distro)
    except Exception:  # pylint: disable=broad-except
        # Never let instrumentation failures break the host application.
        _logger.exception("Failed to auto initialize OpenTelemetry")
from functools import cached_property
from logging import getLogger
from os import environ

from opentelemetry.instrumentation.dependencies import (
    get_dist_dependency_conflicts,
)
from opentelemetry.instrumentation.distro import BaseDistro, DefaultDistro
from opentelemetry.instrumentation.environment_variables import (
    OTEL_PYTHON_CONFIGURATOR,
    OTEL_PYTHON_DISABLED_INSTRUMENTATIONS,
    OTEL_PYTHON_DISTRO,
)
from opentelemetry.instrumentation.version import __version__
from opentelemetry.util._importlib_metadata import (
    EntryPoint,
    distributions,
    entry_points,
)

_logger = getLogger(__name__)


class _EntryPointDistFinder:
    """Resolves which installed distribution provides a given entry point."""

    @cached_property
    def _mapping(self):
        # Lazily built (and then cached) index of every entry point in the
        # environment, keyed by group:name:value.
        return {
            self._key_for(ep): dist
            for dist in distributions()
            for ep in dist.entry_points
        }

    def dist_for(self, entry_point: EntryPoint):
        """Return the distribution owning *entry_point*, or None.

        Newer importlib metadata attaches ``dist`` directly to the entry
        point; fall back to the scanned index otherwise.
        """
        dist = getattr(entry_point, "dist", None)
        if dist:
            return dist

        return self._mapping.get(self._key_for(entry_point))

    @staticmethod
    def _key_for(entry_point: EntryPoint):
        # Composite key uniquely identifying an entry point.
        return f"{entry_point.group}:{entry_point.name}:{entry_point.value}"


def _load_distro() -> BaseDistro:
    """Load and return the configured distro.

    Honors OTEL_PYTHON_DISTRO; when unset, the first usable entry point in
    the ``opentelemetry_distro`` group wins. Falls back to DefaultDistro
    when nothing matches.

    Raises:
        Exception: re-raises whatever a distro's load/instantiation raised.
    """
    distro_name = environ.get(OTEL_PYTHON_DISTRO, None)
    for entry_point in entry_points(group="opentelemetry_distro"):
        try:
            # If no distro is specified, use first to come up.
            if distro_name is None or distro_name == entry_point.name:
                distro = entry_point.load()()
                if not isinstance(distro, BaseDistro):
                    _logger.debug(
                        "%s is not an OpenTelemetry Distro. Skipping",
                        entry_point.name,
                    )
                    continue
                _logger.debug(
                    "Distribution %s will be configured", entry_point.name
                )
                return distro
        except Exception:  # pylint: disable=broad-except
            _logger.exception(
                "Distribution %s configuration failed", entry_point.name
            )
            # Bare raise preserves the original traceback; `raise exc` would
            # add a redundant re-raise frame.
            raise
    return DefaultDistro()


def _load_instrumentors(distro):
    """Load every applicable instrumentor entry point via *distro*.

    Skips libraries listed in OTEL_PYTHON_DISABLED_INSTRUMENTATIONS and any
    instrumentation whose dependency requirements conflict with the
    environment. Runs ``opentelemetry_pre_instrument`` hooks first and
    ``opentelemetry_post_instrument`` hooks last.

    Raises:
        Exception: re-raises any non-ImportError instrumentation failure.
    """
    package_to_exclude = environ.get(OTEL_PYTHON_DISABLED_INSTRUMENTATIONS, [])
    entry_point_finder = _EntryPointDistFinder()
    if isinstance(package_to_exclude, str):
        package_to_exclude = package_to_exclude.split(",")
        # to handle users entering "requests , flask" or "requests, flask" with spaces
        package_to_exclude = [x.strip() for x in package_to_exclude]

    for entry_point in entry_points(group="opentelemetry_pre_instrument"):
        entry_point.load()()

    for entry_point in entry_points(group="opentelemetry_instrumentor"):
        if entry_point.name in package_to_exclude:
            _logger.debug(
                "Instrumentation skipped for library %s", entry_point.name
            )
            continue

        try:
            entry_point_dist = entry_point_finder.dist_for(entry_point)
            conflict = get_dist_dependency_conflicts(entry_point_dist)
            if conflict:
                _logger.debug(
                    "Skipping instrumentation %s: %s",
                    entry_point.name,
                    conflict,
                )
                continue

            # tell instrumentation to not run dep checks again as we already did it above
            distro.load_instrumentor(entry_point, skip_dep_check=True)
            _logger.debug("Instrumented %s", entry_point.name)
        except ImportError:
            # in scenarios using the kubernetes operator to do autoinstrumentation some
            # instrumentors (usually requiring binary extensions) may fail to load
            # because the injected autoinstrumentation code does not match the application
            # environment regarding python version, libc, etc... In this case it's better
            # to skip the single instrumentation rather than failing to load everything
            # so treat differently ImportError than the rest of exceptions
            _logger.exception(
                "Importing of %s failed, skipping it", entry_point.name
            )
            continue
        except Exception:  # pylint: disable=broad-except
            _logger.exception("Instrumenting of %s failed", entry_point.name)
            # Bare raise keeps the original traceback intact.
            raise

    for entry_point in entry_points(group="opentelemetry_post_instrument"):
        entry_point.load()()


def _load_configurators():
    """Run the selected configurator entry point.

    Honors OTEL_PYTHON_CONFIGURATOR; at most one configurator is executed —
    any additional ones are logged and skipped.

    Raises:
        Exception: re-raises any configurator failure.
    """
    configurator_name = environ.get(OTEL_PYTHON_CONFIGURATOR, None)
    configured = None
    for entry_point in entry_points(group="opentelemetry_configurator"):
        if configured is not None:
            _logger.warning(
                "Configuration of %s not loaded, %s already loaded",
                entry_point.name,
                configured,
            )
            continue
        try:
            if (
                configurator_name is None
                or configurator_name == entry_point.name
            ):
                entry_point.load()().configure(
                    auto_instrumentation_version=__version__
                )  # type: ignore
                configured = entry_point.name
            else:
                _logger.warning(
                    "Configuration of %s not loaded because %s is set by %s",
                    entry_point.name,
                    configurator_name,
                    OTEL_PYTHON_CONFIGURATOR,
                )
        except Exception:  # pylint: disable=broad-except
            _logger.exception("Configuration of %s failed", entry_point.name)
            # Bare raise keeps the original traceback intact.
            raise
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This module is imported automatically by the interpreter (Python's
# sitecustomize mechanism) once `opentelemetry-instrument` has placed this
# package's directory at the front of PYTHONPATH. Importing it triggers
# distro/configurator/instrumentor loading for the whole process.
from opentelemetry.instrumentation.auto_instrumentation import initialize

initialize()
import argparse
import logging
import sys
from subprocess import (
    PIPE,
    CalledProcessError,
    Popen,
    SubprocessError,
    check_call,
)
from typing import Optional

from packaging.requirements import Requirement

from opentelemetry.instrumentation.bootstrap_gen import (
    default_instrumentations as gen_default_instrumentations,
)
from opentelemetry.instrumentation.bootstrap_gen import (
    libraries as gen_libraries,
)
from opentelemetry.instrumentation.version import __version__
from opentelemetry.util._importlib_metadata import (
    PackageNotFoundError,
    version,
)

logger = logging.getLogger(__name__)


def _syscall(func):
    """Decorator translating SubprocessError into RuntimeError with a
    readable message naming the failed command (and package, if given)."""

    def wrapper(package=None):
        try:
            if package:
                return func(package)
            return func()
        except SubprocessError as exp:
            cmd = getattr(exp, "cmd", None)
            if cmd:
                msg = f'Error calling system command "{" ".join(cmd)}"'
                if package:
                    msg = f'{msg} for package "{package}"'
                raise RuntimeError(msg)
            # NOTE(review): a SubprocessError without a `cmd` attribute is
            # silently swallowed here (wrapper returns None) — behavior
            # preserved as-is.

    return wrapper


@_syscall
def _sys_pip_install(package):
    """pip-install *package* into the current interpreter's environment."""
    # explicit upgrade strategy to override potential pip config
    try:
        check_call(
            [
                sys.executable,
                "-m",
                "pip",
                "install",
                "-U",
                "--upgrade-strategy",
                "only-if-needed",
                package,
            ]
        )
    except CalledProcessError as error:
        print(error)


def _pip_check(libraries):
    """Ensures none of the instrumentations have dependency conflicts.
    Clean check reported as:
    'No broken requirements found.'
    Dependency conflicts are reported as:
    'opentelemetry-instrumentation-flask 1.0.1 has requirement opentelemetry-sdk<2.0,>=1.0, but you have opentelemetry-sdk 0.5.'
    To not be too restrictive, we'll only check for relevant packages.
    """
    with Popen(
        [sys.executable, "-m", "pip", "check"], stdout=PIPE
    ) as check_pipe:
        pip_check = check_pipe.communicate()[0].decode()
        pip_check_lower = pip_check.lower()
        for library in libraries:
            # The original code iterated each dict directly, which yields only
            # the literal keys "library"/"instrumentation" — never an actual
            # package name — so the conflict check could not fire. Parse the
            # real distribution names out of the requirement strings instead.
            for requirement_string in library.values():
                name = Requirement(requirement_string).name.lower()
                # pip reports names in normalized (dash) form.
                if name.replace("_", "-") in pip_check_lower:
                    raise RuntimeError(
                        f"Dependency conflict found: {pip_check}"
                    )


def _is_installed(req):
    """Return True when the distribution named by requirement string *req*
    is installed at a version satisfying its specifier."""
    req = Requirement(req)

    try:
        dist_version = version(req.name)
    except PackageNotFoundError:
        return False

    # SpecifierSet.filter() returns a lazy generator, which is always truthy,
    # so the original `not req.specifier.filter(dist_version)` check could
    # never be True and the mismatch warning below was dead code.
    # contains() performs the actual PEP 440 match; prereleases of the target
    # library still count as installed.
    if not req.specifier.contains(dist_version, prereleases=True):
        logger.warning(
            "instrumentation for package %s is available"
            " but version %s is installed. Skipping.",
            req,
            dist_version,
        )
        return False
    return True


def _find_installed_libraries(default_instrumentations, libraries):
    """Yield every applicable instrumentation requirement: all defaults plus
    one entry per target library that is installed in this environment."""
    yield from default_instrumentations

    for lib in libraries:
        if _is_installed(lib["library"]):
            yield lib["instrumentation"]


def _run_requirements(default_instrumentations, libraries):
    """Print the applicable instrumentation requirements, one per line."""
    logger.setLevel(logging.ERROR)
    print(
        "\n".join(
            _find_installed_libraries(default_instrumentations, libraries)
        )
    )


def _run_install(default_instrumentations, libraries):
    """pip-install every applicable instrumentation, then sanity-check for
    dependency conflicts."""
    for lib in _find_installed_libraries(default_instrumentations, libraries):
        _sys_pip_install(lib)
    _pip_check(libraries)


def run(
    default_instrumentations: Optional[list] = None,
    libraries: Optional[list] = None,
) -> None:
    """Console entry point for ``opentelemetry-bootstrap``.

    Args:
        default_instrumentations: Overrides the generated list of always-on
            instrumentations (defaults to ``bootstrap_gen`` data).
        libraries: Overrides the generated library -> instrumentation mapping
            (defaults to ``bootstrap_gen`` data).
    """
    action_install = "install"
    action_requirements = "requirements"

    parser = argparse.ArgumentParser(
        description="""
        opentelemetry-bootstrap detects installed libraries and automatically
        installs the relevant instrumentation packages for them.
        """
    )
    parser.add_argument(
        "--version",
        help="print version information",
        action="version",
        version="%(prog)s " + __version__,
    )
    parser.add_argument(
        "-a",
        "--action",
        choices=[action_install, action_requirements],
        default=action_requirements,
        help="""
        install - uses pip to install the new requirements using to the
                  currently active site-package.
        requirements - prints out the new requirements to stdout. Action can
                       be piped and appended to a requirements.txt file.
        """,
    )
    args = parser.parse_args()

    if libraries is None:
        libraries = gen_libraries

    if default_instrumentations is None:
        default_instrumentations = gen_default_instrumentations

    cmd = {
        action_install: _run_install,
        action_requirements: _run_requirements,
    }[args.action]
    cmd(default_instrumentations, libraries)
# DO NOT EDIT. THIS FILE WAS AUTOGENERATED FROM INSTRUMENTATION PACKAGES.
# RUN `python scripts/generate_instrumentation_bootstrap.py` TO REGENERATE.

# Each entry maps a target library requirement ("library") to the
# instrumentation distribution that should be installed for it
# ("instrumentation"). Consumed by opentelemetry.instrumentation.bootstrap.
libraries = [
    {
        "library": "openai >= 1.26.0",
        "instrumentation": "opentelemetry-instrumentation-openai-v2",
    },
    {
        "library": "google-cloud-aiplatform >= 1.64",
        "instrumentation": "opentelemetry-instrumentation-vertexai>=2.0b0",
    },
    {
        "library": "aio_pika >= 7.2.0, < 10.0.0",
        "instrumentation": "opentelemetry-instrumentation-aio-pika==0.52b1",
    },
    {
        "library": "aiohttp ~= 3.0",
        "instrumentation": "opentelemetry-instrumentation-aiohttp-client==0.52b1",
    },
    {
        "library": "aiohttp ~= 3.0",
        "instrumentation": "opentelemetry-instrumentation-aiohttp-server==0.52b1",
    },
    {
        "library": "aiokafka >= 0.8, < 1.0",
        "instrumentation": "opentelemetry-instrumentation-aiokafka==0.52b1",
    },
    {
        "library": "aiopg >= 0.13.0, < 2.0.0",
        "instrumentation": "opentelemetry-instrumentation-aiopg==0.52b1",
    },
    {
        "library": "asgiref ~= 3.0",
        "instrumentation": "opentelemetry-instrumentation-asgi==0.52b1",
    },
    {
        "library": "asyncpg >= 0.12.0",
        "instrumentation": "opentelemetry-instrumentation-asyncpg==0.52b1",
    },
    {
        "library": "boto~=2.0",
        "instrumentation": "opentelemetry-instrumentation-boto==0.52b1",
    },
    {
        "library": "boto3 ~= 1.0",
        "instrumentation": "opentelemetry-instrumentation-boto3sqs==0.52b1",
    },
    {
        "library": "botocore ~= 1.0",
        "instrumentation": "opentelemetry-instrumentation-botocore==0.52b1",
    },
    # cassandra-driver and scylla-driver share one instrumentation package.
    {
        "library": "cassandra-driver ~= 3.25",
        "instrumentation": "opentelemetry-instrumentation-cassandra==0.52b1",
    },
    {
        "library": "scylla-driver ~= 3.25",
        "instrumentation": "opentelemetry-instrumentation-cassandra==0.52b1",
    },
    {
        "library": "celery >= 4.0, < 6.0",
        "instrumentation": "opentelemetry-instrumentation-celery==0.52b1",
    },
    {
        "library": "click >= 8.1.3, < 9.0.0",
        "instrumentation": "opentelemetry-instrumentation-click==0.52b1",
    },
    {
        "library": "confluent-kafka >= 1.8.2, <= 2.7.0",
        "instrumentation": "opentelemetry-instrumentation-confluent-kafka==0.52b1",
    },
    {
        "library": "django >= 1.10",
        "instrumentation": "opentelemetry-instrumentation-django==0.52b1",
    },
    {
        "library": "elasticsearch >= 6.0",
        "instrumentation": "opentelemetry-instrumentation-elasticsearch==0.52b1",
    },
    {
        "library": "falcon >= 1.4.1, < 5.0.0",
        "instrumentation": "opentelemetry-instrumentation-falcon==0.52b1",
    },
    {
        "library": "fastapi ~= 0.58",
        "instrumentation": "opentelemetry-instrumentation-fastapi==0.52b1",
    },
    {
        "library": "flask >= 1.0",
        "instrumentation": "opentelemetry-instrumentation-flask==0.52b1",
    },
    {
        "library": "grpcio >= 1.42.0",
        "instrumentation": "opentelemetry-instrumentation-grpc==0.52b1",
    },
    {
        "library": "httpx >= 0.18.0",
        "instrumentation": "opentelemetry-instrumentation-httpx==0.52b1",
    },
    {
        "library": "jinja2 >= 2.7, < 4.0",
        "instrumentation": "opentelemetry-instrumentation-jinja2==0.52b1",
    },
    # kafka-python and its -ng fork share one instrumentation package.
    {
        "library": "kafka-python >= 2.0, < 3.0",
        "instrumentation": "opentelemetry-instrumentation-kafka-python==0.52b1",
    },
    {
        "library": "kafka-python-ng >= 2.0, < 3.0",
        "instrumentation": "opentelemetry-instrumentation-kafka-python==0.52b1",
    },
    {
        "library": "mysql-connector-python >= 8.0, < 10.0",
        "instrumentation": "opentelemetry-instrumentation-mysql==0.52b1",
    },
    {
        "library": "mysqlclient < 3",
        "instrumentation": "opentelemetry-instrumentation-mysqlclient==0.52b1",
    },
    {
        "library": "pika >= 0.12.0",
        "instrumentation": "opentelemetry-instrumentation-pika==0.52b1",
    },
    {
        "library": "psycopg >= 3.1.0",
        "instrumentation": "opentelemetry-instrumentation-psycopg==0.52b1",
    },
    # psycopg2 and psycopg2-binary share one instrumentation package.
    {
        "library": "psycopg2 >= 2.7.3.1",
        "instrumentation": "opentelemetry-instrumentation-psycopg2==0.52b1",
    },
    {
        "library": "psycopg2-binary >= 2.7.3.1",
        "instrumentation": "opentelemetry-instrumentation-psycopg2==0.52b1",
    },
    {
        "library": "pymemcache >= 1.3.5, < 5",
        "instrumentation": "opentelemetry-instrumentation-pymemcache==0.52b1",
    },
    {
        "library": "pymongo >= 3.1, < 5.0",
        "instrumentation": "opentelemetry-instrumentation-pymongo==0.52b1",
    },
    {
        "library": "pymssql >= 2.1.5, < 3",
        "instrumentation": "opentelemetry-instrumentation-pymssql==0.52b1",
    },
    {
        "library": "PyMySQL < 2",
        "instrumentation": "opentelemetry-instrumentation-pymysql==0.52b1",
    },
    {
        "library": "pyramid >= 1.7",
        "instrumentation": "opentelemetry-instrumentation-pyramid==0.52b1",
    },
    {
        "library": "redis >= 2.6",
        "instrumentation": "opentelemetry-instrumentation-redis==0.52b1",
    },
    {
        "library": "remoulade >= 0.50",
        "instrumentation": "opentelemetry-instrumentation-remoulade==0.52b1",
    },
    {
        "library": "requests ~= 2.0",
        "instrumentation": "opentelemetry-instrumentation-requests==0.52b1",
    },
    {
        "library": "sqlalchemy >= 1.0.0, < 2.1.0",
        "instrumentation": "opentelemetry-instrumentation-sqlalchemy==0.52b1",
    },
    {
        "library": "starlette >= 0.13, <0.15",
        "instrumentation": "opentelemetry-instrumentation-starlette==0.52b1",
    },
    {
        "library": "psutil >= 5",
        "instrumentation": "opentelemetry-instrumentation-system-metrics==0.52b1",
    },
    {
        "library": "tornado >= 5.1.1",
        "instrumentation": "opentelemetry-instrumentation-tornado==0.52b1",
    },
    # tortoise-orm and pydantic share one instrumentation package.
    {
        "library": "tortoise-orm >= 0.17.0",
        "instrumentation": "opentelemetry-instrumentation-tortoiseorm==0.52b1",
    },
    {
        "library": "pydantic >= 1.10.2",
        "instrumentation": "opentelemetry-instrumentation-tortoiseorm==0.52b1",
    },
    {
        "library": "urllib3 >= 1.0.0, < 3.0.0",
        "instrumentation": "opentelemetry-instrumentation-urllib3==0.52b1",
    },
]
# Instrumentations that apply unconditionally (they target the standard
# library or are otherwise always relevant).
default_instrumentations = [
    "opentelemetry-instrumentation-asyncio==0.52b1",
    "opentelemetry-instrumentation-dbapi==0.52b1",
    "opentelemetry-instrumentation-logging==0.52b1",
    "opentelemetry-instrumentation-sqlite3==0.52b1",
    "opentelemetry-instrumentation-threading==0.52b1",
    "opentelemetry-instrumentation-urllib==0.52b1",
    "opentelemetry-instrumentation-wsgi==0.52b1",
]
code-block:: python + + import mysql.connector + import pyodbc + + from opentelemetry.instrumentation.dbapi import trace_integration + + + # Ex: mysql.connector + trace_integration(mysql.connector, "connect", "mysql") + # Ex: pyodbc + trace_integration(pyodbc, "Connection", "odbc") + +API +--- +""" + +from __future__ import annotations + +import functools +import logging +import re +from typing import Any, Callable, Generic, TypeVar + +import wrapt +from wrapt import wrap_function_wrapper + +from opentelemetry import trace as trace_api +from opentelemetry.instrumentation.dbapi.version import __version__ +from opentelemetry.instrumentation.sqlcommenter_utils import _add_sql_comment +from opentelemetry.instrumentation.utils import ( + _get_opentelemetry_values, + unwrap, +) +from opentelemetry.semconv.trace import SpanAttributes +from opentelemetry.trace import SpanKind, TracerProvider, get_tracer +from opentelemetry.util._importlib_metadata import version as util_version + +_DB_DRIVER_ALIASES = { + "MySQLdb": "mysqlclient", +} + +_logger = logging.getLogger(__name__) + +ConnectionT = TypeVar("ConnectionT") +CursorT = TypeVar("CursorT") + + +def trace_integration( + connect_module: Callable[..., Any], + connect_method_name: str, + database_system: str, + connection_attributes: dict[str, Any] | None = None, + tracer_provider: TracerProvider | None = None, + capture_parameters: bool = False, + enable_commenter: bool = False, + db_api_integration_factory: type[DatabaseApiIntegration] | None = None, + enable_attribute_commenter: bool = False, +): + """Integrate with DB API library. + https://www.python.org/dev/peps/pep-0249/ + + Args: + connect_module: Module name where connect method is available. + connect_method_name: The connect method name. + database_system: An identifier for the database management system (DBMS) + product being used. + connection_attributes: Attribute names for database, port, host and + user in Connection object. 
+ tracer_provider: The :class:`opentelemetry.trace.TracerProvider` to + use. If omitted the current configured one is used. + capture_parameters: Configure if db.statement.parameters should be captured. + enable_commenter: Flag to enable/disable sqlcommenter. + db_api_integration_factory: The `DatabaseApiIntegration` to use. If none is passed the + default one is used. + enable_attribute_commenter: Flag to enable/disable sqlcomment inclusion in `db.statement` span attribute. Only available if enable_commenter=True. + """ + wrap_connect( + __name__, + connect_module, + connect_method_name, + database_system, + connection_attributes, + version=__version__, + tracer_provider=tracer_provider, + capture_parameters=capture_parameters, + enable_commenter=enable_commenter, + db_api_integration_factory=db_api_integration_factory, + enable_attribute_commenter=enable_attribute_commenter, + ) + + +def wrap_connect( + name: str, + connect_module: Callable[..., Any], + connect_method_name: str, + database_system: str, + connection_attributes: dict[str, Any] | None = None, + version: str = "", + tracer_provider: TracerProvider | None = None, + capture_parameters: bool = False, + enable_commenter: bool = False, + db_api_integration_factory: type[DatabaseApiIntegration] | None = None, + commenter_options: dict[str, Any] | None = None, + enable_attribute_commenter: bool = False, +): + """Integrate with DB API library. + https://www.python.org/dev/peps/pep-0249/ + + Args: + connect_module: Module name where connect method is available. + connect_method_name: The connect method name. + database_system: An identifier for the database management system (DBMS) + product being used. + connection_attributes: Attribute names for database, port, host and + user in Connection object. + tracer_provider: The :class:`opentelemetry.trace.TracerProvider` to + use. If omitted the current configured one is used. + capture_parameters: Configure if db.statement.parameters should be captured. 
+ enable_commenter: Flag to enable/disable sqlcommenter. + db_api_integration_factory: The `DatabaseApiIntegration` to use. If none is passed the + default one is used. + commenter_options: Configurations for tags to be appended at the sql query. + enable_attribute_commenter: Flag to enable/disable sqlcomment inclusion in `db.statement` span attribute. Only available if enable_commenter=True. + + """ + db_api_integration_factory = ( + db_api_integration_factory or DatabaseApiIntegration + ) + + # pylint: disable=unused-argument + def wrap_connect_( + wrapped: Callable[..., Any], + instance: Any, + args: tuple[Any, Any], + kwargs: dict[Any, Any], + ): + db_integration = db_api_integration_factory( + name, + database_system, + connection_attributes=connection_attributes, + version=version, + tracer_provider=tracer_provider, + capture_parameters=capture_parameters, + enable_commenter=enable_commenter, + commenter_options=commenter_options, + connect_module=connect_module, + enable_attribute_commenter=enable_attribute_commenter, + ) + return db_integration.wrapped_connection(wrapped, args, kwargs) + + try: + wrap_function_wrapper( + connect_module, connect_method_name, wrap_connect_ + ) + except Exception as ex: # pylint: disable=broad-except + _logger.warning("Failed to integrate with DB API. %s", str(ex)) + + +def unwrap_connect( + connect_module: Callable[..., Any], connect_method_name: str +): + """Disable integration with DB API library. + https://www.python.org/dev/peps/pep-0249/ + + Args: + connect_module: Module name where the connect method is available. + connect_method_name: The connect method name. 
+ """ + unwrap(connect_module, connect_method_name) + + +def instrument_connection( + name: str, + connection: ConnectionT | TracedConnectionProxy[ConnectionT], + database_system: str, + connection_attributes: dict[str, Any] | None = None, + version: str = "", + tracer_provider: TracerProvider | None = None, + capture_parameters: bool = False, + enable_commenter: bool = False, + commenter_options: dict[str, Any] | None = None, + connect_module: Callable[..., Any] | None = None, + enable_attribute_commenter: bool = False, + db_api_integration_factory: type[DatabaseApiIntegration] | None = None, +) -> TracedConnectionProxy[ConnectionT]: + """Enable instrumentation in a database connection. + + Args: + name: The instrumentation module name. + connection: The connection to instrument. + database_system: An identifier for the database management system (DBMS) + product being used. + connection_attributes: Attribute names for database, port, host and + user in a connection object. + tracer_provider: The :class:`opentelemetry.trace.TracerProvider` to + use. If omitted the current configured one is used. + capture_parameters: Configure if db.statement.parameters should be captured. + enable_commenter: Flag to enable/disable sqlcommenter. + commenter_options: Configurations for tags to be appended at the sql query. + connect_module: Module name where connect method is available. + enable_attribute_commenter: Flag to enable/disable sqlcomment inclusion in `db.statement` span attribute. Only available if enable_commenter=True. + db_api_integration_factory: A class or factory function to use as a + replacement for :class:`DatabaseApiIntegration`. Can be used to + obtain connection attributes from the connect method instead of + from the connection itself (as done by the pymssql intrumentor). + + Returns: + An instrumented connection. 
+ """ + if isinstance(connection, wrapt.ObjectProxy): + _logger.warning("Connection already instrumented") + return connection + + db_api_integration_factory = ( + db_api_integration_factory or DatabaseApiIntegration + ) + + db_integration = db_api_integration_factory( + name, + database_system, + connection_attributes=connection_attributes, + version=version, + tracer_provider=tracer_provider, + capture_parameters=capture_parameters, + enable_commenter=enable_commenter, + commenter_options=commenter_options, + connect_module=connect_module, + enable_attribute_commenter=enable_attribute_commenter, + ) + db_integration.get_connection_attributes(connection) + return get_traced_connection_proxy(connection, db_integration) + + +def uninstrument_connection( + connection: ConnectionT | TracedConnectionProxy[ConnectionT], +) -> ConnectionT: + """Disable instrumentation in a database connection. + + Args: + connection: The connection to uninstrument. + + Returns: + An uninstrumented connection. 
+ """ + if isinstance(connection, wrapt.ObjectProxy): + return connection.__wrapped__ + + _logger.warning("Connection is not instrumented") + return connection + + +class DatabaseApiIntegration: + def __init__( + self, + name: str, + database_system: str, + connection_attributes: dict[str, Any] | None = None, + version: str = "", + tracer_provider: TracerProvider | None = None, + capture_parameters: bool = False, + enable_commenter: bool = False, + commenter_options: dict[str, Any] | None = None, + connect_module: Callable[..., Any] | None = None, + enable_attribute_commenter: bool = False, + ): + if connection_attributes is None: + self.connection_attributes = { + "database": "database", + "port": "port", + "host": "host", + "user": "user", + } + else: + self.connection_attributes = connection_attributes + self._name = name + self._version = version + self._tracer = get_tracer( + self._name, + instrumenting_library_version=self._version, + tracer_provider=tracer_provider, + schema_url="https://opentelemetry.io/schemas/1.11.0", + ) + self.capture_parameters = capture_parameters + self.enable_commenter = enable_commenter + self.commenter_options = commenter_options + self.enable_attribute_commenter = enable_attribute_commenter + self.database_system = database_system + self.connection_props: dict[str, Any] = {} + self.span_attributes: dict[str, Any] = {} + self.name = "" + self.database = "" + self.connect_module = connect_module + self.commenter_data = self.calculate_commenter_data() + + def _get_db_version(self, db_driver: str) -> str: + if db_driver in _DB_DRIVER_ALIASES: + return util_version(_DB_DRIVER_ALIASES[db_driver]) + db_version = "" + try: + db_version = self.connect_module.__version__ + except AttributeError: + db_version = "unknown" + return db_version + + def calculate_commenter_data(self) -> dict[str, Any]: + commenter_data: dict[str, Any] = {} + if not self.enable_commenter: + return commenter_data + + db_driver = getattr(self.connect_module, 
"__name__", "unknown") + db_version = self._get_db_version(db_driver) + + commenter_data = { + "db_driver": f"{db_driver}:{db_version.split(' ')[0]}", + # PEP 249-compliant drivers should have the following attributes. + # We can assume apilevel "1.0" if not given. + # We use "unknown" for others to prevent uncaught AttributeError. + # https://peps.python.org/pep-0249/#globals + "dbapi_threadsafety": getattr( + self.connect_module, "threadsafety", "unknown" + ), + "dbapi_level": getattr(self.connect_module, "apilevel", "1.0"), + "driver_paramstyle": getattr( + self.connect_module, "paramstyle", "unknown" + ), + } + + if self.database_system == "postgresql": + if hasattr(self.connect_module, "__libpq_version__"): + libpq_version = self.connect_module.__libpq_version__ + else: + libpq_version = self.connect_module.pq.__build_version__ + commenter_data.update({"libpq_version": libpq_version}) + elif self.database_system == "mysql": + mysqlc_version = "" + if db_driver == "MySQLdb": + mysqlc_version = self.connect_module._mysql.get_client_info() + elif db_driver == "pymysql": + mysqlc_version = self.connect_module.get_client_info() + + commenter_data.update({"mysql_client_version": mysqlc_version}) + + return commenter_data + + def wrapped_connection( + self, + connect_method: Callable[..., ConnectionT], + args: tuple[Any, ...], + kwargs: dict[Any, Any], + ) -> TracedConnectionProxy[ConnectionT]: + """Add object proxy to connection object.""" + connection = connect_method(*args, **kwargs) + self.get_connection_attributes(connection) + return get_traced_connection_proxy(connection, self) + + def get_connection_attributes(self, connection: object) -> None: + # Populate span fields using connection + for key, value in self.connection_attributes.items(): + # Allow attributes nested in connection object + attribute = functools.reduce( + lambda attribute, attribute_value: getattr( + attribute, attribute_value, None + ), + value.split("."), + connection, + ) + if attribute: + 
self.connection_props[key] = attribute + self.name = self.database_system + self.database = self.connection_props.get("database", "") + if self.database: + # PyMySQL encodes names with utf-8 + if hasattr(self.database, "decode"): + self.database = self.database.decode(errors="ignore") + self.name += "." + self.database + user = self.connection_props.get("user") + # PyMySQL encodes this data + if user and isinstance(user, bytes): + user = user.decode() + if user is not None: + self.span_attributes[SpanAttributes.DB_USER] = str(user) + host = self.connection_props.get("host") + if host is not None: + self.span_attributes[SpanAttributes.NET_PEER_NAME] = host + port = self.connection_props.get("port") + if port is not None: + self.span_attributes[SpanAttributes.NET_PEER_PORT] = port + + +# pylint: disable=abstract-method +class TracedConnectionProxy(wrapt.ObjectProxy, Generic[ConnectionT]): + # pylint: disable=unused-argument + def __init__( + self, + connection: ConnectionT, + db_api_integration: DatabaseApiIntegration | None = None, + ): + wrapt.ObjectProxy.__init__(self, connection) + self._self_db_api_integration = db_api_integration + + def __getattribute__(self, name: str): + if object.__getattribute__(self, name): + return object.__getattribute__(self, name) + + return object.__getattribute__( + object.__getattribute__(self, "_connection"), name + ) + + def cursor(self, *args: Any, **kwargs: Any): + return get_traced_cursor_proxy( + self.__wrapped__.cursor(*args, **kwargs), + self._self_db_api_integration, + ) + + def __enter__(self): + self.__wrapped__.__enter__() + return self + + def __exit__(self, *args: Any, **kwargs: Any): + self.__wrapped__.__exit__(*args, **kwargs) + + +def get_traced_connection_proxy( + connection: ConnectionT, + db_api_integration: DatabaseApiIntegration | None, + *args: Any, + **kwargs: Any, +) -> TracedConnectionProxy[ConnectionT]: + return TracedConnectionProxy(connection, db_api_integration) + + +class 
CursorTracer(Generic[CursorT]): + def __init__(self, db_api_integration: DatabaseApiIntegration) -> None: + self._db_api_integration = db_api_integration + self._commenter_enabled = self._db_api_integration.enable_commenter + self._commenter_options = ( + self._db_api_integration.commenter_options + if self._db_api_integration.commenter_options + else {} + ) + self._enable_attribute_commenter = ( + self._db_api_integration.enable_attribute_commenter + ) + self._connect_module = self._db_api_integration.connect_module + self._leading_comment_remover = re.compile(r"^/\*.*?\*/") + + def _capture_mysql_version(self, cursor) -> None: + """Lazy capture of mysql-connector client version using cursor, if applicable""" + if ( + self._db_api_integration.database_system == "mysql" + and self._db_api_integration.connect_module.__name__ + == "mysql.connector" + and not self._db_api_integration.commenter_data[ + "mysql_client_version" + ] + ): + self._db_api_integration.commenter_data["mysql_client_version"] = ( + cursor._cnx._cmysql.get_client_info() + ) + + def _get_commenter_data(self) -> dict: + """Uses DB-API integration to return commenter data for sqlcomment""" + commenter_data = dict(self._db_api_integration.commenter_data) + if self._commenter_options.get("opentelemetry_values", True): + commenter_data.update(**_get_opentelemetry_values()) + return { + k: v + for k, v in commenter_data.items() + if self._commenter_options.get(k, True) + } + + def _update_args_with_added_sql_comment(self, args, cursor) -> tuple: + """Updates args with cursor info and adds sqlcomment to query statement""" + try: + args_list = list(args) + self._capture_mysql_version(cursor) + commenter_data = self._get_commenter_data() + statement = _add_sql_comment(args_list[0], **commenter_data) + args_list[0] = statement + args = tuple(args_list) + except Exception as exc: # pylint: disable=broad-except + _logger.exception( + "Exception while generating sql comment: %s", exc + ) + return args + + def 
_populate_span( + self, + span: trace_api.Span, + cursor: CursorT, + *args: tuple[Any, ...], + ): + if not span.is_recording(): + return + statement = self.get_statement(cursor, args) + span.set_attribute( + SpanAttributes.DB_SYSTEM, self._db_api_integration.database_system + ) + span.set_attribute( + SpanAttributes.DB_NAME, self._db_api_integration.database + ) + span.set_attribute(SpanAttributes.DB_STATEMENT, statement) + + for ( + attribute_key, + attribute_value, + ) in self._db_api_integration.span_attributes.items(): + span.set_attribute(attribute_key, attribute_value) + + if self._db_api_integration.capture_parameters and len(args) > 1: + span.set_attribute("db.statement.parameters", str(args[1])) + + def get_operation_name( + self, cursor: CursorT, args: tuple[Any, ...] + ) -> str: # pylint: disable=no-self-use + if args and isinstance(args[0], str): + # Strip leading comments so we get the operation name. + return self._leading_comment_remover.sub("", args[0]).split()[0] + return "" + + def get_statement(self, cursor: CursorT, args: tuple[Any, ...]): # pylint: disable=no-self-use + if not args: + return "" + statement = args[0] + if isinstance(statement, bytes): + return statement.decode("utf8", "replace") + return statement + + def traced_execution( + self, + cursor: CursorT, + query_method: Callable[..., Any], + *args: tuple[Any, ...], + **kwargs: dict[Any, Any], + ): + name = self.get_operation_name(cursor, args) + if not name: + name = ( + self._db_api_integration.database + if self._db_api_integration.database + else self._db_api_integration.name + ) + + with self._db_api_integration._tracer.start_as_current_span( + name, kind=SpanKind.CLIENT + ) as span: + if span.is_recording(): + if args and self._commenter_enabled: + if self._enable_attribute_commenter: + # sqlcomment is added to executed query and db.statement span attribute + args = self._update_args_with_added_sql_comment( + args, cursor + ) + self._populate_span(span, cursor, *args) + else: + 
# sqlcomment is only added to executed query + # so db.statement is set before add_sql_comment + self._populate_span(span, cursor, *args) + args = self._update_args_with_added_sql_comment( + args, cursor + ) + else: + # no sqlcomment anywhere + self._populate_span(span, cursor, *args) + return query_method(*args, **kwargs) + + +# pylint: disable=abstract-method +class TracedCursorProxy(wrapt.ObjectProxy, Generic[CursorT]): + # pylint: disable=unused-argument + def __init__( + self, + cursor: CursorT, + db_api_integration: DatabaseApiIntegration, + ): + wrapt.ObjectProxy.__init__(self, cursor) + self._self_cursor_tracer = CursorTracer[CursorT](db_api_integration) + + def execute(self, *args: Any, **kwargs: Any): + return self._self_cursor_tracer.traced_execution( + self.__wrapped__, self.__wrapped__.execute, *args, **kwargs + ) + + def executemany(self, *args: Any, **kwargs: Any): + return self._self_cursor_tracer.traced_execution( + self.__wrapped__, self.__wrapped__.executemany, *args, **kwargs + ) + + def callproc(self, *args: Any, **kwargs: Any): + return self._self_cursor_tracer.traced_execution( + self.__wrapped__, self.__wrapped__.callproc, *args, **kwargs + ) + + def __enter__(self): + self.__wrapped__.__enter__() + return self + + def __exit__(self, *args, **kwargs): + self.__wrapped__.__exit__(*args, **kwargs) + + +def get_traced_cursor_proxy( + cursor: CursorT, + db_api_integration: DatabaseApiIntegration, + *args: Any, + **kwargs: Any, +) -> TracedCursorProxy[CursorT]: + return TracedCursorProxy(cursor, db_api_integration) diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/dbapi/package.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/dbapi/package.py new file mode 100644 index 00000000..7a66a17a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/dbapi/package.py @@ -0,0 +1,16 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 
(the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +_instruments = tuple() diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/dbapi/py.typed b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/dbapi/py.typed new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/dbapi/py.typed diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/dbapi/version.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/dbapi/version.py new file mode 100644 index 00000000..bc1d59fd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/dbapi/version.py @@ -0,0 +1,17 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +__version__ = "0.52b1" + +_instruments = tuple() diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/dependencies.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/dependencies.py new file mode 100644 index 00000000..b7e4cff4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/dependencies.py @@ -0,0 +1,86 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from logging import getLogger +from typing import Collection + +from packaging.requirements import InvalidRequirement, Requirement + +from opentelemetry.util._importlib_metadata import ( + Distribution, + PackageNotFoundError, + version, +) + +logger = getLogger(__name__) + + +class DependencyConflict: + required: str | None = None + found: str | None = None + + def __init__(self, required: str | None, found: str | None = None): + self.required = required + self.found = found + + def __str__(self): + return f'DependencyConflict: requested: "{self.required}" but found: "{self.found}"' + + +def get_dist_dependency_conflicts( + dist: Distribution, +) -> DependencyConflict | None: + instrumentation_deps = [] + extra = "extra" + instruments = "instruments" + instruments_marker = {extra: instruments} + if dist.requires: + for dep in dist.requires: + if extra not in dep or instruments not in dep: + continue + + req = Requirement(dep) + if 
req.marker.evaluate(instruments_marker): + instrumentation_deps.append(req) + + return get_dependency_conflicts(instrumentation_deps) + + +def get_dependency_conflicts( + deps: Collection[str | Requirement], +) -> DependencyConflict | None: + for dep in deps: + if isinstance(dep, Requirement): + req = dep + else: + try: + req = Requirement(dep) + except InvalidRequirement as exc: + logger.warning( + 'error parsing dependency, reporting as a conflict: "%s" - %s', + dep, + exc, + ) + return DependencyConflict(dep) + + try: + dist_version = version(req.name) + except PackageNotFoundError: + return DependencyConflict(dep) + + if not req.specifier.contains(dist_version): + return DependencyConflict(dep, f"{req.name} {dist_version}") + return None diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/distro.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/distro.py new file mode 100644 index 00000000..1b450f25 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/distro.py @@ -0,0 +1,70 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# type: ignore + +""" +OpenTelemetry Base Distribution (Distro) +""" + +from abc import ABC, abstractmethod +from logging import getLogger + +from opentelemetry.instrumentation.instrumentor import BaseInstrumentor +from opentelemetry.util._importlib_metadata import EntryPoint + +_LOG = getLogger(__name__) + + +class BaseDistro(ABC): + """An ABC for distro""" + + _instance = None + + def __new__(cls, *args, **kwargs): + if cls._instance is None: + cls._instance = object.__new__(cls, *args, **kwargs) + + return cls._instance + + @abstractmethod + def _configure(self, **kwargs): + """Configure the distribution""" + + def configure(self, **kwargs): + """Configure the distribution""" + self._configure(**kwargs) + + def load_instrumentor( # pylint: disable=no-self-use + self, entry_point: EntryPoint, **kwargs + ): + """Takes an instrumentation entry point and activates it by instantiating + and calling instrument() on it. + This is called for each opentelemetry_instrumentor entry point by auto + instrumentation. + + Distros can override this method to customize the behavior by + inspecting each entry point and configuring them in special ways, + passing additional arguments, load a replacement/fork instead, + skip loading entirely, etc. 
+ """ + instrumentor: BaseInstrumentor = entry_point.load() + instrumentor().instrument(**kwargs) + + +class DefaultDistro(BaseDistro): + def _configure(self, **kwargs): + pass + + +__all__ = ["BaseDistro", "DefaultDistro"] diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/django/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/django/__init__.py new file mode 100644 index 00000000..3b9af412 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/django/__init__.py @@ -0,0 +1,447 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" + +Instrument `django`_ to trace Django applications. + +.. _django: https://pypi.org/project/django/ + +SQLCOMMENTER +***************************************** +You can optionally configure Django instrumentation to enable sqlcommenter which enriches +the query with contextual information. + +Usage +----- + +.. 
code:: python + + from opentelemetry.instrumentation.django import DjangoInstrumentor + + DjangoInstrumentor().instrument(is_sql_commentor_enabled=True) + + +For example, +:: + + Invoking Users().objects.all() will lead to sql query "select * from auth_users" but when SQLCommenter is enabled + the query will get appended with some configurable tags like "select * from auth_users /*metrics=value*/;" + + +SQLCommenter Configurations +*************************** +We can configure the tags to be appended to the sqlquery log by adding below variables to the settings.py + +SQLCOMMENTER_WITH_FRAMEWORK = True(Default) or False + +For example, +:: +Enabling this flag will add django framework and it's version which is /*framework='django%3A2.2.3*/ + +SQLCOMMENTER_WITH_CONTROLLER = True(Default) or False + +For example, +:: +Enabling this flag will add controller name that handles the request /*controller='index'*/ + +SQLCOMMENTER_WITH_ROUTE = True(Default) or False + +For example, +:: +Enabling this flag will add url path that handles the request /*route='polls/'*/ + +SQLCOMMENTER_WITH_APP_NAME = True(Default) or False + +For example, +:: +Enabling this flag will add app name that handles the request /*app_name='polls'*/ + +SQLCOMMENTER_WITH_OPENTELEMETRY = True(Default) or False + +For example, +:: +Enabling this flag will add opentelemetry traceparent /*traceparent='00-fd720cffceba94bbf75940ff3caaf3cc-4fd1a2bdacf56388-01'*/ + +SQLCOMMENTER_WITH_DB_DRIVER = True(Default) or False + +For example, +:: +Enabling this flag will add name of the db driver /*db_driver='django.db.backends.postgresql'*/ + +Usage +----- + +.. 
code:: python + + from opentelemetry.instrumentation.django import DjangoInstrumentor + + DjangoInstrumentor().instrument() + + +Configuration +------------- + +Exclude lists +************* +To exclude certain URLs from tracking, set the environment variable ``OTEL_PYTHON_DJANGO_EXCLUDED_URLS`` +(or ``OTEL_PYTHON_EXCLUDED_URLS`` to cover all instrumentations) to a string of comma delimited regexes that match the +URLs. + +For example, + +:: + + export OTEL_PYTHON_DJANGO_EXCLUDED_URLS="client/.*/info,healthcheck" + +will exclude requests such as ``https://site/client/123/info`` and ``https://site/xyz/healthcheck``. + +Request attributes +******************** +To extract attributes from Django's request object and use them as span attributes, set the environment variable +``OTEL_PYTHON_DJANGO_TRACED_REQUEST_ATTRS`` to a comma delimited list of request attribute names. + +For example, + +:: + + export OTEL_PYTHON_DJANGO_TRACED_REQUEST_ATTRS='path_info,content_type' + +will extract the ``path_info`` and ``content_type`` attributes from every traced request and add them as span attributes. + +Django Request object reference: https://docs.djangoproject.com/en/3.1/ref/request-response/#attributes + +Request and Response hooks +*************************** +This instrumentation supports request and response hooks. These are functions that get called +right after a span is created for a request and right before the span is finished for the response. +The hooks can be configured as follows: + +.. 
code:: python + + from opentelemetry.instrumentation.django import DjangoInstrumentor + + def request_hook(span, request): + pass + + def response_hook(span, request, response): + pass + + DjangoInstrumentor().instrument(request_hook=request_hook, response_hook=response_hook) + +Django Request object: https://docs.djangoproject.com/en/3.1/ref/request-response/#httprequest-objects +Django Response object: https://docs.djangoproject.com/en/3.1/ref/request-response/#httpresponse-objects + +Capture HTTP request and response headers +***************************************** +You can configure the agent to capture specified HTTP headers as span attributes, according to the +`semantic convention <https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md#http-request-and-response-headers>`_. + +Request headers +*************** +To capture HTTP request headers as span attributes, set the environment variable +``OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST`` to a comma delimited list of HTTP header names. + +For example, +:: + + export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST="content-type,custom_request_header" + +will extract ``content-type`` and ``custom_request_header`` from the request headers and add them as span attributes. + +Request header names in Django are case-insensitive. So, giving the header name as ``CUStom-Header`` in the environment +variable will capture the header named ``custom-header``. + +Regular expressions may also be used to match multiple headers that correspond to the given pattern. For example: +:: + + export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST="Accept.*,X-.*" + +Would match all request headers that start with ``Accept`` and ``X-``. + +To capture all request headers, set ``OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST`` to ``".*"``. 
+:: + + export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST=".*" + +The name of the added span attribute will follow the format ``http.request.header.<header_name>`` where ``<header_name>`` +is the normalized HTTP header name (lowercase, with ``-`` replaced by ``_``). The value of the attribute will be a +single item list containing all the header values. + +For example: +``http.request.header.custom_request_header = ["<value1>,<value2>"]`` + +Response headers +**************** +To capture HTTP response headers as span attributes, set the environment variable +``OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE`` to a comma delimited list of HTTP header names. + +For example, +:: + + export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE="content-type,custom_response_header" + +will extract ``content-type`` and ``custom_response_header`` from the response headers and add them as span attributes. + +Response header names in Django are case-insensitive. So, giving the header name as ``CUStom-Header`` in the environment +variable will capture the header named ``custom-header``. + +Regular expressions may also be used to match multiple headers that correspond to the given pattern. For example: +:: + + export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE="Content.*,X-.*" + +Would match all response headers that start with ``Content`` and ``X-``. + +To capture all response headers, set ``OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE`` to ``".*"``. +:: + + export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE=".*" + +The name of the added span attribute will follow the format ``http.response.header.<header_name>`` where ``<header_name>`` +is the normalized HTTP header name (lowercase, with ``-`` replaced by ``_``). The value of the attribute will be a +single item list containing all the header values. 
+ +For example: +``http.response.header.custom_response_header = ["<value1>,<value2>"]`` + +Sanitizing headers +****************** +In order to prevent storing sensitive data such as personally identifiable information (PII), session keys, passwords, +etc, set the environment variable ``OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS`` +to a comma delimited list of HTTP header names to be sanitized. Regexes may be used, and all header names will be +matched in a case-insensitive manner. + +For example, +:: + + export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS=".*session.*,set-cookie" + +will replace the value of headers such as ``session-id`` and ``set-cookie`` with ``[REDACTED]`` in the span. + +Note: + The environment variable names used to capture HTTP headers are still experimental, and thus are subject to change. + +API +--- + +""" + +from logging import getLogger +from os import environ +from typing import Collection + +from django import VERSION as django_version +from django.conf import settings +from django.core.exceptions import ImproperlyConfigured + +from opentelemetry.instrumentation._semconv import ( + _get_schema_url, + _OpenTelemetrySemanticConventionStability, + _OpenTelemetryStabilitySignalType, + _report_new, + _report_old, +) +from opentelemetry.instrumentation.django.environment_variables import ( + OTEL_PYTHON_DJANGO_INSTRUMENT, +) +from opentelemetry.instrumentation.django.middleware.otel_middleware import ( + _DjangoMiddleware, +) +from opentelemetry.instrumentation.django.package import _instruments +from opentelemetry.instrumentation.django.version import __version__ +from opentelemetry.instrumentation.instrumentor import BaseInstrumentor +from opentelemetry.metrics import get_meter +from opentelemetry.semconv._incubating.metrics.http_metrics import ( + create_http_server_active_requests, +) +from opentelemetry.semconv.metrics import MetricInstruments +from opentelemetry.semconv.metrics.http_metrics import ( + 
HTTP_SERVER_REQUEST_DURATION, +) +from opentelemetry.trace import get_tracer +from opentelemetry.util.http import get_excluded_urls, parse_excluded_urls + +DJANGO_2_0 = django_version >= (2, 0) + +_excluded_urls_from_env = get_excluded_urls("DJANGO") +_logger = getLogger(__name__) + + +def _get_django_middleware_setting() -> str: + # In Django versions 1.x, setting MIDDLEWARE_CLASSES can be used as a legacy + # alternative to MIDDLEWARE. This is the case when `settings.MIDDLEWARE` has + # its default value (`None`). + if not DJANGO_2_0 and getattr(settings, "MIDDLEWARE", None) is None: + return "MIDDLEWARE_CLASSES" + return "MIDDLEWARE" + + +def _get_django_otel_middleware_position( + middleware_length, default_middleware_position=0 +): + otel_position = environ.get("OTEL_PYTHON_DJANGO_MIDDLEWARE_POSITION") + try: + middleware_position = int(otel_position) + except (ValueError, TypeError): + _logger.debug( + "Invalid OTEL_PYTHON_DJANGO_MIDDLEWARE_POSITION value: (%s). Using default position: %d.", + otel_position, + default_middleware_position, + ) + middleware_position = default_middleware_position + + if middleware_position < 0 or middleware_position > middleware_length: + _logger.debug( + "Middleware position %d is out of range (0-%d). Using 0 as the position", + middleware_position, + middleware_length, + ) + middleware_position = 0 + return middleware_position + + +class DjangoInstrumentor(BaseInstrumentor): + """An instrumentor for Django + + See `BaseInstrumentor` + """ + + _opentelemetry_middleware = ".".join( + [_DjangoMiddleware.__module__, _DjangoMiddleware.__qualname__] + ) + + _sql_commenter_middleware = "opentelemetry.instrumentation.django.middleware.sqlcommenter_middleware.SqlCommenter" + + def instrumentation_dependencies(self) -> Collection[str]: + return _instruments + + def _instrument(self, **kwargs): + # FIXME this is probably a pattern that will show up in the rest of the + # ext. Find a better way of implementing this. 
+ if environ.get(OTEL_PYTHON_DJANGO_INSTRUMENT) == "False": + return + + # initialize semantic conventions opt-in if needed + _OpenTelemetrySemanticConventionStability._initialize() + sem_conv_opt_in_mode = _OpenTelemetrySemanticConventionStability._get_opentelemetry_stability_opt_in_mode( + _OpenTelemetryStabilitySignalType.HTTP, + ) + + tracer_provider = kwargs.get("tracer_provider") + meter_provider = kwargs.get("meter_provider") + _excluded_urls = kwargs.get("excluded_urls") + tracer = get_tracer( + __name__, + __version__, + tracer_provider=tracer_provider, + schema_url=_get_schema_url(sem_conv_opt_in_mode), + ) + meter = get_meter( + __name__, + __version__, + meter_provider=meter_provider, + schema_url=_get_schema_url(sem_conv_opt_in_mode), + ) + _DjangoMiddleware._sem_conv_opt_in_mode = sem_conv_opt_in_mode + _DjangoMiddleware._tracer = tracer + _DjangoMiddleware._meter = meter + _DjangoMiddleware._excluded_urls = ( + _excluded_urls_from_env + if _excluded_urls is None + else parse_excluded_urls(_excluded_urls) + ) + _DjangoMiddleware._otel_request_hook = kwargs.pop("request_hook", None) + _DjangoMiddleware._otel_response_hook = kwargs.pop( + "response_hook", None + ) + _DjangoMiddleware._duration_histogram_old = None + if _report_old(sem_conv_opt_in_mode): + _DjangoMiddleware._duration_histogram_old = meter.create_histogram( + name=MetricInstruments.HTTP_SERVER_DURATION, + unit="ms", + description="Measures the duration of inbound HTTP requests.", + ) + _DjangoMiddleware._duration_histogram_new = None + if _report_new(sem_conv_opt_in_mode): + _DjangoMiddleware._duration_histogram_new = meter.create_histogram( + name=HTTP_SERVER_REQUEST_DURATION, + description="Duration of HTTP server requests.", + unit="s", + ) + _DjangoMiddleware._active_request_counter = ( + create_http_server_active_requests(meter) + ) + # This can not be solved, but is an inherent problem of this approach: + # the order of middleware entries matters, and here you have no control + # on 
that: + # https://docs.djangoproject.com/en/3.0/topics/http/middleware/#activating-middleware + # https://docs.djangoproject.com/en/3.0/ref/middleware/#middleware-ordering + + _middleware_setting = _get_django_middleware_setting() + settings_middleware = [] + try: + settings_middleware = getattr(settings, _middleware_setting, []) + except ImproperlyConfigured as exception: + _logger.debug( + "DJANGO_SETTINGS_MODULE environment variable not configured. Defaulting to empty settings: %s", + exception, + ) + settings.configure() + settings_middleware = getattr(settings, _middleware_setting, []) + except ModuleNotFoundError as exception: + _logger.debug( + "DJANGO_SETTINGS_MODULE points to a non-existent module. Defaulting to empty settings: %s", + exception, + ) + settings.configure() + settings_middleware = getattr(settings, _middleware_setting, []) + + # Django allows to specify middlewares as a tuple, so we convert this tuple to a + # list, otherwise we wouldn't be able to call append/remove + if isinstance(settings_middleware, tuple): + settings_middleware = list(settings_middleware) + + is_sql_commentor_enabled = kwargs.pop("is_sql_commentor_enabled", None) + + middleware_position = _get_django_otel_middleware_position( + len(settings_middleware), kwargs.pop("middleware_position", 0) + ) + + if is_sql_commentor_enabled: + settings_middleware.insert( + middleware_position, self._sql_commenter_middleware + ) + + settings_middleware.insert( + middleware_position, self._opentelemetry_middleware + ) + + setattr(settings, _middleware_setting, settings_middleware) + + def _uninstrument(self, **kwargs): + _middleware_setting = _get_django_middleware_setting() + settings_middleware = getattr(settings, _middleware_setting, None) + + # FIXME This is starting to smell like trouble. We have 2 mechanisms + # that may make this condition be True, one implemented in + # BaseInstrumentor and another one implemented in _instrument. 
Both + # stop _instrument from running and thus, settings_middleware not being + # set. + if settings_middleware is None or ( + self._opentelemetry_middleware not in settings_middleware + ): + return + + settings_middleware.remove(self._opentelemetry_middleware) + setattr(settings, _middleware_setting, settings_middleware) diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/django/environment_variables.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/django/environment_variables.py new file mode 100644 index 00000000..4972a62e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/django/environment_variables.py @@ -0,0 +1,15 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +OTEL_PYTHON_DJANGO_INSTRUMENT = "OTEL_PYTHON_DJANGO_INSTRUMENT" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/django/middleware/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/django/middleware/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/django/middleware/__init__.py diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/django/middleware/otel_middleware.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/django/middleware/otel_middleware.py new file mode 100644 index 00000000..f6070469 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/django/middleware/otel_middleware.py @@ -0,0 +1,476 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import types +from logging import getLogger +from time import time +from timeit import default_timer +from typing import Callable + +from django import VERSION as django_version +from django.http import HttpRequest, HttpResponse + +from opentelemetry.context import detach +from opentelemetry.instrumentation._semconv import ( + _filter_semconv_active_request_count_attr, + _filter_semconv_duration_attrs, + _report_new, + _report_old, + _server_active_requests_count_attrs_new, + _server_active_requests_count_attrs_old, + _server_duration_attrs_new, + _server_duration_attrs_old, + _StabilityMode, +) +from opentelemetry.instrumentation.propagators import ( + get_global_response_propagator, +) +from opentelemetry.instrumentation.utils import ( + _start_internal_or_server_span, + extract_attributes_from_object, +) +from opentelemetry.instrumentation.wsgi import ( + add_response_attributes, + wsgi_getter, +) +from opentelemetry.instrumentation.wsgi import ( + collect_custom_request_headers_attributes as wsgi_collect_custom_request_headers_attributes, +) +from opentelemetry.instrumentation.wsgi import ( + collect_custom_response_headers_attributes as wsgi_collect_custom_response_headers_attributes, +) +from opentelemetry.instrumentation.wsgi import ( + collect_request_attributes as wsgi_collect_request_attributes, +) +from opentelemetry.semconv.attributes.http_attributes import HTTP_ROUTE +from opentelemetry.semconv.trace import SpanAttributes +from opentelemetry.trace import Span, SpanKind, use_span +from opentelemetry.util.http import ( + OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS, + OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST, + OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE, + SanitizeValue, + get_custom_headers, + get_excluded_urls, + get_traced_request_attrs, + normalise_request_header_name, + normalise_response_header_name, + sanitize_method, +) + +try: + from django.core.urlresolvers import ( # pylint: 
disable=no-name-in-module + Resolver404, + resolve, + ) +except ImportError: + from django.urls import Resolver404, resolve + +DJANGO_2_0 = django_version >= (2, 0) +DJANGO_3_0 = django_version >= (3, 0) + +if DJANGO_2_0: + # Since Django 2.0, only `settings.MIDDLEWARE` is supported, so new-style + # middlewares can be used. + class MiddlewareMixin: + def __init__(self, get_response): + self.get_response = get_response + + def __call__(self, request): + self.process_request(request) + response = self.get_response(request) + return self.process_response(request, response) + +else: + # Django versions 1.x can use `settings.MIDDLEWARE_CLASSES` and expect + # old-style middlewares, which are created by inheriting from + # `deprecation.MiddlewareMixin` since its creation in Django 1.10 and 1.11, + # or from `object` for older versions. + try: + from django.utils.deprecation import MiddlewareMixin + except ImportError: + MiddlewareMixin = object + +if DJANGO_3_0: + from django.core.handlers.asgi import ASGIRequest +else: + ASGIRequest = None + +# try/except block exclusive for optional ASGI imports. 
+try: + from opentelemetry.instrumentation.asgi import ( + asgi_getter, + asgi_setter, + set_status_code, + ) + from opentelemetry.instrumentation.asgi import ( + collect_custom_headers_attributes as asgi_collect_custom_headers_attributes, + ) + from opentelemetry.instrumentation.asgi import ( + collect_request_attributes as asgi_collect_request_attributes, + ) + + _is_asgi_supported = True +except ImportError: + asgi_getter = None + asgi_collect_request_attributes = None + set_status_code = None + _is_asgi_supported = False + +_logger = getLogger(__name__) + + +def _is_asgi_request(request: HttpRequest) -> bool: + return ASGIRequest is not None and isinstance(request, ASGIRequest) + + +class _DjangoMiddleware(MiddlewareMixin): + """Django Middleware for OpenTelemetry""" + + _environ_activation_key = ( + "opentelemetry-instrumentor-django.activation_key" + ) + _environ_token = "opentelemetry-instrumentor-django.token" + _environ_span_key = "opentelemetry-instrumentor-django.span_key" + _environ_exception_key = "opentelemetry-instrumentor-django.exception_key" + _environ_active_request_attr_key = ( + "opentelemetry-instrumentor-django.active_request_attr_key" + ) + _environ_duration_attr_key = ( + "opentelemetry-instrumentor-django.duration_attr_key" + ) + _environ_timer_key = "opentelemetry-instrumentor-django.timer_key" + _traced_request_attrs = get_traced_request_attrs("DJANGO") + _excluded_urls = get_excluded_urls("DJANGO") + _tracer = None + _meter = None + _duration_histogram_old = None + _duration_histogram_new = None + _active_request_counter = None + _sem_conv_opt_in_mode = _StabilityMode.DEFAULT + + _otel_request_hook: Callable[[Span, HttpRequest], None] = None + _otel_response_hook: Callable[[Span, HttpRequest, HttpResponse], None] = ( + None + ) + + @staticmethod + def _get_span_name(request): + method = sanitize_method(request.method.strip()) + if method == "_OTHER": + return "HTTP" + try: + if getattr(request, "resolver_match"): + match = 
request.resolver_match + else: + match = resolve(request.path) + + if hasattr(match, "route") and match.route: + return f"{method} {match.route}" + + if hasattr(match, "url_name") and match.url_name: + return f"{method} {match.url_name}" + + return request.method + + except Resolver404: + return request.method + + # pylint: disable=too-many-locals + # pylint: disable=too-many-branches + def process_request(self, request): + # request.META is a dictionary containing all available HTTP headers + # Read more about request.META here: + # https://docs.djangoproject.com/en/3.0/ref/request-response/#django.http.HttpRequest.META + + if self._excluded_urls.url_disabled(request.build_absolute_uri("?")): + return + + is_asgi_request = _is_asgi_request(request) + if not _is_asgi_supported and is_asgi_request: + return + + # pylint:disable=W0212 + request._otel_start_time = time() + request_meta = request.META + + if is_asgi_request: + carrier = request.scope + carrier_getter = asgi_getter + collect_request_attributes = asgi_collect_request_attributes + else: + carrier = request_meta + carrier_getter = wsgi_getter + collect_request_attributes = wsgi_collect_request_attributes + + attributes = collect_request_attributes( + carrier, + self._sem_conv_opt_in_mode, + ) + span, token = _start_internal_or_server_span( + tracer=self._tracer, + span_name=self._get_span_name(request), + start_time=request_meta.get( + "opentelemetry-instrumentor-django.starttime_key" + ), + context_carrier=carrier, + context_getter=carrier_getter, + attributes=attributes, + ) + + active_requests_count_attrs = _parse_active_request_count_attrs( + attributes, + self._sem_conv_opt_in_mode, + ) + + request.META[self._environ_active_request_attr_key] = ( + active_requests_count_attrs + ) + # Pass all of attributes to duration key because we will filter during response + request.META[self._environ_duration_attr_key] = attributes + self._active_request_counter.add(1, active_requests_count_attrs) + if 
span.is_recording(): + attributes = extract_attributes_from_object( + request, self._traced_request_attrs, attributes + ) + if is_asgi_request: + # ASGI requests include extra attributes in request.scope.headers. + attributes = extract_attributes_from_object( + types.SimpleNamespace( + **{ + name.decode("latin1"): value.decode("latin1") + for name, value in request.scope.get("headers", []) + } + ), + self._traced_request_attrs, + attributes, + ) + if span.is_recording() and span.kind == SpanKind.SERVER: + attributes.update( + asgi_collect_custom_headers_attributes( + carrier, + SanitizeValue( + get_custom_headers( + OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS + ) + ), + get_custom_headers( + OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST + ), + normalise_request_header_name, + ) + ) + else: + if span.is_recording() and span.kind == SpanKind.SERVER: + custom_attributes = ( + wsgi_collect_custom_request_headers_attributes(carrier) + ) + if len(custom_attributes) > 0: + span.set_attributes(custom_attributes) + + for key, value in attributes.items(): + span.set_attribute(key, value) + + activation = use_span(span, end_on_exit=True) + activation.__enter__() # pylint: disable=E1101 + request_start_time = default_timer() + request.META[self._environ_timer_key] = request_start_time + request.META[self._environ_activation_key] = activation + request.META[self._environ_span_key] = span + if token: + request.META[self._environ_token] = token + + if _DjangoMiddleware._otel_request_hook: + try: + _DjangoMiddleware._otel_request_hook( # pylint: disable=not-callable + span, request + ) + except Exception: # pylint: disable=broad-exception-caught + # Raising an exception here would leak the request span since process_response + # would not be called. Log the exception instead. 
+ _logger.exception("Exception raised by request_hook") + + # pylint: disable=unused-argument + def process_view(self, request, view_func, *args, **kwargs): + # Process view is executed before the view function, here we get the + # route template from request.resolver_match. It is not set yet in process_request + if self._excluded_urls.url_disabled(request.build_absolute_uri("?")): + return + + if ( + self._environ_activation_key in request.META.keys() + and self._environ_span_key in request.META.keys() + ): + span = request.META[self._environ_span_key] + + match = getattr(request, "resolver_match", None) + if match: + route = getattr(match, "route", None) + if route: + if span.is_recording(): + # http.route is present for both old and new semconv + span.set_attribute(SpanAttributes.HTTP_ROUTE, route) + duration_attrs = request.META[ + self._environ_duration_attr_key + ] + if _report_old(self._sem_conv_opt_in_mode): + duration_attrs[SpanAttributes.HTTP_TARGET] = route + if _report_new(self._sem_conv_opt_in_mode): + duration_attrs[HTTP_ROUTE] = route + + def process_exception(self, request, exception): + if self._excluded_urls.url_disabled(request.build_absolute_uri("?")): + return + + if self._environ_activation_key in request.META.keys(): + request.META[self._environ_exception_key] = exception + + # pylint: disable=too-many-branches + # pylint: disable=too-many-locals + # pylint: disable=too-many-statements + def process_response(self, request, response): + if self._excluded_urls.url_disabled(request.build_absolute_uri("?")): + return response + + is_asgi_request = _is_asgi_request(request) + if not _is_asgi_supported and is_asgi_request: + return response + + activation = request.META.pop(self._environ_activation_key, None) + span = request.META.pop(self._environ_span_key, None) + active_requests_count_attrs = request.META.pop( + self._environ_active_request_attr_key, None + ) + duration_attrs = request.META.pop( + self._environ_duration_attr_key, None + ) + 
request_start_time = request.META.pop(self._environ_timer_key, None) + + if activation and span: + if is_asgi_request: + set_status_code( + span, + response.status_code, + metric_attributes=duration_attrs, + sem_conv_opt_in_mode=self._sem_conv_opt_in_mode, + ) + + if span.is_recording() and span.kind == SpanKind.SERVER: + custom_headers = {} + for key, value in response.items(): + asgi_setter.set(custom_headers, key, value) + + custom_res_attributes = asgi_collect_custom_headers_attributes( + custom_headers, + SanitizeValue( + get_custom_headers( + OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS + ) + ), + get_custom_headers( + OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE + ), + normalise_response_header_name, + ) + for key, value in custom_res_attributes.items(): + span.set_attribute(key, value) + else: + add_response_attributes( + span, + f"{response.status_code} {response.reason_phrase}", + response.items(), + duration_attrs=duration_attrs, + sem_conv_opt_in_mode=self._sem_conv_opt_in_mode, + ) + if span.is_recording() and span.kind == SpanKind.SERVER: + custom_attributes = ( + wsgi_collect_custom_response_headers_attributes( + response.items() + ) + ) + if len(custom_attributes) > 0: + span.set_attributes(custom_attributes) + + propagator = get_global_response_propagator() + if propagator: + propagator.inject(response) + + # record any exceptions raised while processing the request + exception = request.META.pop(self._environ_exception_key, None) + + if _DjangoMiddleware._otel_response_hook: + try: + _DjangoMiddleware._otel_response_hook( # pylint: disable=not-callable + span, request, response + ) + except Exception: # pylint: disable=broad-exception-caught + _logger.exception("Exception raised by response_hook") + + if exception: + activation.__exit__( + type(exception), + exception, + getattr(exception, "__traceback__", None), + ) + else: + activation.__exit__(None, None, None) + + if request_start_time is not None: + duration_s = 
default_timer() - request_start_time + if self._duration_histogram_old: + duration_attrs_old = _parse_duration_attrs( + duration_attrs, _StabilityMode.DEFAULT + ) + # http.target to be included in old semantic conventions + target = duration_attrs.get(SpanAttributes.HTTP_TARGET) + if target: + duration_attrs_old[SpanAttributes.HTTP_TARGET] = target + self._duration_histogram_old.record( + max(round(duration_s * 1000), 0), duration_attrs_old + ) + if self._duration_histogram_new: + duration_attrs_new = _parse_duration_attrs( + duration_attrs, _StabilityMode.HTTP + ) + self._duration_histogram_new.record( + max(duration_s, 0), duration_attrs_new + ) + self._active_request_counter.add(-1, active_requests_count_attrs) + if request.META.get(self._environ_token, None) is not None: + detach(request.META.get(self._environ_token)) + request.META.pop(self._environ_token) + + return response + + +def _parse_duration_attrs( + req_attrs, sem_conv_opt_in_mode=_StabilityMode.DEFAULT +): + return _filter_semconv_duration_attrs( + req_attrs, + _server_duration_attrs_old, + _server_duration_attrs_new, + sem_conv_opt_in_mode, + ) + + +def _parse_active_request_count_attrs( + req_attrs, sem_conv_opt_in_mode=_StabilityMode.DEFAULT +): + return _filter_semconv_active_request_count_attr( + req_attrs, + _server_active_requests_count_attrs_old, + _server_active_requests_count_attrs_new, + sem_conv_opt_in_mode, + ) diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/django/middleware/sqlcommenter_middleware.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/django/middleware/sqlcommenter_middleware.py new file mode 100644 index 00000000..ef53d5dc --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/django/middleware/sqlcommenter_middleware.py @@ -0,0 +1,123 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in 
compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from contextlib import ExitStack +from logging import getLogger +from typing import Any, Type, TypeVar + +# pylint: disable=no-name-in-module +from django import conf, get_version +from django.db import connections +from django.db.backends.utils import CursorDebugWrapper + +from opentelemetry.instrumentation.sqlcommenter_utils import _add_sql_comment +from opentelemetry.instrumentation.utils import _get_opentelemetry_values +from opentelemetry.trace.propagation.tracecontext import ( + TraceContextTextMapPropagator, +) + +_propagator = TraceContextTextMapPropagator() + +_django_version = get_version() +_logger = getLogger(__name__) + +T = TypeVar("T") # pylint: disable-msg=invalid-name + + +class SqlCommenter: + """ + Middleware to append a comment to each database query with details about + the framework and the execution context. 
+ """ + + def __init__(self, get_response) -> None: + self.get_response = get_response + + def __call__(self, request) -> Any: + with ExitStack() as stack: + for db_alias in connections: + stack.enter_context( + connections[db_alias].execute_wrapper( + _QueryWrapper(request) + ) + ) + return self.get_response(request) + + +class _QueryWrapper: + def __init__(self, request) -> None: + self.request = request + + def __call__(self, execute: Type[T], sql, params, many, context) -> T: + # pylint: disable-msg=too-many-locals + with_framework = getattr( + conf.settings, "SQLCOMMENTER_WITH_FRAMEWORK", True + ) + with_controller = getattr( + conf.settings, "SQLCOMMENTER_WITH_CONTROLLER", True + ) + with_route = getattr(conf.settings, "SQLCOMMENTER_WITH_ROUTE", True) + with_app_name = getattr( + conf.settings, "SQLCOMMENTER_WITH_APP_NAME", True + ) + with_opentelemetry = getattr( + conf.settings, "SQLCOMMENTER_WITH_OPENTELEMETRY", True + ) + with_db_driver = getattr( + conf.settings, "SQLCOMMENTER_WITH_DB_DRIVER", True + ) + + db_driver = context["connection"].settings_dict.get("ENGINE", "") + resolver_match = self.request.resolver_match + + sql = _add_sql_comment( + sql, + # Information about the controller. + controller=( + resolver_match.view_name + if resolver_match and with_controller + else None + ), + # route is the pattern that matched a request with a controller i.e. the regex + # See https://docs.djangoproject.com/en/stable/ref/urlresolvers/#django.urls.ResolverMatch.route + # getattr() because the attribute doesn't exist in Django < 2.2. + route=( + getattr(resolver_match, "route", None) + if resolver_match and with_route + else None + ), + # app_name is the application namespace for the URL pattern that matches the URL. + # See https://docs.djangoproject.com/en/stable/ref/urlresolvers/#django.urls.ResolverMatch.app_name + app_name=( + (resolver_match.app_name or None) + if resolver_match and with_app_name + else None + ), + # Framework centric information. 
+ framework=f"django:{_django_version}" if with_framework else None, + # Information about the database and driver. + db_driver=db_driver if with_db_driver else None, + **_get_opentelemetry_values() if with_opentelemetry else {}, + ) + + # TODO: MySQL truncates logs > 1024B so prepend comments + # instead of statements, if the engine is MySQL. + # See: + # * https://github.com/basecamp/marginalia/issues/61 + # * https://github.com/basecamp/marginalia/pull/80 + + # Add the query to the query log if debugging. + if isinstance(context["cursor"], CursorDebugWrapper): + context["connection"].queries_log.append(sql) + + return execute(sql, params, many, context) diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/django/package.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/django/package.py new file mode 100644 index 00000000..290061a3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/django/package.py @@ -0,0 +1,17 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +_instruments = ("django >= 1.10",) +_supports_metrics = True diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/django/version.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/django/version.py new file mode 100644 index 00000000..7fb5b98b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/django/version.py @@ -0,0 +1,15 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__version__ = "0.52b1" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/environment_variables.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/environment_variables.py new file mode 100644 index 00000000..78867796 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/environment_variables.py @@ -0,0 +1,28 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +OTEL_PYTHON_DISABLED_INSTRUMENTATIONS = "OTEL_PYTHON_DISABLED_INSTRUMENTATIONS" +""" +.. envvar:: OTEL_PYTHON_DISABLED_INSTRUMENTATIONS +""" + +OTEL_PYTHON_DISTRO = "OTEL_PYTHON_DISTRO" +""" +.. envvar:: OTEL_PYTHON_DISTRO +""" + +OTEL_PYTHON_CONFIGURATOR = "OTEL_PYTHON_CONFIGURATOR" +""" +.. envvar:: OTEL_PYTHON_CONFIGURATOR +""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/fastapi/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/fastapi/__init__.py new file mode 100644 index 00000000..a19480b2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/fastapi/__init__.py @@ -0,0 +1,456 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Usage +----- + +.. code-block:: python + + import fastapi + from opentelemetry.instrumentation.fastapi import FastAPIInstrumentor + + app = fastapi.FastAPI() + + @app.get("/foobar") + async def foobar(): + return {"message": "hello world"} + + FastAPIInstrumentor.instrument_app(app) + +Configuration +------------- + +Exclude lists +************* +To exclude certain URLs from tracking, set the environment variable ``OTEL_PYTHON_FASTAPI_EXCLUDED_URLS`` +(or ``OTEL_PYTHON_EXCLUDED_URLS`` to cover all instrumentations) to a string of comma delimited regexes that match the +URLs. 
+ +For example, + +:: + + export OTEL_PYTHON_FASTAPI_EXCLUDED_URLS="client/.*/info,healthcheck" + +will exclude requests such as ``https://site/client/123/info`` and ``https://site/xyz/healthcheck``. + +You can also pass comma delimited regexes directly to the ``instrument_app`` method: + +.. code-block:: python + + FastAPIInstrumentor.instrument_app(app, excluded_urls="client/.*/info,healthcheck") + +Request/Response hooks +********************** + +This instrumentation supports request and response hooks. These are functions that get called +right after a span is created for a request and right before the span is finished for the response. + +- The server request hook is passed a server span and ASGI scope object for every incoming request. +- The client request hook is called with the internal span, and ASGI scope and event when the method ``receive`` is called. +- The client response hook is called with the internal span, and ASGI scope and event when the method ``send`` is called. + +.. 
code-block:: python + + def server_request_hook(span: Span, scope: dict[str, Any]): + if span and span.is_recording(): + span.set_attribute("custom_user_attribute_from_request_hook", "some-value") + + def client_request_hook(span: Span, scope: dict[str, Any], message: dict[str, Any]): + if span and span.is_recording(): + span.set_attribute("custom_user_attribute_from_client_request_hook", "some-value") + + def client_response_hook(span: Span, scope: dict[str, Any], message: dict[str, Any]): + if span and span.is_recording(): + span.set_attribute("custom_user_attribute_from_response_hook", "some-value") + + FastAPIInstrumentor().instrument(server_request_hook=server_request_hook, client_request_hook=client_request_hook, client_response_hook=client_response_hook) + +Capture HTTP request and response headers +***************************************** +You can configure the agent to capture specified HTTP headers as span attributes, according to the +`semantic convention <https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md#http-request-and-response-headers>`_. + +Request headers +*************** +To capture HTTP request headers as span attributes, set the environment variable +``OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST`` to a comma delimited list of HTTP header names, +or pass the ``http_capture_headers_server_request`` keyword argument to the ``instrument_app`` method. + +For example using the environment variable, +:: + + export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST="content-type,custom_request_header" + +will extract ``content-type`` and ``custom_request_header`` from the request headers and add them as span attributes. + +Request header names in FastAPI are case-insensitive. So, giving the header name as ``CUStom-Header`` in the environment +variable will capture the header named ``custom-header``. 
+ +Regular expressions may also be used to match multiple headers that correspond to the given pattern. For example: +:: + + export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST="Accept.*,X-.*" + +Would match all request headers that start with ``Accept`` and ``X-``. + +To capture all request headers, set ``OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST`` to ``".*"``. +:: + + export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST=".*" + +The name of the added span attribute will follow the format ``http.request.header.<header_name>`` where ``<header_name>`` +is the normalized HTTP header name (lowercase, with ``-`` replaced by ``_``). The value of the attribute will be a +single item list containing all the header values. + +For example: +``http.request.header.custom_request_header = ["<value1>", "<value2>"]`` + +Response headers +**************** +To capture HTTP response headers as span attributes, set the environment variable +``OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE`` to a comma delimited list of HTTP header names, +or pass the ``http_capture_headers_server_response`` keyword argument to the ``instrument_app`` method. + +For example using the environment variable, +:: + + export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE="content-type,custom_response_header" + +will extract ``content-type`` and ``custom_response_header`` from the response headers and add them as span attributes. + +Response header names in FastAPI are case-insensitive. So, giving the header name as ``CUStom-Header`` in the environment +variable will capture the header named ``custom-header``. + +Regular expressions may also be used to match multiple headers that correspond to the given pattern. For example: +:: + + export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE="Content.*,X-.*" + +Would match all response headers that start with ``Content`` and ``X-``. 
+ +To capture all response headers, set ``OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE`` to ``".*"``. +:: + + export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE=".*" + +The name of the added span attribute will follow the format ``http.response.header.<header_name>`` where ``<header_name>`` +is the normalized HTTP header name (lowercase, with ``-`` replaced by ``_``). The value of the attribute will be a +list containing the header values. + +For example: +``http.response.header.custom_response_header = ["<value1>", "<value2>"]`` + +Sanitizing headers +****************** +In order to prevent storing sensitive data such as personally identifiable information (PII), session keys, passwords, +etc, set the environment variable ``OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS`` +to a comma delimited list of HTTP header names to be sanitized, or pass the ``http_capture_headers_sanitize_fields`` +keyword argument to the ``instrument_app`` method. + +Regexes may be used, and all header names will be matched in a case-insensitive manner. + +For example using the environment variable, +:: + + export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS=".*session.*,set-cookie" + +will replace the value of headers such as ``session-id`` and ``set-cookie`` with ``[REDACTED]`` in the span. + +Note: + The environment variable names used to capture HTTP headers are still experimental, and thus are subject to change. 
    @staticmethod
    def instrument_app(
        app,
        server_request_hook: ServerRequestHook = None,
        client_request_hook: ClientRequestHook = None,
        client_response_hook: ClientResponseHook = None,
        tracer_provider=None,
        meter_provider=None,
        excluded_urls=None,
        http_capture_headers_server_request: list[str] | None = None,
        http_capture_headers_server_response: list[str] | None = None,
        http_capture_headers_sanitize_fields: list[str] | None = None,
        exclude_spans: list[Literal["receive", "send"]] | None = None,
    ):
        """Instrument an uninstrumented FastAPI application.

        All tracing/metrics work is delegated to the generic ASGI middleware
        (``OpenTelemetryMiddleware``); this method only resolves configuration
        and installs the middleware on *app*. Idempotent per application: a
        second call on the same app only logs a warning.

        Args:
            app: The fastapi ASGI application callable to forward requests to.
            server_request_hook: Optional callback which is called with the server span and ASGI
                          scope object for every incoming request.
            client_request_hook: Optional callback which is called with the internal span, and ASGI
                          scope and event which are sent as dictionaries for when the method receive is called.
            client_response_hook: Optional callback which is called with the internal span, and ASGI
                          scope and event which are sent as dictionaries for when the method send is called.
            tracer_provider: The optional tracer provider to use. If omitted
                the current globally configured one is used.
            meter_provider: The optional meter provider to use. If omitted
                the current globally configured one is used.
            excluded_urls: Optional comma delimited string of regexes to match URLs that should not be traced.
            http_capture_headers_server_request: Optional list of HTTP headers to capture from the request.
            http_capture_headers_server_response: Optional list of HTTP headers to capture from the response.
            http_capture_headers_sanitize_fields: Optional list of HTTP headers to sanitize.
            exclude_spans: Optionally exclude HTTP `send` and/or `receive` spans from the trace.
        """
        # Marker attribute makes instrumentation idempotent per app instance.
        if not hasattr(app, "_is_instrumented_by_opentelemetry"):
            app._is_instrumented_by_opentelemetry = False

        if not getattr(app, "_is_instrumented_by_opentelemetry", False):
            # initialize semantic conventions opt-in if needed
            _OpenTelemetrySemanticConventionStability._initialize()
            sem_conv_opt_in_mode = _OpenTelemetrySemanticConventionStability._get_opentelemetry_stability_opt_in_mode(
                _OpenTelemetryStabilitySignalType.HTTP,
            )
            # Explicit argument takes precedence over the env-derived list.
            if excluded_urls is None:
                excluded_urls = _excluded_urls_from_env
            else:
                excluded_urls = parse_excluded_urls(excluded_urls)
            tracer = get_tracer(
                __name__,
                __version__,
                tracer_provider,
                schema_url=_get_schema_url(sem_conv_opt_in_mode),
            )
            meter = get_meter(
                __name__,
                __version__,
                meter_provider,
                schema_url=_get_schema_url(sem_conv_opt_in_mode),
            )

            app.add_middleware(
                OpenTelemetryMiddleware,
                excluded_urls=excluded_urls,
                default_span_details=_get_default_span_details,
                server_request_hook=server_request_hook,
                client_request_hook=client_request_hook,
                client_response_hook=client_response_hook,
                # Pass in tracer/meter to get __name__and __version__ of fastapi instrumentation
                tracer=tracer,
                meter=meter,
                http_capture_headers_server_request=http_capture_headers_server_request,
                http_capture_headers_server_response=http_capture_headers_server_response,
                http_capture_headers_sanitize_fields=http_capture_headers_sanitize_fields,
                exclude_spans=exclude_spans,
            )
            app._is_instrumented_by_opentelemetry = True
            # Remember the app so a later uninstrument() can clean it up.
            if app not in _InstrumentedFastAPI._instrumented_fastapi_apps:
                _InstrumentedFastAPI._instrumented_fastapi_apps.add(app)
        else:
            _logger.warning(
                "Attempting to instrument FastAPI app while already instrumented"
            )
app._is_instrumented_by_opentelemetry = False + + def instrumentation_dependencies(self) -> Collection[str]: + return _instruments + + def _instrument(self, **kwargs): + self._original_fastapi = fastapi.FastAPI + _InstrumentedFastAPI._tracer_provider = kwargs.get("tracer_provider") + _InstrumentedFastAPI._server_request_hook = kwargs.get( + "server_request_hook" + ) + _InstrumentedFastAPI._client_request_hook = kwargs.get( + "client_request_hook" + ) + _InstrumentedFastAPI._client_response_hook = kwargs.get( + "client_response_hook" + ) + _InstrumentedFastAPI._http_capture_headers_server_request = kwargs.get( + "http_capture_headers_server_request" + ) + _InstrumentedFastAPI._http_capture_headers_server_response = ( + kwargs.get("http_capture_headers_server_response") + ) + _InstrumentedFastAPI._http_capture_headers_sanitize_fields = ( + kwargs.get("http_capture_headers_sanitize_fields") + ) + _excluded_urls = kwargs.get("excluded_urls") + _InstrumentedFastAPI._excluded_urls = ( + _excluded_urls_from_env + if _excluded_urls is None + else parse_excluded_urls(_excluded_urls) + ) + _InstrumentedFastAPI._meter_provider = kwargs.get("meter_provider") + _InstrumentedFastAPI._exclude_spans = kwargs.get("exclude_spans") + fastapi.FastAPI = _InstrumentedFastAPI + + def _uninstrument(self, **kwargs): + for instance in _InstrumentedFastAPI._instrumented_fastapi_apps: + self.uninstrument_app(instance) + _InstrumentedFastAPI._instrumented_fastapi_apps.clear() + fastapi.FastAPI = self._original_fastapi + + +class _InstrumentedFastAPI(fastapi.FastAPI): + _tracer_provider = None + _meter_provider = None + _excluded_urls = None + _server_request_hook: ServerRequestHook = None + _client_request_hook: ClientRequestHook = None + _client_response_hook: ClientResponseHook = None + _instrumented_fastapi_apps = set() + _sem_conv_opt_in_mode = _StabilityMode.DEFAULT + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + tracer = get_tracer( + __name__, + 
def _get_route_details(scope):
    """Look up the Starlette route path that matches *scope*.

    TODO: there is currently no way to retrieve http.route from
    a starlette application from scope.

    See: https://github.com/encode/starlette/pull/804

    Args:
        scope: A Starlette scope
    Returns:
        A string containing the route or None
    """
    matched_path = None
    for candidate in scope["app"].routes:
        verdict, _ = candidate.matches(scope)
        if verdict == Match.FULL:
            # An exact match wins immediately.
            return candidate.path
        if verdict == Match.PARTIAL:
            # Remember the (last) partial match as a fallback.
            matched_path = candidate.path
    return matched_path


def _get_default_span_details(scope):
    """Derive the span name and initial attributes from an ASGI scope.

    Args:
        scope: A Starlette scope
    Returns:
        A tuple of span name and attributes
    """
    route = _get_route_details(scope)
    method = sanitize_method(scope.get("method", "").strip())
    if method == "_OTHER":
        method = "HTTP"

    attributes = {SpanAttributes.HTTP_ROUTE: route} if route else {}

    if route and method:
        # Plain HTTP request: "<METHOD> <route>".
        name = f"{method} {route}"
    elif route:
        # Websocket scopes carry no method; use the bare route.
        name = route
    else:
        # No route matched (e.g. 404): fall back to the method.
        name = method
    return name, attributes
+ + +_instruments = ("fastapi ~= 0.58",) + +_supports_metrics = True + +_semconv_status = "migration" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/fastapi/version.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/fastapi/version.py new file mode 100644 index 00000000..7fb5b98b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/fastapi/version.py @@ -0,0 +1,15 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__version__ = "0.52b1" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/flask/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/flask/__init__.py new file mode 100644 index 00000000..9691f884 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/flask/__init__.py @@ -0,0 +1,776 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Note: This package is not named "flask" because of
+# https://github.com/PyCQA/pylint/issues/2648
+
+"""
+This library builds on the OpenTelemetry WSGI middleware to track web requests
+in Flask applications. In addition to opentelemetry-util-http, it
+supports Flask-specific features such as:
+
+* The Flask url rule pattern is used as the Span name.
+* The ``http.route`` Span attribute is set so that one can see which URL rule
+  matched a request.
+
+SQLCOMMENTER
+*****************************************
+You can optionally configure Flask instrumentation to enable sqlcommenter which enriches
+the query with contextual information.
+
+Usage
+-----
+
+.. code:: python
+
+    from opentelemetry.instrumentation.flask import FlaskInstrumentor
+
+    FlaskInstrumentor().instrument(enable_commenter=True, commenter_options={})
+
+For example, FlaskInstrumentor when used with SQLAlchemyInstrumentor or Psycopg2Instrumentor,
+invoking ``cursor.execute("select * from auth_users")`` will lead to sql query
+``select * from auth_users`` but when SQLCommenter is enabled the query will get appended with
+some configurable tags like:
+
+.. code::
+
+    select * from auth_users /*metrics=value*/;"
+
+In order for the commenter to append flask related tags to sql queries, the commenter needs
+to be enabled on the respective SQLAlchemyInstrumentor or Psycopg2Instrumentor framework too.
+
+SQLCommenter Configurations
+***************************
+We can configure the tags to be appended to the SQL query log by adding configuration
+inside ``commenter_options={}`` dict.
+
+For example, enabling this flag will add flask and its version which
+is ``/*flask%%3A2.9.3*/`` to the SQL query as a comment (default is True):
+
+.. code:: python
+
+    framework = True
+
+For example, enabling this flag will add route uri ``/*route='/home'*/``
+to the SQL query as a comment (default is True):
+
+.. 
code:: python + + route = True + +For example, enabling this flag will add controller name ``/*controller='home_view'*/`` +to the SQL query as a comment (default is True): + +.. code:: python + + controller = True + +Usage +----- + +.. code-block:: python + + from flask import Flask + from opentelemetry.instrumentation.flask import FlaskInstrumentor + + app = Flask(__name__) + + FlaskInstrumentor().instrument_app(app) + + @app.route("/") + def hello(): + return "Hello!" + + if __name__ == "__main__": + app.run(debug=True) + +Configuration +------------- + +Exclude lists +************* +To exclude certain URLs from tracking, set the environment variable ``OTEL_PYTHON_FLASK_EXCLUDED_URLS`` +(or ``OTEL_PYTHON_EXCLUDED_URLS`` to cover all instrumentations) to a string of comma delimited regexes that match the +URLs. + +For example, + +:: + + export OTEL_PYTHON_FLASK_EXCLUDED_URLS="client/.*/info,healthcheck" + +will exclude requests such as ``https://site/client/123/info`` and ``https://site/xyz/healthcheck``. + +You can also pass comma delimited regexes directly to the ``instrument_app`` method: + +.. code-block:: python + + FlaskInstrumentor().instrument_app(app, excluded_urls="client/.*/info,healthcheck") + +Request/Response hooks +********************** + +This instrumentation supports request and response hooks. These are functions that get called +right after a span is created for a request and right before the span is finished for the response. + +- The client request hook is called with the internal span and an instance of WSGIEnvironment (flask.request.environ) + when the method ``receive`` is called. +- The client response hook is called with the internal span, the status of the response and a list of key-value (tuples) + representing the response headers returned from the response when the method ``send`` is called. + +For example, + +.. 
code-block:: python + + def request_hook(span: Span, environ: WSGIEnvironment): + if span and span.is_recording(): + span.set_attribute("custom_user_attribute_from_request_hook", "some-value") + + def response_hook(span: Span, status: str, response_headers: List): + if span and span.is_recording(): + span.set_attribute("custom_user_attribute_from_response_hook", "some-value") + + FlaskInstrumentor().instrument(request_hook=request_hook, response_hook=response_hook) + +Flask Request object reference: https://flask.palletsprojects.com/en/2.1.x/api/#flask.Request + +Capture HTTP request and response headers +***************************************** +You can configure the agent to capture specified HTTP headers as span attributes, according to the +`semantic convention <https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md#http-request-and-response-headers>`_. + +Request headers +*************** +To capture HTTP request headers as span attributes, set the environment variable +``OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST`` to a comma delimited list of HTTP header names. + +For example, +:: + + export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST="content-type,custom_request_header" + +will extract ``content-type`` and ``custom_request_header`` from the request headers and add them as span attributes. + +Request header names in Flask are case-insensitive and ``-`` characters are replaced by ``_``. So, giving the header +name as ``CUStom_Header`` in the environment variable will capture the header named ``custom-header``. + +Regular expressions may also be used to match multiple headers that correspond to the given pattern. For example: +:: + + export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST="Accept.*,X-.*" + +Would match all request headers that start with ``Accept`` and ``X-``. 
+ +To capture all request headers, set ``OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST`` to ``".*"``. +:: + + export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST=".*" + +The name of the added span attribute will follow the format ``http.request.header.<header_name>`` where ``<header_name>`` +is the normalized HTTP header name (lowercase, with ``-`` replaced by ``_``). The value of the attribute will be a +single item list containing all the header values. + +For example: +``http.request.header.custom_request_header = ["<value1>,<value2>"]`` + +Response headers +**************** +To capture HTTP response headers as span attributes, set the environment variable +``OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE`` to a comma delimited list of HTTP header names. + +For example, +:: + + export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE="content-type,custom_response_header" + +will extract ``content-type`` and ``custom_response_header`` from the response headers and add them as span attributes. + +Response header names in Flask are case-insensitive. So, giving the header name as ``CUStom-Header`` in the environment +variable will capture the header named ``custom-header``. + +Regular expressions may also be used to match multiple headers that correspond to the given pattern. For example: +:: + + export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE="Content.*,X-.*" + +Would match all response headers that start with ``Content`` and ``X-``. + +To capture all response headers, set ``OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE`` to ``".*"``. +:: + + export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE=".*" + +The name of the added span attribute will follow the format ``http.response.header.<header_name>`` where ``<header_name>`` +is the normalized HTTP header name (lowercase, with ``-`` replaced by ``_``). 
The value of the attribute will be a +single item list containing all the header values. + +For example: +``http.response.header.custom_response_header = ["<value1>,<value2>"]`` + +Sanitizing headers +****************** +In order to prevent storing sensitive data such as personally identifiable information (PII), session keys, passwords, +etc, set the environment variable ``OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS`` +to a comma delimited list of HTTP header names to be sanitized. Regexes may be used, and all header names will be +matched in a case-insensitive manner. + +For example, +:: + + export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS=".*session.*,set-cookie" + +will replace the value of headers such as ``session-id`` and ``set-cookie`` with ``[REDACTED]`` in the span. + +Note: + The environment variable names used to capture HTTP headers are still experimental, and thus are subject to change. + +API +--- +""" + +import weakref +from logging import getLogger +from time import time_ns +from timeit import default_timer +from typing import Collection + +import flask +from packaging import version as package_version + +import opentelemetry.instrumentation.wsgi as otel_wsgi +from opentelemetry import context, trace +from opentelemetry.instrumentation._semconv import ( + _get_schema_url, + _OpenTelemetrySemanticConventionStability, + _OpenTelemetryStabilitySignalType, + _report_new, + _report_old, + _StabilityMode, +) +from opentelemetry.instrumentation.flask.package import _instruments +from opentelemetry.instrumentation.flask.version import __version__ +from opentelemetry.instrumentation.instrumentor import BaseInstrumentor +from opentelemetry.instrumentation.propagators import ( + get_global_response_propagator, +) +from opentelemetry.instrumentation.utils import _start_internal_or_server_span +from opentelemetry.metrics import get_meter +from opentelemetry.semconv.attributes.http_attributes import HTTP_ROUTE +from 
opentelemetry.semconv.metrics import MetricInstruments +from opentelemetry.semconv.metrics.http_metrics import ( + HTTP_SERVER_REQUEST_DURATION, +) +from opentelemetry.semconv.trace import SpanAttributes +from opentelemetry.util._importlib_metadata import version +from opentelemetry.util.http import ( + get_excluded_urls, + parse_excluded_urls, + sanitize_method, +) + +_logger = getLogger(__name__) + +_ENVIRON_STARTTIME_KEY = "opentelemetry-flask.starttime_key" +_ENVIRON_SPAN_KEY = "opentelemetry-flask.span_key" +_ENVIRON_ACTIVATION_KEY = "opentelemetry-flask.activation_key" +_ENVIRON_REQCTX_REF_KEY = "opentelemetry-flask.reqctx_ref_key" +_ENVIRON_TOKEN = "opentelemetry-flask.token" + +_excluded_urls_from_env = get_excluded_urls("FLASK") + +flask_version = version("flask") + +if package_version.parse(flask_version) >= package_version.parse("2.2.0"): + + def _request_ctx_ref() -> weakref.ReferenceType: + return weakref.ref(flask.globals.request_ctx._get_current_object()) + +else: + + def _request_ctx_ref() -> weakref.ReferenceType: + return weakref.ref(flask._request_ctx_stack.top) + + +def get_default_span_name(): + method = sanitize_method( + flask.request.environ.get("REQUEST_METHOD", "").strip() + ) + if method == "_OTHER": + method = "HTTP" + try: + span_name = f"{method} {flask.request.url_rule.rule}" + except AttributeError: + span_name = otel_wsgi.get_default_span_name(flask.request.environ) + return span_name + + +def _rewrapped_app( + wsgi_app, + active_requests_counter, + duration_histogram_old=None, + response_hook=None, + excluded_urls=None, + sem_conv_opt_in_mode=_StabilityMode.DEFAULT, + duration_histogram_new=None, +): + def _wrapped_app(wrapped_app_environ, start_response): + # We want to measure the time for route matching, etc. + # In theory, we could start the span here and use + # update_name later but that API is "highly discouraged" so + # we better avoid it. 
+ wrapped_app_environ[_ENVIRON_STARTTIME_KEY] = time_ns() + start = default_timer() + attributes = otel_wsgi.collect_request_attributes( + wrapped_app_environ, sem_conv_opt_in_mode + ) + active_requests_count_attrs = ( + otel_wsgi._parse_active_request_count_attrs( + attributes, + sem_conv_opt_in_mode, + ) + ) + + active_requests_counter.add(1, active_requests_count_attrs) + request_route = None + + def _start_response(status, response_headers, *args, **kwargs): + if flask.request and ( + excluded_urls is None + or not excluded_urls.url_disabled(flask.request.url) + ): + nonlocal request_route + request_route = flask.request.url_rule + + span = flask.request.environ.get(_ENVIRON_SPAN_KEY) + + propagator = get_global_response_propagator() + if propagator: + propagator.inject( + response_headers, + setter=otel_wsgi.default_response_propagation_setter, + ) + + if span: + otel_wsgi.add_response_attributes( + span, + status, + response_headers, + attributes, + sem_conv_opt_in_mode, + ) + if ( + span.is_recording() + and span.kind == trace.SpanKind.SERVER + ): + custom_attributes = otel_wsgi.collect_custom_response_headers_attributes( + response_headers + ) + if len(custom_attributes) > 0: + span.set_attributes(custom_attributes) + else: + _logger.warning( + "Flask environ's OpenTelemetry span " + "missing at _start_response(%s)", + status, + ) + if response_hook is not None: + response_hook(span, status, response_headers) + return start_response(status, response_headers, *args, **kwargs) + + result = wsgi_app(wrapped_app_environ, _start_response) + duration_s = default_timer() - start + if duration_histogram_old: + duration_attrs_old = otel_wsgi._parse_duration_attrs( + attributes, _StabilityMode.DEFAULT + ) + + if request_route: + # http.target to be included in old semantic conventions + duration_attrs_old[SpanAttributes.HTTP_TARGET] = str( + request_route + ) + + duration_histogram_old.record( + max(round(duration_s * 1000), 0), duration_attrs_old + ) + if 
duration_histogram_new: + duration_attrs_new = otel_wsgi._parse_duration_attrs( + attributes, _StabilityMode.HTTP + ) + + if request_route: + duration_attrs_new[HTTP_ROUTE] = str(request_route) + + duration_histogram_new.record( + max(duration_s, 0), duration_attrs_new + ) + active_requests_counter.add(-1, active_requests_count_attrs) + return result + + return _wrapped_app + + +def _wrapped_before_request( + request_hook=None, + tracer=None, + excluded_urls=None, + enable_commenter=True, + commenter_options=None, + sem_conv_opt_in_mode=_StabilityMode.DEFAULT, +): + def _before_request(): + if excluded_urls and excluded_urls.url_disabled(flask.request.url): + return + flask_request_environ = flask.request.environ + span_name = get_default_span_name() + + attributes = otel_wsgi.collect_request_attributes( + flask_request_environ, + sem_conv_opt_in_mode=sem_conv_opt_in_mode, + ) + if flask.request.url_rule: + # For 404 that result from no route found, etc, we + # don't have a url_rule. + attributes[SpanAttributes.HTTP_ROUTE] = flask.request.url_rule.rule + span, token = _start_internal_or_server_span( + tracer=tracer, + span_name=span_name, + start_time=flask_request_environ.get(_ENVIRON_STARTTIME_KEY), + context_carrier=flask_request_environ, + context_getter=otel_wsgi.wsgi_getter, + attributes=attributes, + ) + + if request_hook: + request_hook(span, flask_request_environ) + + if span.is_recording(): + for key, value in attributes.items(): + span.set_attribute(key, value) + if span.is_recording() and span.kind == trace.SpanKind.SERVER: + custom_attributes = ( + otel_wsgi.collect_custom_request_headers_attributes( + flask_request_environ + ) + ) + if len(custom_attributes) > 0: + span.set_attributes(custom_attributes) + + activation = trace.use_span(span, end_on_exit=True) + activation.__enter__() # pylint: disable=E1101 + flask_request_environ[_ENVIRON_ACTIVATION_KEY] = activation + flask_request_environ[_ENVIRON_REQCTX_REF_KEY] = _request_ctx_ref() + 
def _wrapped_teardown_request(
    excluded_urls=None,
):
    """Build the ``teardown_request`` callback that ends the request span.

    Args:
        excluded_urls: Optional exclude-list object; requests whose URL it
            disables are skipped entirely.
    Returns:
        The ``_teardown_request(exc)`` callback to register with Flask.
    """

    def _teardown_request(exc):
        # pylint: disable=E1101
        if excluded_urls and excluded_urls.url_disabled(flask.request.url):
            return

        # Retrieve the span activation and the weakref to the request_ctx
        # that _before_request stored on this request's environ.
        activation = flask.request.environ.get(_ENVIRON_ACTIVATION_KEY)

        original_reqctx_ref = flask.request.environ.get(
            _ENVIRON_REQCTX_REF_KEY
        )
        current_reqctx_ref = _request_ctx_ref()
        if not activation or original_reqctx_ref != current_reqctx_ref:
            # This request didn't start a span, maybe because it was created in
            # a way that doesn't run `before_request`, like when it is created
            # with `app.test_request_context`.
            #
            # Similarly, check that the request_ctx that created the span
            # matches the current request_ctx, and only tear down if they match.
            # This situation can arise if the original request_ctx handling
            # the request calls functions that push new request_ctx's,
            # like any decorated with `flask.copy_current_request_context`.

            return
        # End the span, recording the exception (if any) on the way out.
        if exc is None:
            activation.__exit__(None, None, None)
        else:
            activation.__exit__(
                type(exc), exc, getattr(exc, "__traceback__", None)
            )

        # Detach the context token attached when the span was started.
        if flask.request.environ.get(_ENVIRON_TOKEN, None):
            context.detach(flask.request.environ.get(_ENVIRON_TOKEN))

    return _teardown_request
trace.get_tracer( + __name__, + __version__, + _InstrumentedFlask._tracer_provider, + schema_url=_get_schema_url( + _InstrumentedFlask._sem_conv_opt_in_mode + ), + ) + + _before_request = _wrapped_before_request( + _InstrumentedFlask._request_hook, + tracer, + excluded_urls=_InstrumentedFlask._excluded_urls, + enable_commenter=_InstrumentedFlask._enable_commenter, + commenter_options=_InstrumentedFlask._commenter_options, + sem_conv_opt_in_mode=_InstrumentedFlask._sem_conv_opt_in_mode, + ) + self._before_request = _before_request + self.before_request(_before_request) + + _teardown_request = _wrapped_teardown_request( + excluded_urls=_InstrumentedFlask._excluded_urls, + ) + self.teardown_request(_teardown_request) + + +class FlaskInstrumentor(BaseInstrumentor): + # pylint: disable=protected-access,attribute-defined-outside-init + """An instrumentor for flask.Flask + + See `BaseInstrumentor` + """ + + def instrumentation_dependencies(self) -> Collection[str]: + return _instruments + + def _instrument(self, **kwargs): + self._original_flask = flask.Flask + request_hook = kwargs.get("request_hook") + response_hook = kwargs.get("response_hook") + if callable(request_hook): + _InstrumentedFlask._request_hook = request_hook + if callable(response_hook): + _InstrumentedFlask._response_hook = response_hook + tracer_provider = kwargs.get("tracer_provider") + _InstrumentedFlask._tracer_provider = tracer_provider + excluded_urls = kwargs.get("excluded_urls") + _InstrumentedFlask._excluded_urls = ( + _excluded_urls_from_env + if excluded_urls is None + else parse_excluded_urls(excluded_urls) + ) + enable_commenter = kwargs.get("enable_commenter", True) + _InstrumentedFlask._enable_commenter = enable_commenter + + commenter_options = kwargs.get("commenter_options", {}) + _InstrumentedFlask._commenter_options = commenter_options + meter_provider = kwargs.get("meter_provider") + _InstrumentedFlask._meter_provider = meter_provider + + sem_conv_opt_in_mode = 
_OpenTelemetrySemanticConventionStability._get_opentelemetry_stability_opt_in_mode( + _OpenTelemetryStabilitySignalType.HTTP, + ) + + _InstrumentedFlask._sem_conv_opt_in_mode = sem_conv_opt_in_mode + + flask.Flask = _InstrumentedFlask + + def _uninstrument(self, **kwargs): + flask.Flask = self._original_flask + + # pylint: disable=too-many-locals + @staticmethod + def instrument_app( + app, + request_hook=None, + response_hook=None, + tracer_provider=None, + excluded_urls=None, + enable_commenter=True, + commenter_options=None, + meter_provider=None, + ): + if not hasattr(app, "_is_instrumented_by_opentelemetry"): + app._is_instrumented_by_opentelemetry = False + + if not app._is_instrumented_by_opentelemetry: + # initialize semantic conventions opt-in if needed + _OpenTelemetrySemanticConventionStability._initialize() + sem_conv_opt_in_mode = _OpenTelemetrySemanticConventionStability._get_opentelemetry_stability_opt_in_mode( + _OpenTelemetryStabilitySignalType.HTTP, + ) + excluded_urls = ( + parse_excluded_urls(excluded_urls) + if excluded_urls is not None + else _excluded_urls_from_env + ) + meter = get_meter( + __name__, + __version__, + meter_provider, + schema_url=_get_schema_url(sem_conv_opt_in_mode), + ) + duration_histogram_old = None + if _report_old(sem_conv_opt_in_mode): + duration_histogram_old = meter.create_histogram( + name=MetricInstruments.HTTP_SERVER_DURATION, + unit="ms", + description="Measures the duration of inbound HTTP requests.", + ) + duration_histogram_new = None + if _report_new(sem_conv_opt_in_mode): + duration_histogram_new = meter.create_histogram( + name=HTTP_SERVER_REQUEST_DURATION, + unit="s", + description="Duration of HTTP server requests.", + ) + active_requests_counter = meter.create_up_down_counter( + name=MetricInstruments.HTTP_SERVER_ACTIVE_REQUESTS, + unit="{request}", + description="Number of active HTTP server requests.", + ) + + app._original_wsgi_app = app.wsgi_app + app.wsgi_app = _rewrapped_app( + app.wsgi_app, + 
active_requests_counter, + duration_histogram_old, + response_hook=response_hook, + excluded_urls=excluded_urls, + sem_conv_opt_in_mode=sem_conv_opt_in_mode, + duration_histogram_new=duration_histogram_new, + ) + + tracer = trace.get_tracer( + __name__, + __version__, + tracer_provider, + schema_url=_get_schema_url(sem_conv_opt_in_mode), + ) + + _before_request = _wrapped_before_request( + request_hook, + tracer, + excluded_urls=excluded_urls, + enable_commenter=enable_commenter, + commenter_options=( + commenter_options if commenter_options else {} + ), + sem_conv_opt_in_mode=sem_conv_opt_in_mode, + ) + app._before_request = _before_request + app.before_request(_before_request) + + _teardown_request = _wrapped_teardown_request( + excluded_urls=excluded_urls, + ) + app._teardown_request = _teardown_request + app.teardown_request(_teardown_request) + app._is_instrumented_by_opentelemetry = True + else: + _logger.warning( + "Attempting to instrument Flask app while already instrumented" + ) + + @staticmethod + def uninstrument_app(app): + if hasattr(app, "_original_wsgi_app"): + app.wsgi_app = app._original_wsgi_app + + # FIXME add support for other Flask blueprints that are not None + app.before_request_funcs[None].remove(app._before_request) + app.teardown_request_funcs[None].remove(app._teardown_request) + del app._original_wsgi_app + app._is_instrumented_by_opentelemetry = False + else: + _logger.warning( + "Attempting to uninstrument Flask " + "app while already uninstrumented" + ) diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/flask/package.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/flask/package.py new file mode 100644 index 00000000..150ca0ca --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/flask/package.py @@ -0,0 +1,20 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except 
in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +_instruments = ("flask >= 1.0",) + +_supports_metrics = True + +_semconv_status = "migration" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/flask/version.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/flask/version.py new file mode 100644 index 00000000..7fb5b98b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/flask/version.py @@ -0,0 +1,15 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+__version__ = "0.52b1"
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/instrumentor.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/instrumentor.py
new file mode 100644
index 00000000..cf079dbf
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/instrumentor.py
@@ -0,0 +1,139 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# type: ignore
+
+"""
+OpenTelemetry Base Instrumentor
+"""
+
+from __future__ import annotations
+
+from abc import ABC, abstractmethod
+from logging import getLogger
+from typing import Any, Collection
+
+from opentelemetry.instrumentation._semconv import (
+    _OpenTelemetrySemanticConventionStability,
+)
+from opentelemetry.instrumentation.dependencies import (
+    DependencyConflict,
+    get_dependency_conflicts,
+)
+
+_LOG = getLogger(__name__)
+
+
+class BaseInstrumentor(ABC):
+    """Abstract base class for instrumentors.
+
+    Concrete subclasses instrument a specific third-party library or
+    framework, either through the ``opentelemetry-instrument`` command or
+    by having their methods called directly.  Because every library has
+    different instrumentation needs, subclasses are free to add whatever
+    extra methods give end users practical instrumentation.
+    """
+
+    _instance = None
+    _is_instrumented_by_opentelemetry = False
+
+    def __new__(cls, *args, **kwargs):
+        # Instrumentors are singletons: every instantiation of a given
+        # subclass yields the same object, so instrument()/uninstrument()
+        # state is shared process-wide.
+        if cls._instance is None:
+            cls._instance = object.__new__(cls)
+        return cls._instance
+
+    @property
+    def is_instrumented_by_opentelemetry(self):
+        """Whether :meth:`instrument` has been applied (and not undone)."""
+        return self._is_instrumented_by_opentelemetry
+
+    @abstractmethod
+    def instrumentation_dependencies(self) -> Collection[str]:
+        """Return the python packages (with versions) that will be instrumented.
+
+        Use the same format as requirements.txt or pyproject.toml.  For
+        example, an instrumentation of requests 1.x would return
+        ``['requests ~= 1.0']``.  This ensures the instrumentation is only
+        used when the specified library is present in the environment.
+        """
+
+    def _instrument(self, **kwargs: Any):
+        """Instrument the library"""
+
+    @abstractmethod
+    def _uninstrument(self, **kwargs: Any):
+        """Uninstrument the library"""
+
+    def _check_dependency_conflicts(self) -> DependencyConflict | None:
+        # Delegates to the dependency helper; returns None when all declared
+        # dependencies are satisfied.
+        return get_dependency_conflicts(self.instrumentation_dependencies())
+
+    def instrument(self, **kwargs: Any):
+        """Instrument the library.
+
+        The ``opentelemetry-instrument`` command calls this with no optional
+        arguments, so calling it directly without any optional values must
+        do the very same thing that the command does.
+        """
+        if self._is_instrumented_by_opentelemetry:
+            _LOG.warning("Attempting to instrument while already instrumented")
+            return None
+
+        # Refuse to instrument when dependencies are missing or conflicting,
+        # unless the caller explicitly opts out of the check.
+        if not kwargs.pop("skip_dep_check", False):
+            conflict = self._check_dependency_conflicts()
+            if conflict:
+                _LOG.error(conflict)
+                return None
+
+        # initialize semantic conventions opt-in if needed
+        _OpenTelemetrySemanticConventionStability._initialize()
+
+        result = self._instrument(  # pylint: disable=assignment-from-no-return
+            **kwargs
+        )
+        self._is_instrumented_by_opentelemetry = True
+        return result
+
+    def uninstrument(self, **kwargs: Any):
+        """Uninstrument the library.
+
+        See ``BaseInstrumentor.instrument`` for more information regarding
+        the usage of ``kwargs``.
+        """
+        if not self._is_instrumented_by_opentelemetry:
+            _LOG.warning(
+                "Attempting to uninstrument while already uninstrumented"
+            )
+            return None
+
+        result = self._uninstrument(**kwargs)
+        self._is_instrumented_by_opentelemetry = False
+        return result
+
+
+__all__ = ["BaseInstrumentor"]
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/propagators.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/propagators.py
new file mode 100644
index 00000000..01859599
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/propagators.py
@@ -0,0 +1,125 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Experimental propagators that inject trace context into HTTP *response*
+carriers.  Server-side frameworks that start traces on incoming requests
+can use these to share the trace context with the client, so the client
+can add its own spans to the same trace.
+
+This is part of an upcoming W3C spec and will eventually make it to the
+Otel spec.
+
+https://w3c.github.io/trace-context/#trace-context-http-response-headers-format
+"""
+
+import typing
+from abc import ABC, abstractmethod
+
+from opentelemetry import trace
+from opentelemetry.context.context import Context
+from opentelemetry.propagators import textmap
+from opentelemetry.trace import format_span_id, format_trace_id
+
+_HTTP_HEADER_ACCESS_CONTROL_EXPOSE_HEADERS = "Access-Control-Expose-Headers"
+_RESPONSE_PROPAGATOR = None
+
+
+def get_global_response_propagator():
+    """Return the globally registered response propagator (or ``None``)."""
+    return _RESPONSE_PROPAGATOR
+
+
+def set_global_response_propagator(propagator):
+    """Register *propagator* as the global response propagator."""
+    global _RESPONSE_PROPAGATOR  # pylint:disable=global-statement
+    _RESPONSE_PROPAGATOR = propagator
+
+
+class Setter(ABC):
+    @abstractmethod
+    def set(self, carrier, key, value):
+        """Inject the provided key value pair in carrier."""
+
+
+class DictHeaderSetter(Setter):
+    def set(self, carrier, key, value):  # pylint: disable=no-self-use
+        # Append to an existing header value instead of clobbering it.
+        existing = carrier.get(key, "")
+        carrier[key] = f"{existing}, {value}" if existing else value
+
+
+class FuncSetter(Setter):
+    """Adapts an arbitrary setter function into a valid ``Setter``.
+
+    Any function that can set values in a carrier can be converted into a
+    Setter by using FuncSetter.  This is useful when injecting trace
+    context into non-dict objects such as HTTP Response objects of
+    different frameworks.
+
+    For example, a setter for a Falcon response object can be created as:
+
+        setter = FuncSetter(falcon.api.Response.append_header)
+
+    and then used with the propagator as:
+
+        propagator.inject(falcon_response, setter=setter)
+
+    which essentially makes the propagator call
+    ``falcon_response.append_header(key, value)``.
+    """
+
+    def __init__(self, func):
+        self._func = func
+
+    def set(self, carrier, key, value):
+        self._func(carrier, key, value)
+
+
+default_setter = DictHeaderSetter()
+
+
+class ResponsePropagator(ABC):
+    @abstractmethod
+    def inject(
+        self,
+        carrier: textmap.CarrierT,
+        context: typing.Optional[Context] = None,
+        setter: textmap.Setter = default_setter,
+    ) -> None:
+        """Injects SpanContext into the HTTP response carrier."""
+
+
+class TraceResponsePropagator(ResponsePropagator):
+    """Experimental propagator that injects tracecontext into HTTP responses."""
+
+    def inject(
+        self,
+        carrier: textmap.CarrierT,
+        context: typing.Optional[Context] = None,
+        setter: textmap.Setter = default_setter,
+    ) -> None:
+        """Injects SpanContext into the HTTP response carrier."""
+        span_context = trace.get_current_span(context).get_span_context()
+        if span_context == trace.INVALID_SPAN_CONTEXT:
+            # No valid span in *context*: nothing to propagate.
+            return
+
+        header_name = "traceresponse"
+        trace_response = "00-{}-{}-{:02x}".format(
+            format_trace_id(span_context.trace_id),
+            format_span_id(span_context.span_id),
+            span_context.trace_flags,
+        )
+        setter.set(carrier, header_name, trace_response)
+        # Expose the header so browsers may read it cross-origin.
+        setter.set(
+            carrier,
+            _HTTP_HEADER_ACCESS_CONTROL_EXPOSE_HEADERS,
+            header_name,
+        )
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/psycopg2/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/psycopg2/__init__.py
new file mode 100644
index 00000000..022c59f0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/psycopg2/__init__.py
@@ -0,0 +1,336 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0
(the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +The integration with PostgreSQL supports the `Psycopg`_ library, it can be enabled by +using ``Psycopg2Instrumentor``. + +.. _Psycopg: http://initd.org/psycopg/ + +SQLCOMMENTER +***************************************** +You can optionally configure Psycopg2 instrumentation to enable sqlcommenter which enriches +the query with contextual information. + +Usage +----- + +.. code:: python + + from opentelemetry.instrumentation.psycopg2 import Psycopg2Instrumentor + + Psycopg2Instrumentor().instrument(enable_commenter=True, commenter_options={}) + + +For example, +:: + + Invoking cursor.execute("select * from auth_users") will lead to sql query "select * from auth_users" but when SQLCommenter is enabled + the query will get appended with some configurable tags like "select * from auth_users /*tag=value*/;" + + +SQLCommenter Configurations +*************************** +We can configure the tags to be appended to the sqlquery log by adding configuration inside commenter_options(default:{}) keyword + +db_driver = True(Default) or False + +For example, +:: +Enabling this flag will add psycopg2 and it's version which is /*psycopg2%%3A2.9.3*/ + +dbapi_threadsafety = True(Default) or False + +For example, +:: +Enabling this flag will add threadsafety /*dbapi_threadsafety=2*/ + +dbapi_level = True(Default) or False + +For example, +:: +Enabling this flag will add dbapi_level /*dbapi_level='2.0'*/ + +libpq_version = True(Default) or False + +For example, +:: +Enabling this flag 
will add libpq_version /*libpq_version=140001*/ + +driver_paramstyle = True(Default) or False + +For example, +:: +Enabling this flag will add driver_paramstyle /*driver_paramstyle='pyformat'*/ + +opentelemetry_values = True(Default) or False + +For example, +:: +Enabling this flag will add traceparent values /*traceparent='00-03afa25236b8cd948fa853d67038ac79-405ff022e8247c46-01'*/ + +SQLComment in span attribute +**************************** +If sqlcommenter is enabled, you can optionally configure psycopg2 instrumentation to append sqlcomment to query span attribute for convenience of your platform. + +.. code:: python + + from opentelemetry.instrumentation.psycopg2 import Psycopg2Instrumentor + + Psycopg2Instrumentor().instrument( + enable_commenter=True, + enable_attribute_commenter=True, + ) + + +For example, +:: + + Invoking cursor.execute("select * from auth_users") will lead to postgresql query "select * from auth_users" but when SQLCommenter and attribute_commenter are enabled + the query will get appended with some configurable tags like "select * from auth_users /*tag=value*/;" for both server query and `db.statement` span attribute. + +Usage +----- + +.. code-block:: python + + import psycopg2 + from opentelemetry.instrumentation.psycopg2 import Psycopg2Instrumentor + + # Call instrument() to wrap all database connections + Psycopg2Instrumentor().instrument() + + cnx = psycopg2.connect(database='Database') + + cursor = cnx.cursor() + cursor.execute("CREATE TABLE IF NOT EXISTS test (testField INTEGER)") + cursor.execute("INSERT INTO test (testField) VALUES (123)") + cursor.close() + cnx.close() + +.. 
code-block:: python + + import psycopg2 + from opentelemetry.instrumentation.psycopg2 import Psycopg2Instrumentor + + # Alternatively, use instrument_connection for an individual connection + cnx = psycopg2.connect(database='Database') + instrumented_cnx = Psycopg2Instrumentor().instrument_connection(cnx) + cursor = instrumented_cnx.cursor() + cursor.execute("CREATE TABLE IF NOT EXISTS test (testField INTEGER)") + cursor.execute("INSERT INTO test (testField) VALUES (123)") + cursor.close() + instrumented_cnx.close() + +API +--- +""" + +import logging +import typing +from importlib.metadata import PackageNotFoundError, distribution +from typing import Collection + +import psycopg2 +from psycopg2.extensions import ( + cursor as pg_cursor, # pylint: disable=no-name-in-module +) +from psycopg2.sql import Composed # pylint: disable=no-name-in-module + +from opentelemetry.instrumentation import dbapi +from opentelemetry.instrumentation.instrumentor import BaseInstrumentor +from opentelemetry.instrumentation.psycopg2.package import ( + _instruments, + _instruments_psycopg2, + _instruments_psycopg2_binary, +) +from opentelemetry.instrumentation.psycopg2.version import __version__ + +_logger = logging.getLogger(__name__) +_OTEL_CURSOR_FACTORY_KEY = "_otel_orig_cursor_factory" + + +class Psycopg2Instrumentor(BaseInstrumentor): + _CONNECTION_ATTRIBUTES = { + "database": "info.dbname", + "port": "info.port", + "host": "info.host", + "user": "info.user", + } + + _DATABASE_SYSTEM = "postgresql" + + def instrumentation_dependencies(self) -> Collection[str]: + # Determine which package of psycopg2 is installed + # Right now there are two packages, psycopg2 and psycopg2-binary + # The latter is a binary wheel package that does not require a compiler + try: + distribution("psycopg2") + return (_instruments_psycopg2,) + except PackageNotFoundError: + pass + + try: + distribution("psycopg2-binary") + return (_instruments_psycopg2_binary,) + except PackageNotFoundError: + pass + + 
return _instruments + + def _instrument(self, **kwargs): + """Integrate with PostgreSQL Psycopg library. + Psycopg: http://initd.org/psycopg/ + """ + tracer_provider = kwargs.get("tracer_provider") + enable_sqlcommenter = kwargs.get("enable_commenter", False) + commenter_options = kwargs.get("commenter_options", {}) + enable_attribute_commenter = kwargs.get( + "enable_attribute_commenter", False + ) + dbapi.wrap_connect( + __name__, + psycopg2, + "connect", + self._DATABASE_SYSTEM, + self._CONNECTION_ATTRIBUTES, + version=__version__, + tracer_provider=tracer_provider, + db_api_integration_factory=DatabaseApiIntegration, + enable_commenter=enable_sqlcommenter, + commenter_options=commenter_options, + enable_attribute_commenter=enable_attribute_commenter, + ) + + def _uninstrument(self, **kwargs): + """ "Disable Psycopg2 instrumentation""" + dbapi.unwrap_connect(psycopg2, "connect") + + # TODO(owais): check if core dbapi can do this for all dbapi implementations e.g, pymysql and mysql + @staticmethod + def instrument_connection(connection, tracer_provider=None): + """Enable instrumentation in a psycopg2 connection. + + Args: + connection: psycopg2.extensions.connection + The psycopg2 connection object to be instrumented. + tracer_provider: opentelemetry.trace.TracerProvider, optional + The TracerProvider to use for instrumentation. If not specified, + the global TracerProvider will be used. + + Returns: + An instrumented psycopg2 connection object. 
+ """ + + if not hasattr(connection, "_is_instrumented_by_opentelemetry"): + connection._is_instrumented_by_opentelemetry = False + + if not connection._is_instrumented_by_opentelemetry: + setattr( + connection, _OTEL_CURSOR_FACTORY_KEY, connection.cursor_factory + ) + connection.cursor_factory = _new_cursor_factory( + tracer_provider=tracer_provider + ) + connection._is_instrumented_by_opentelemetry = True + else: + _logger.warning( + "Attempting to instrument Psycopg connection while already instrumented" + ) + return connection + + # TODO(owais): check if core dbapi can do this for all dbapi implementations e.g, pymysql and mysql + @staticmethod + def uninstrument_connection(connection): + connection.cursor_factory = getattr( + connection, _OTEL_CURSOR_FACTORY_KEY, None + ) + + return connection + + +# TODO(owais): check if core dbapi can do this for all dbapi implementations e.g, pymysql and mysql +class DatabaseApiIntegration(dbapi.DatabaseApiIntegration): + def wrapped_connection( + self, + connect_method: typing.Callable[..., typing.Any], + args: typing.Tuple[typing.Any, typing.Any], + kwargs: typing.Dict[typing.Any, typing.Any], + ): + """Add object proxy to connection object.""" + base_cursor_factory = kwargs.pop("cursor_factory", None) + new_factory_kwargs = {"db_api": self} + if base_cursor_factory: + new_factory_kwargs["base_factory"] = base_cursor_factory + kwargs["cursor_factory"] = _new_cursor_factory(**new_factory_kwargs) + connection = connect_method(*args, **kwargs) + self.get_connection_attributes(connection) + return connection + + +class CursorTracer(dbapi.CursorTracer): + def get_operation_name(self, cursor, args): + if not args: + return "" + + statement = args[0] + if isinstance(statement, Composed): + statement = statement.as_string(cursor) + + if isinstance(statement, str): + # Strip leading comments so we get the operation name. 
+ return self._leading_comment_remover.sub("", statement).split()[0] + + return "" + + def get_statement(self, cursor, args): + if not args: + return "" + + statement = args[0] + if isinstance(statement, Composed): + statement = statement.as_string(cursor) + return statement + + +def _new_cursor_factory(db_api=None, base_factory=None, tracer_provider=None): + if not db_api: + db_api = DatabaseApiIntegration( + __name__, + Psycopg2Instrumentor._DATABASE_SYSTEM, + connection_attributes=Psycopg2Instrumentor._CONNECTION_ATTRIBUTES, + version=__version__, + tracer_provider=tracer_provider, + ) + + base_factory = base_factory or pg_cursor + _cursor_tracer = CursorTracer(db_api) + + class TracedCursorFactory(base_factory): + def execute(self, *args, **kwargs): + return _cursor_tracer.traced_execution( + self, super().execute, *args, **kwargs + ) + + def executemany(self, *args, **kwargs): + return _cursor_tracer.traced_execution( + self, super().executemany, *args, **kwargs + ) + + def callproc(self, *args, **kwargs): + return _cursor_tracer.traced_execution( + self, super().callproc, *args, **kwargs + ) + + return TracedCursorFactory diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/psycopg2/package.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/psycopg2/package.py new file mode 100644 index 00000000..b1bf9290 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/psycopg2/package.py @@ -0,0 +1,22 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +_instruments_psycopg2 = "psycopg2 >= 2.7.3.1" +_instruments_psycopg2_binary = "psycopg2-binary >= 2.7.3.1" + +_instruments = ( + _instruments_psycopg2, + _instruments_psycopg2_binary, +) diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/psycopg2/version.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/psycopg2/version.py new file mode 100644 index 00000000..7fb5b98b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/psycopg2/version.py @@ -0,0 +1,15 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +__version__ = "0.52b1" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/py.typed b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/py.typed new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/py.typed diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/requests/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/requests/__init__.py new file mode 100644 index 00000000..1940e2f6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/requests/__init__.py @@ -0,0 +1,469 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This library allows tracing HTTP requests made by the +`requests <https://requests.readthedocs.io/en/master/>`_ library. + +Usage +----- + +.. code-block:: python + + import requests + from opentelemetry.instrumentation.requests import RequestsInstrumentor + + # You can optionally pass a custom TracerProvider to instrument(). + RequestsInstrumentor().instrument() + response = requests.get(url="https://www.example.org/") + +Configuration +------------- + +Request/Response hooks +********************** + +The requests instrumentation supports extending tracing behavior with the help of +request and response hooks. 
These are functions that are called back by the instrumentation +right after a Span is created for a request and right before the span is finished processing a response respectively. +The hooks can be configured as follows: + +.. code:: python + + import requests + from opentelemetry.instrumentation.requests import RequestsInstrumentor + + # `request_obj` is an instance of requests.PreparedRequest + def request_hook(span, request_obj): + pass + + # `request_obj` is an instance of requests.PreparedRequest + # `response` is an instance of requests.Response + def response_hook(span, request_obj, response): + pass + + RequestsInstrumentor().instrument( + request_hook=request_hook, response_hook=response_hook + ) + +Exclude lists +************* +To exclude certain URLs from being tracked, set the environment variable ``OTEL_PYTHON_REQUESTS_EXCLUDED_URLS`` +(or ``OTEL_PYTHON_EXCLUDED_URLS`` as fallback) with comma delimited regexes representing which URLs to exclude. + +For example, + +:: + + export OTEL_PYTHON_REQUESTS_EXCLUDED_URLS="client/.*/info,healthcheck" + +will exclude requests such as ``https://site/client/123/info`` and ``https://site/xyz/healthcheck``. 
+ +API +--- +""" + +from __future__ import annotations + +import functools +import types +from timeit import default_timer +from typing import Any, Callable, Collection, Optional +from urllib.parse import urlparse + +from requests.models import PreparedRequest, Response +from requests.sessions import Session +from requests.structures import CaseInsensitiveDict + +from opentelemetry.instrumentation._semconv import ( + _client_duration_attrs_new, + _client_duration_attrs_old, + _filter_semconv_duration_attrs, + _get_schema_url, + _OpenTelemetrySemanticConventionStability, + _OpenTelemetryStabilitySignalType, + _report_new, + _report_old, + _set_http_host_client, + _set_http_method, + _set_http_net_peer_name_client, + _set_http_network_protocol_version, + _set_http_peer_port_client, + _set_http_scheme, + _set_http_url, + _set_status, + _StabilityMode, +) +from opentelemetry.instrumentation.instrumentor import BaseInstrumentor +from opentelemetry.instrumentation.requests.package import _instruments +from opentelemetry.instrumentation.requests.version import __version__ +from opentelemetry.instrumentation.utils import ( + is_http_instrumentation_enabled, + suppress_http_instrumentation, +) +from opentelemetry.metrics import Histogram, get_meter +from opentelemetry.propagate import inject +from opentelemetry.semconv.attributes.error_attributes import ERROR_TYPE +from opentelemetry.semconv.attributes.network_attributes import ( + NETWORK_PEER_ADDRESS, + NETWORK_PEER_PORT, +) +from opentelemetry.semconv.metrics import MetricInstruments +from opentelemetry.semconv.metrics.http_metrics import ( + HTTP_CLIENT_REQUEST_DURATION, +) +from opentelemetry.trace import SpanKind, Tracer, get_tracer +from opentelemetry.trace.span import Span +from opentelemetry.util.http import ( + ExcludeList, + get_excluded_urls, + parse_excluded_urls, + remove_url_credentials, + sanitize_method, +) +from opentelemetry.util.http.httplib import set_ip_on_next_http_connection + 
+_excluded_urls_from_env = get_excluded_urls("REQUESTS") + +_RequestHookT = Optional[Callable[[Span, PreparedRequest], None]] +_ResponseHookT = Optional[Callable[[Span, PreparedRequest, Response], None]] + + +def _set_http_status_code_attribute( + span, + status_code, + metric_attributes=None, + sem_conv_opt_in_mode=_StabilityMode.DEFAULT, +): + status_code_str = str(status_code) + try: + status_code = int(status_code) + except ValueError: + status_code = -1 + if metric_attributes is None: + metric_attributes = {} + # When we have durations we should set metrics only once + # Also the decision to include status code on a histogram should + # not be dependent on tracing decisions. + _set_status( + span, + metric_attributes, + status_code, + status_code_str, + server_span=False, + sem_conv_opt_in_mode=sem_conv_opt_in_mode, + ) + + +# pylint: disable=unused-argument +# pylint: disable=R0915 +def _instrument( + tracer: Tracer, + duration_histogram_old: Histogram, + duration_histogram_new: Histogram, + request_hook: _RequestHookT = None, + response_hook: _ResponseHookT = None, + excluded_urls: ExcludeList | None = None, + sem_conv_opt_in_mode: _StabilityMode = _StabilityMode.DEFAULT, +): + """Enables tracing of all requests calls that go through + :code:`requests.session.Session.request` (this includes + :code:`requests.get`, etc.).""" + + # Since + # https://github.com/psf/requests/commit/d72d1162142d1bf8b1b5711c664fbbd674f349d1 + # (v0.7.0, Oct 23, 2011), get, post, etc are implemented via request which + # again, is implemented via Session.request (`Session` was named `session` + # before v1.0.0, Dec 17, 2012, see + # https://github.com/psf/requests/commit/4e5c4a6ab7bb0195dececdd19bb8505b872fe120) + + wrapped_send = Session.send + + # pylint: disable-msg=too-many-locals,too-many-branches + @functools.wraps(wrapped_send) + def instrumented_send( + self: Session, request: PreparedRequest, **kwargs: Any + ): + if excluded_urls and 
excluded_urls.url_disabled(request.url): + return wrapped_send(self, request, **kwargs) + + def get_or_create_headers(): + request.headers = ( + request.headers + if request.headers is not None + else CaseInsensitiveDict() + ) + return request.headers + + if not is_http_instrumentation_enabled(): + return wrapped_send(self, request, **kwargs) + + # See + # https://github.com/open-telemetry/semantic-conventions/blob/main/docs/http/http-spans.md#http-client + method = request.method + span_name = get_default_span_name(method) + + url = remove_url_credentials(request.url) + + span_attributes = {} + _set_http_method( + span_attributes, + method, + sanitize_method(method), + sem_conv_opt_in_mode, + ) + _set_http_url(span_attributes, url, sem_conv_opt_in_mode) + + metric_labels = {} + _set_http_method( + metric_labels, + method, + sanitize_method(method), + sem_conv_opt_in_mode, + ) + + try: + parsed_url = urlparse(url) + if parsed_url.scheme: + if _report_old(sem_conv_opt_in_mode): + # TODO: Support opt-in for url.scheme in new semconv + _set_http_scheme( + metric_labels, parsed_url.scheme, sem_conv_opt_in_mode + ) + if parsed_url.hostname: + _set_http_host_client( + metric_labels, parsed_url.hostname, sem_conv_opt_in_mode + ) + _set_http_net_peer_name_client( + metric_labels, parsed_url.hostname, sem_conv_opt_in_mode + ) + if _report_new(sem_conv_opt_in_mode): + _set_http_host_client( + span_attributes, + parsed_url.hostname, + sem_conv_opt_in_mode, + ) + # Use semconv library when available + span_attributes[NETWORK_PEER_ADDRESS] = parsed_url.hostname + if parsed_url.port: + _set_http_peer_port_client( + metric_labels, parsed_url.port, sem_conv_opt_in_mode + ) + if _report_new(sem_conv_opt_in_mode): + _set_http_peer_port_client( + span_attributes, parsed_url.port, sem_conv_opt_in_mode + ) + # Use semconv library when available + span_attributes[NETWORK_PEER_PORT] = parsed_url.port + except ValueError: + pass + + with tracer.start_as_current_span( + span_name, 
kind=SpanKind.CLIENT, attributes=span_attributes + ) as span, set_ip_on_next_http_connection(span): + exception = None + if callable(request_hook): + request_hook(span, request) + + headers = get_or_create_headers() + inject(headers) + + with suppress_http_instrumentation(): + start_time = default_timer() + try: + result = wrapped_send( + self, request, **kwargs + ) # *** PROCEED + except Exception as exc: # pylint: disable=W0703 + exception = exc + result = getattr(exc, "response", None) + finally: + elapsed_time = max(default_timer() - start_time, 0) + + if isinstance(result, Response): + span_attributes = {} + _set_http_status_code_attribute( + span, + result.status_code, + metric_labels, + sem_conv_opt_in_mode, + ) + + if result.raw is not None: + version = getattr(result.raw, "version", None) + if version: + # Only HTTP/1 is supported by requests + version_text = "1.1" if version == 11 else "1.0" + _set_http_network_protocol_version( + metric_labels, version_text, sem_conv_opt_in_mode + ) + if _report_new(sem_conv_opt_in_mode): + _set_http_network_protocol_version( + span_attributes, + version_text, + sem_conv_opt_in_mode, + ) + for key, val in span_attributes.items(): + span.set_attribute(key, val) + + if callable(response_hook): + response_hook(span, request, result) + + if exception is not None and _report_new(sem_conv_opt_in_mode): + span.set_attribute(ERROR_TYPE, type(exception).__qualname__) + metric_labels[ERROR_TYPE] = type(exception).__qualname__ + + if duration_histogram_old is not None: + duration_attrs_old = _filter_semconv_duration_attrs( + metric_labels, + _client_duration_attrs_old, + _client_duration_attrs_new, + _StabilityMode.DEFAULT, + ) + duration_histogram_old.record( + max(round(elapsed_time * 1000), 0), + attributes=duration_attrs_old, + ) + if duration_histogram_new is not None: + duration_attrs_new = _filter_semconv_duration_attrs( + metric_labels, + _client_duration_attrs_old, + _client_duration_attrs_new, + _StabilityMode.HTTP, + ) + 
duration_histogram_new.record( + elapsed_time, attributes=duration_attrs_new + ) + + if exception is not None: + raise exception.with_traceback(exception.__traceback__) + + return result + + instrumented_send.opentelemetry_instrumentation_requests_applied = True + Session.send = instrumented_send + + +def _uninstrument(): + """Disables instrumentation of :code:`requests` through this module. + + Note that this only works if no other module also patches requests.""" + _uninstrument_from(Session) + + +def _uninstrument_from(instr_root, restore_as_bound_func: bool = False): + for instr_func_name in ("request", "send"): + instr_func = getattr(instr_root, instr_func_name) + if not getattr( + instr_func, + "opentelemetry_instrumentation_requests_applied", + False, + ): + continue + + original = instr_func.__wrapped__ # pylint:disable=no-member + if restore_as_bound_func: + original = types.MethodType(original, instr_root) + setattr(instr_root, instr_func_name, original) + + +def get_default_span_name(method: str) -> str: + """ + Default implementation for name_callback, returns HTTP {method_name}. + https://opentelemetry.io/docs/reference/specification/trace/semantic_conventions/http/#name + + Args: + method: string representing HTTP method + Returns: + span name + """ + method = sanitize_method(method.strip()) + if method == "_OTHER": + return "HTTP" + return method + + +class RequestsInstrumentor(BaseInstrumentor): + """An instrumentor for requests + See `BaseInstrumentor` + """ + + def instrumentation_dependencies(self) -> Collection[str]: + return _instruments + + def _instrument(self, **kwargs: Any): + """Instruments requests module + + Args: + **kwargs: Optional arguments + ``tracer_provider``: a TracerProvider, defaults to global + ``request_hook``: An optional callback that is invoked right after a span is created. + ``response_hook``: An optional callback which is invoked right before the span is finished processing a response. 
+ ``excluded_urls``: A string containing a comma-delimited + list of regexes used to exclude URLs from tracking + """ + semconv_opt_in_mode = _OpenTelemetrySemanticConventionStability._get_opentelemetry_stability_opt_in_mode( + _OpenTelemetryStabilitySignalType.HTTP, + ) + schema_url = _get_schema_url(semconv_opt_in_mode) + tracer_provider = kwargs.get("tracer_provider") + tracer = get_tracer( + __name__, + __version__, + tracer_provider, + schema_url=schema_url, + ) + excluded_urls = kwargs.get("excluded_urls") + meter_provider = kwargs.get("meter_provider") + meter = get_meter( + __name__, + __version__, + meter_provider, + schema_url=schema_url, + ) + duration_histogram_old = None + if _report_old(semconv_opt_in_mode): + duration_histogram_old = meter.create_histogram( + name=MetricInstruments.HTTP_CLIENT_DURATION, + unit="ms", + description="measures the duration of the outbound HTTP request", + ) + duration_histogram_new = None + if _report_new(semconv_opt_in_mode): + duration_histogram_new = meter.create_histogram( + name=HTTP_CLIENT_REQUEST_DURATION, + unit="s", + description="Duration of HTTP client requests.", + ) + _instrument( + tracer, + duration_histogram_old, + duration_histogram_new, + request_hook=kwargs.get("request_hook"), + response_hook=kwargs.get("response_hook"), + excluded_urls=( + _excluded_urls_from_env + if excluded_urls is None + else parse_excluded_urls(excluded_urls) + ), + sem_conv_opt_in_mode=semconv_opt_in_mode, + ) + + def _uninstrument(self, **kwargs: Any): + _uninstrument() + + @staticmethod + def uninstrument_session(session: Session): + """Disables instrumentation on the session object.""" + _uninstrument_from(session, restore_as_bound_func=True) diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/requests/package.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/requests/package.py new file mode 100644 index 00000000..9cd93a91 --- /dev/null +++ 
b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/requests/package.py @@ -0,0 +1,20 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +_instruments = ("requests ~= 2.0",) + +_supports_metrics = True + +_semconv_status = "migration" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/requests/py.typed b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/requests/py.typed new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/requests/py.typed diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/requests/version.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/requests/version.py new file mode 100644 index 00000000..7fb5b98b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/requests/version.py @@ -0,0 +1,15 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +__version__ = "0.52b1" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/sqlcommenter_utils.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/sqlcommenter_utils.py new file mode 100644 index 00000000..1eeefbf2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/sqlcommenter_utils.py @@ -0,0 +1,66 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from opentelemetry import context +from opentelemetry.instrumentation.utils import _url_quote + + +def _add_sql_comment(sql, **meta) -> str: + """ + Appends comments to the sql statement and returns it + """ + meta.update(**_add_framework_tags()) + comment = _generate_sql_comment(**meta) + sql = sql.rstrip() + if sql[-1] == ";": + sql = sql[:-1] + comment + ";" + else: + sql = sql + comment + return sql + + +def _generate_sql_comment(**meta) -> str: + """ + Return a SQL comment with comma delimited key=value pairs created from + **meta kwargs. + """ + key_value_delimiter = "," + + if not meta: # No entries added. + return "" + + # Sort the keywords to ensure that caching works and that testing is + # deterministic. It eases visual inspection as well. 
+ return ( + " /*" + + key_value_delimiter.join( + f"{_url_quote(key)}={_url_quote(value)!r}" + for key, value in sorted(meta.items()) + if value is not None + ) + + "*/" + ) + + +def _add_framework_tags() -> dict: + """ + Returns orm related tags if any set by the context + """ + + sqlcommenter_framework_values = ( + context.get_value("SQLCOMMENTER_ORM_TAGS_AND_VALUES") + if context.get_value("SQLCOMMENTER_ORM_TAGS_AND_VALUES") + else {} + ) + return sqlcommenter_framework_values diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/urllib/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/urllib/__init__.py new file mode 100644 index 00000000..a80e6d07 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/urllib/__init__.py @@ -0,0 +1,477 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This library allows tracing HTTP requests made by the +`urllib <https://docs.python.org/3/library/urllib>`_ library. + +Usage +----- +.. 
code-block:: python + + from urllib import request + from opentelemetry.instrumentation.urllib import URLLibInstrumentor + + # You can optionally pass a custom TracerProvider to + # URLLibInstrumentor().instrument() + + URLLibInstrumentor().instrument() + req = request.Request('https://postman-echo.com/post', method="POST") + r = request.urlopen(req) + +Configuration +------------- + +Request/Response hooks +********************** + +The urllib instrumentation supports extending tracing behavior with the help of +request and response hooks. These are functions that are called back by the instrumentation +right after a Span is created for a request and right before the span is finished processing a response respectively. +The hooks can be configured as follows: + +.. code:: python + + from http.client import HTTPResponse + from urllib.request import Request + + from opentelemetry.instrumentation.urllib import URLLibInstrumentor + from opentelemetry.trace import Span + + + def request_hook(span: Span, request: Request): + pass + + + def response_hook(span: Span, request: Request, response: HTTPResponse): + pass + + + URLLibInstrumentor().instrument( + request_hook=request_hook, + response_hook=response_hook + ) + +Exclude lists +************* + +To exclude certain URLs from being tracked, set the environment variable ``OTEL_PYTHON_URLLIB_EXCLUDED_URLS`` +(or ``OTEL_PYTHON_EXCLUDED_URLS`` as fallback) with comma delimited regexes representing which URLs to exclude. + +For example, + +:: + + export OTEL_PYTHON_URLLIB_EXCLUDED_URLS="client/.*/info,healthcheck" + +will exclude requests such as ``https://site/client/123/info`` and ``https://site/xyz/healthcheck``. 
+ +API +--- +""" + +from __future__ import annotations + +import functools +import types +import typing +from http import client +from timeit import default_timer +from typing import Any, Collection +from urllib.request import ( # pylint: disable=no-name-in-module,import-error + OpenerDirector, + Request, +) + +from opentelemetry.instrumentation._semconv import ( + _client_duration_attrs_new, + _client_duration_attrs_old, + _filter_semconv_duration_attrs, + _get_schema_url, + _OpenTelemetrySemanticConventionStability, + _OpenTelemetryStabilitySignalType, + _report_new, + _report_old, + _set_http_method, + _set_http_network_protocol_version, + _set_http_url, + _set_status, + _StabilityMode, +) +from opentelemetry.instrumentation.instrumentor import BaseInstrumentor +from opentelemetry.instrumentation.urllib.package import _instruments +from opentelemetry.instrumentation.urllib.version import __version__ +from opentelemetry.instrumentation.utils import ( + is_http_instrumentation_enabled, + suppress_http_instrumentation, +) +from opentelemetry.metrics import Histogram, Meter, get_meter +from opentelemetry.propagate import inject +from opentelemetry.semconv._incubating.metrics.http_metrics import ( + HTTP_CLIENT_REQUEST_BODY_SIZE, + HTTP_CLIENT_RESPONSE_BODY_SIZE, + create_http_client_request_body_size, + create_http_client_response_body_size, +) +from opentelemetry.semconv.attributes.error_attributes import ERROR_TYPE +from opentelemetry.semconv.metrics import MetricInstruments +from opentelemetry.semconv.metrics.http_metrics import ( + HTTP_CLIENT_REQUEST_DURATION, +) +from opentelemetry.semconv.trace import SpanAttributes +from opentelemetry.trace import Span, SpanKind, Tracer, get_tracer +from opentelemetry.util.http import ( + ExcludeList, + get_excluded_urls, + parse_excluded_urls, + remove_url_credentials, + sanitize_method, +) +from opentelemetry.util.types import Attributes + +_excluded_urls_from_env = get_excluded_urls("URLLIB") + +_RequestHookT = 
typing.Optional[typing.Callable[[Span, Request], None]] +_ResponseHookT = typing.Optional[ + typing.Callable[[Span, Request, client.HTTPResponse], None] +] + + +class URLLibInstrumentor(BaseInstrumentor): + """An instrumentor for urllib + See `BaseInstrumentor` + """ + + def instrumentation_dependencies(self) -> Collection[str]: + return _instruments + + def _instrument(self, **kwargs: Any): + """Instruments urllib module + + Args: + **kwargs: Optional arguments + ``tracer_provider``: a TracerProvider, defaults to global + ``request_hook``: An optional callback invoked that is invoked right after a span is created. + ``response_hook``: An optional callback which is invoked right before the span is finished processing a response + ``excluded_urls``: A string containing a comma-delimited + list of regexes used to exclude URLs from tracking + """ + # initialize semantic conventions opt-in if needed + _OpenTelemetrySemanticConventionStability._initialize() + sem_conv_opt_in_mode = _OpenTelemetrySemanticConventionStability._get_opentelemetry_stability_opt_in_mode( + _OpenTelemetryStabilitySignalType.HTTP, + ) + schema_url = _get_schema_url(sem_conv_opt_in_mode) + tracer_provider = kwargs.get("tracer_provider") + tracer = get_tracer( + __name__, + __version__, + tracer_provider, + schema_url=schema_url, + ) + excluded_urls = kwargs.get("excluded_urls") + meter_provider = kwargs.get("meter_provider") + meter = get_meter( + __name__, + __version__, + meter_provider, + schema_url=schema_url, + ) + + histograms = _create_client_histograms(meter, sem_conv_opt_in_mode) + + _instrument( + tracer, + histograms, + request_hook=kwargs.get("request_hook"), + response_hook=kwargs.get("response_hook"), + excluded_urls=( + _excluded_urls_from_env + if excluded_urls is None + else parse_excluded_urls(excluded_urls) + ), + sem_conv_opt_in_mode=sem_conv_opt_in_mode, + ) + + def _uninstrument(self, **kwargs: Any): + _uninstrument() + + def uninstrument_opener(self, opener: 
OpenerDirector): # pylint: disable=no-self-use + """uninstrument_opener a specific instance of urllib.request.OpenerDirector""" + _uninstrument_from(opener, restore_as_bound_func=True) + + +# pylint: disable=too-many-statements +def _instrument( + tracer: Tracer, + histograms: dict[str, Histogram], + request_hook: _RequestHookT = None, + response_hook: _ResponseHookT = None, + excluded_urls: ExcludeList | None = None, + sem_conv_opt_in_mode: _StabilityMode = _StabilityMode.DEFAULT, +): + """Enables tracing of all requests calls that go through + :code:`urllib.Client._make_request`""" + + opener_open = OpenerDirector.open + + @functools.wraps(opener_open) + def instrumented_open(opener, fullurl, data=None, timeout=None): + if isinstance(fullurl, str): + request_ = Request(fullurl, data) + else: + request_ = fullurl + + def get_or_create_headers(): + return getattr(request_, "headers", {}) + + def call_wrapped(): + return opener_open(opener, request_, data=data, timeout=timeout) + + return _instrumented_open_call( + opener, request_, call_wrapped, get_or_create_headers + ) + + def _instrumented_open_call( + _, request, call_wrapped, get_or_create_headers + ): # pylint: disable=too-many-locals + if not is_http_instrumentation_enabled(): + return call_wrapped() + + url = request.full_url + if excluded_urls and excluded_urls.url_disabled(url): + return call_wrapped() + + method = request.get_method().upper() + + span_name = _get_span_name(method) + + url = remove_url_credentials(url) + + data = getattr(request, "data", None) + request_size = 0 if data is None else len(data) + + labels = {} + + _set_http_method( + labels, + method, + sanitize_method(method), + sem_conv_opt_in_mode, + ) + _set_http_url(labels, url, sem_conv_opt_in_mode) + + with tracer.start_as_current_span( + span_name, kind=SpanKind.CLIENT, attributes=labels + ) as span: + exception = None + if callable(request_hook): + request_hook(span, request) + + headers = get_or_create_headers() + inject(headers) 
+ + with suppress_http_instrumentation(): + start_time = default_timer() + try: + result = call_wrapped() # *** PROCEED + except Exception as exc: # pylint: disable=W0703 + exception = exc + result = getattr(exc, "file", None) + finally: + duration_s = default_timer() - start_time + response_size = 0 + if result is not None: + response_size = int(result.headers.get("Content-Length", 0)) + code_ = result.getcode() + # set http status code based on semconv + if code_: + _set_status_code_attribute( + span, code_, labels, sem_conv_opt_in_mode + ) + + ver_ = str(getattr(result, "version", "")) + if ver_: + _set_http_network_protocol_version( + labels, f"{ver_[:1]}.{ver_[:-1]}", sem_conv_opt_in_mode + ) + + if exception is not None and _report_new(sem_conv_opt_in_mode): + span.set_attribute(ERROR_TYPE, type(exception).__qualname__) + labels[ERROR_TYPE] = type(exception).__qualname__ + + duration_attrs_old = _filter_semconv_duration_attrs( + labels, + _client_duration_attrs_old, + _client_duration_attrs_new, + sem_conv_opt_in_mode=_StabilityMode.DEFAULT, + ) + duration_attrs_new = _filter_semconv_duration_attrs( + labels, + _client_duration_attrs_old, + _client_duration_attrs_new, + sem_conv_opt_in_mode=_StabilityMode.HTTP, + ) + + duration_attrs_old[SpanAttributes.HTTP_URL] = url + + _record_histograms( + histograms, + duration_attrs_old, + duration_attrs_new, + request_size, + response_size, + duration_s, + sem_conv_opt_in_mode, + ) + + if callable(response_hook): + response_hook(span, request, result) + + if exception is not None: + raise exception.with_traceback(exception.__traceback__) + + return result + + instrumented_open.opentelemetry_instrumentation_urllib_applied = True + OpenerDirector.open = instrumented_open + + +def _uninstrument(): + """Disables instrumentation of :code:`urllib` through this module. 
+ + Note that this only works if no other module also patches urllib.""" + _uninstrument_from(OpenerDirector) + + +def _uninstrument_from(instr_root, restore_as_bound_func: bool = False): + instr_func_name = "open" + instr_func = getattr(instr_root, instr_func_name) + if not getattr( + instr_func, + "opentelemetry_instrumentation_urllib_applied", + False, + ): + return + + original = instr_func.__wrapped__ # pylint:disable=no-member + if restore_as_bound_func: + original = types.MethodType(original, instr_root) + setattr(instr_root, instr_func_name, original) + + +def _get_span_name(method: str) -> str: + method = sanitize_method(method.strip()) + if method == "_OTHER": + method = "HTTP" + return method + + +def _set_status_code_attribute( + span: Span, + status_code: int, + metric_attributes: dict[str, Any] | None = None, + sem_conv_opt_in_mode: _StabilityMode = _StabilityMode.DEFAULT, +) -> None: + status_code_str = str(status_code) + try: + status_code = int(status_code) + except ValueError: + status_code = -1 + + if metric_attributes is None: + metric_attributes = {} + + _set_status( + span, + metric_attributes, + status_code, + status_code_str, + server_span=False, + sem_conv_opt_in_mode=sem_conv_opt_in_mode, + ) + + +def _create_client_histograms( + meter: Meter, sem_conv_opt_in_mode: _StabilityMode = _StabilityMode.DEFAULT +) -> dict[str, Histogram]: + histograms = {} + if _report_old(sem_conv_opt_in_mode): + histograms[MetricInstruments.HTTP_CLIENT_DURATION] = ( + meter.create_histogram( + name=MetricInstruments.HTTP_CLIENT_DURATION, + unit="ms", + description="Measures the duration of the outbound HTTP request", + ) + ) + histograms[MetricInstruments.HTTP_CLIENT_REQUEST_SIZE] = ( + meter.create_histogram( + name=MetricInstruments.HTTP_CLIENT_REQUEST_SIZE, + unit="By", + description="Measures the size of HTTP request messages.", + ) + ) + histograms[MetricInstruments.HTTP_CLIENT_RESPONSE_SIZE] = ( + meter.create_histogram( + 
name=MetricInstruments.HTTP_CLIENT_RESPONSE_SIZE, + unit="By", + description="Measures the size of HTTP response messages.", + ) + ) + if _report_new(sem_conv_opt_in_mode): + histograms[HTTP_CLIENT_REQUEST_DURATION] = meter.create_histogram( + name=HTTP_CLIENT_REQUEST_DURATION, + unit="s", + description="Duration of HTTP client requests.", + ) + histograms[HTTP_CLIENT_REQUEST_BODY_SIZE] = ( + create_http_client_request_body_size(meter) + ) + histograms[HTTP_CLIENT_RESPONSE_BODY_SIZE] = ( + create_http_client_response_body_size(meter) + ) + + return histograms + + +def _record_histograms( + histograms: dict[str, Histogram], + metric_attributes_old: Attributes, + metric_attributes_new: Attributes, + request_size: int, + response_size: int, + duration_s: float, + sem_conv_opt_in_mode: _StabilityMode = _StabilityMode.DEFAULT, +): + if _report_old(sem_conv_opt_in_mode): + duration = max(round(duration_s * 1000), 0) + histograms[MetricInstruments.HTTP_CLIENT_DURATION].record( + duration, attributes=metric_attributes_old + ) + histograms[MetricInstruments.HTTP_CLIENT_REQUEST_SIZE].record( + request_size, attributes=metric_attributes_old + ) + histograms[MetricInstruments.HTTP_CLIENT_RESPONSE_SIZE].record( + response_size, attributes=metric_attributes_old + ) + if _report_new(sem_conv_opt_in_mode): + histograms[HTTP_CLIENT_REQUEST_DURATION].record( + duration_s, attributes=metric_attributes_new + ) + histograms[HTTP_CLIENT_REQUEST_BODY_SIZE].record( + request_size, attributes=metric_attributes_new + ) + histograms[HTTP_CLIENT_RESPONSE_BODY_SIZE].record( + response_size, attributes=metric_attributes_new + ) diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/urllib/package.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/urllib/package.py new file mode 100644 index 00000000..2dbb1905 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/urllib/package.py @@ -0,0 +1,21 @@ +# Copyright The 
OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +_instruments: tuple[str, ...] = tuple() + +_supports_metrics = True + +_semconv_status = "migration" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/urllib/py.typed b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/urllib/py.typed new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/urllib/py.typed diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/urllib/version.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/urllib/version.py new file mode 100644 index 00000000..7fb5b98b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/urllib/version.py @@ -0,0 +1,15 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +__version__ = "0.52b1" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/urllib3/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/urllib3/__init__.py new file mode 100644 index 00000000..551c67f7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/urllib3/__init__.py @@ -0,0 +1,599 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This library allows tracing HTTP requests made by the +`urllib3 <https://urllib3.readthedocs.io/>`_ library. + +Usage +----- +.. code-block:: python + + import urllib3 + from opentelemetry.instrumentation.urllib3 import URLLib3Instrumentor + + def strip_query_params(url: str) -> str: + return url.split("?")[0] + + URLLib3Instrumentor().instrument( + # Remove all query params from the URL attribute on the span. + url_filter=strip_query_params, + ) + + http = urllib3.PoolManager() + response = http.request("GET", "https://www.example.org/") + +Configuration +------------- + +Request/Response hooks +********************** + +The urllib3 instrumentation supports extending tracing behavior with the help of +request and response hooks. These are functions that are called back by the instrumentation +right after a Span is created for a request and right before the span is finished processing a response respectively. +The hooks can be configured as follows: + +.. 
code:: python + + from typing import Any + + from urllib3.connectionpool import HTTPConnectionPool + from urllib3.response import HTTPResponse + + from opentelemetry.instrumentation.urllib3 import RequestInfo, URLLib3Instrumentor + from opentelemetry.trace import Span + + def request_hook( + span: Span, + pool: HTTPConnectionPool, + request_info: RequestInfo, + ) -> Any: + pass + + def response_hook( + span: Span, + pool: HTTPConnectionPool, + response: HTTPResponse, + ) -> Any: + pass + + URLLib3Instrumentor().instrument( + request_hook=request_hook, + response_hook=response_hook, + ) + +Exclude lists +************* + +To exclude certain URLs from being tracked, set the environment variable ``OTEL_PYTHON_URLLIB3_EXCLUDED_URLS`` +(or ``OTEL_PYTHON_EXCLUDED_URLS`` as fallback) with comma delimited regexes representing which URLs to exclude. + +For example, + +:: + + export OTEL_PYTHON_URLLIB3_EXCLUDED_URLS="client/.*/info,healthcheck" + +will exclude requests such as ``https://site/client/123/info`` and ``https://site/xyz/healthcheck``. 
+ +API +--- +""" + +import collections.abc +import io +import typing +from dataclasses import dataclass +from timeit import default_timer +from typing import Collection + +import urllib3.connectionpool +import wrapt + +from opentelemetry.instrumentation._semconv import ( + _client_duration_attrs_new, + _client_duration_attrs_old, + _filter_semconv_duration_attrs, + _get_schema_url, + _OpenTelemetrySemanticConventionStability, + _OpenTelemetryStabilitySignalType, + _report_new, + _report_old, + _set_http_host_client, + _set_http_method, + _set_http_net_peer_name_client, + _set_http_network_protocol_version, + _set_http_peer_port_client, + _set_http_scheme, + _set_http_url, + _set_status, + _StabilityMode, +) +from opentelemetry.instrumentation.instrumentor import BaseInstrumentor +from opentelemetry.instrumentation.urllib3.package import _instruments +from opentelemetry.instrumentation.urllib3.version import __version__ +from opentelemetry.instrumentation.utils import ( + is_http_instrumentation_enabled, + suppress_http_instrumentation, + unwrap, +) +from opentelemetry.metrics import Histogram, get_meter +from opentelemetry.propagate import inject +from opentelemetry.semconv._incubating.metrics.http_metrics import ( + create_http_client_request_body_size, + create_http_client_response_body_size, +) +from opentelemetry.semconv.metrics import MetricInstruments +from opentelemetry.semconv.metrics.http_metrics import ( + HTTP_CLIENT_REQUEST_DURATION, +) +from opentelemetry.trace import Span, SpanKind, Tracer, get_tracer +from opentelemetry.util.http import ( + ExcludeList, + get_excluded_urls, + parse_excluded_urls, + sanitize_method, +) +from opentelemetry.util.http.httplib import set_ip_on_next_http_connection + +_excluded_urls_from_env = get_excluded_urls("URLLIB3") + + +@dataclass +class RequestInfo: + """Arguments that were passed to the ``urlopen()`` call.""" + + __slots__ = ("method", "url", "headers", "body") + + # The type annotations here come from 
``HTTPConnectionPool.urlopen()``. + method: str + url: str + headers: typing.Optional[typing.Mapping[str, str]] + body: typing.Union[ + bytes, typing.IO[typing.Any], typing.Iterable[bytes], str, None + ] + + +_UrlFilterT = typing.Optional[typing.Callable[[str], str]] +_RequestHookT = typing.Optional[ + typing.Callable[ + [ + Span, + urllib3.connectionpool.HTTPConnectionPool, + RequestInfo, + ], + None, + ] +] +_ResponseHookT = typing.Optional[ + typing.Callable[ + [ + Span, + urllib3.connectionpool.HTTPConnectionPool, + urllib3.response.HTTPResponse, + ], + None, + ] +] + +_URL_OPEN_ARG_TO_INDEX_MAPPING = { + "method": 0, + "url": 1, + "body": 2, +} + + +class URLLib3Instrumentor(BaseInstrumentor): + def instrumentation_dependencies(self) -> Collection[str]: + return _instruments + + def _instrument(self, **kwargs): + """Instruments the urllib3 module + + Args: + **kwargs: Optional arguments + ``tracer_provider``: a TracerProvider, defaults to global. + ``request_hook``: An optional callback that is invoked right after a span is created. + ``response_hook``: An optional callback which is invoked right before the span is finished processing a response. + ``url_filter``: A callback to process the requested URL prior + to adding it as a span attribute. 
+ ``excluded_urls``: A string containing a comma-delimited + list of regexes used to exclude URLs from tracking + """ + # initialize semantic conventions opt-in if needed + _OpenTelemetrySemanticConventionStability._initialize() + sem_conv_opt_in_mode = _OpenTelemetrySemanticConventionStability._get_opentelemetry_stability_opt_in_mode( + _OpenTelemetryStabilitySignalType.HTTP, + ) + schema_url = _get_schema_url(sem_conv_opt_in_mode) + tracer_provider = kwargs.get("tracer_provider") + tracer = get_tracer( + __name__, + __version__, + tracer_provider, + schema_url=schema_url, + ) + + excluded_urls = kwargs.get("excluded_urls") + + meter_provider = kwargs.get("meter_provider") + meter = get_meter( + __name__, + __version__, + meter_provider, + schema_url=schema_url, + ) + duration_histogram_old = None + request_size_histogram_old = None + response_size_histogram_old = None + if _report_old(sem_conv_opt_in_mode): + # http.client.duration histogram + duration_histogram_old = meter.create_histogram( + name=MetricInstruments.HTTP_CLIENT_DURATION, + unit="ms", + description="Measures the duration of the outbound HTTP request", + ) + # http.client.request.size histogram + request_size_histogram_old = meter.create_histogram( + name=MetricInstruments.HTTP_CLIENT_REQUEST_SIZE, + unit="By", + description="Measures the size of HTTP request messages.", + ) + # http.client.response.size histogram + response_size_histogram_old = meter.create_histogram( + name=MetricInstruments.HTTP_CLIENT_RESPONSE_SIZE, + unit="By", + description="Measures the size of HTTP response messages.", + ) + + duration_histogram_new = None + request_size_histogram_new = None + response_size_histogram_new = None + if _report_new(sem_conv_opt_in_mode): + # http.client.request.duration histogram + duration_histogram_new = meter.create_histogram( + name=HTTP_CLIENT_REQUEST_DURATION, + unit="s", + description="Duration of HTTP client requests.", + ) + # http.client.request.body.size histogram + 
request_size_histogram_new = create_http_client_request_body_size( + meter + ) + # http.client.response.body.size histogram + response_size_histogram_new = ( + create_http_client_response_body_size(meter) + ) + _instrument( + tracer, + duration_histogram_old, + duration_histogram_new, + request_size_histogram_old, + request_size_histogram_new, + response_size_histogram_old, + response_size_histogram_new, + request_hook=kwargs.get("request_hook"), + response_hook=kwargs.get("response_hook"), + url_filter=kwargs.get("url_filter"), + excluded_urls=( + _excluded_urls_from_env + if excluded_urls is None + else parse_excluded_urls(excluded_urls) + ), + sem_conv_opt_in_mode=sem_conv_opt_in_mode, + ) + + def _uninstrument(self, **kwargs): + _uninstrument() + + +def _get_span_name(method: str) -> str: + method = sanitize_method(method.strip()) + if method == "_OTHER": + method = "HTTP" + return method + + +def _instrument( + tracer: Tracer, + duration_histogram_old: Histogram, + duration_histogram_new: Histogram, + request_size_histogram_old: Histogram, + request_size_histogram_new: Histogram, + response_size_histogram_old: Histogram, + response_size_histogram_new: Histogram, + request_hook: _RequestHookT = None, + response_hook: _ResponseHookT = None, + url_filter: _UrlFilterT = None, + excluded_urls: ExcludeList = None, + sem_conv_opt_in_mode: _StabilityMode = _StabilityMode.DEFAULT, +): + def instrumented_urlopen(wrapped, instance, args, kwargs): + if not is_http_instrumentation_enabled(): + return wrapped(*args, **kwargs) + + url = _get_url(instance, args, kwargs, url_filter) + if excluded_urls and excluded_urls.url_disabled(url): + return wrapped(*args, **kwargs) + + method = _get_url_open_arg("method", args, kwargs).upper() + headers = _prepare_headers(kwargs) + body = _get_url_open_arg("body", args, kwargs) + + span_name = _get_span_name(method) + span_attributes = {} + + _set_http_method( + span_attributes, + method, + sanitize_method(method), + 
sem_conv_opt_in_mode, + ) + _set_http_url(span_attributes, url, sem_conv_opt_in_mode) + + with tracer.start_as_current_span( + span_name, kind=SpanKind.CLIENT, attributes=span_attributes + ) as span, set_ip_on_next_http_connection(span): + if callable(request_hook): + request_hook( + span, + instance, + RequestInfo( + method=method, + url=url, + headers=headers, + body=body, + ), + ) + inject(headers) + # TODO: add error handling to also set exception `error.type` in new semconv + with suppress_http_instrumentation(): + start_time = default_timer() + response = wrapped(*args, **kwargs) + duration_s = default_timer() - start_time + # set http status code based on semconv + metric_attributes = {} + _set_status_code_attribute( + span, response.status, metric_attributes, sem_conv_opt_in_mode + ) + + if callable(response_hook): + response_hook(span, instance, response) + + request_size = _get_body_size(body) + response_size = int(response.headers.get("Content-Length", 0)) + + _set_metric_attributes( + metric_attributes, + instance, + response, + method, + sem_conv_opt_in_mode, + ) + + _record_metrics( + metric_attributes, + duration_histogram_old, + duration_histogram_new, + request_size_histogram_old, + request_size_histogram_new, + response_size_histogram_old, + response_size_histogram_new, + duration_s, + request_size, + response_size, + sem_conv_opt_in_mode, + ) + + return response + + wrapt.wrap_function_wrapper( + urllib3.connectionpool.HTTPConnectionPool, + "urlopen", + instrumented_urlopen, + ) + + +def _get_url_open_arg(name: str, args: typing.List, kwargs: typing.Mapping): + arg_idx = _URL_OPEN_ARG_TO_INDEX_MAPPING.get(name) + if arg_idx is not None: + try: + return args[arg_idx] + except IndexError: + pass + return kwargs.get(name) + + +def _get_url( + instance: urllib3.connectionpool.HTTPConnectionPool, + args: typing.List, + kwargs: typing.Mapping, + url_filter: _UrlFilterT, +) -> str: + url_or_path = _get_url_open_arg("url", args, kwargs) + if not 
url_or_path.startswith("/"):
+        # Absolute URL was passed to urlopen(): use it as-is.
+        url = url_or_path
+    else:
+        # Only a path was passed: rebuild the full URL from the pool's
+        # scheme/host/port, omitting default ports (80/443).
+        url = instance.scheme + "://" + instance.host
+        if _should_append_port(instance.scheme, instance.port):
+            url += ":" + str(instance.port)
+        url += url_or_path
+
+    if url_filter:
+        return url_filter(url)
+    return url
+
+
+def _get_body_size(body: object) -> typing.Optional[int]:
+    """Return the size of *body* when cheaply determinable, else ``None``.
+
+    ``None`` bodies count as 0; sized containers (bytes/str/sequences) use
+    ``len()``; ``BytesIO`` uses its buffer length. File-like objects and
+    arbitrary iterables return ``None`` (size unknown without consuming).
+    """
+    if body is None:
+        return 0
+    if isinstance(body, collections.abc.Sized):
+        return len(body)
+    if isinstance(body, io.BytesIO):
+        return body.getbuffer().nbytes
+    return None
+
+
+def _should_append_port(scheme: str, port: typing.Optional[int]) -> bool:
+    """Return True unless *port* is falsy or is the default for *scheme*."""
+    if not port:
+        return False
+    if scheme == "http" and port == 80:
+        return False
+    if scheme == "https" and port == 443:
+        return False
+    return True
+
+
+def _prepare_headers(urlopen_kwargs: typing.Dict) -> typing.Dict:
+    """Return a copy of the caller's headers and store it back into
+    ``urlopen_kwargs`` so that context injection cannot mutate the
+    caller-owned dict."""
+    headers = urlopen_kwargs.get("headers")
+
+    # avoid modifying original headers on inject
+    headers = headers.copy() if headers is not None else {}
+    urlopen_kwargs["headers"] = headers
+
+    return headers
+
+
+def _set_status_code_attribute(
+    span: Span,
+    status_code: int,
+    metric_attributes: dict = None,
+    sem_conv_opt_in_mode: _StabilityMode = _StabilityMode.DEFAULT,
+) -> None:
+    """Record the response status on *span* and *metric_attributes*.
+
+    Non-numeric status values are normalized to -1 before being handed to
+    the shared ``_set_status`` helper; the original text is kept in
+    ``status_code_str``.
+    """
+    status_code_str = str(status_code)
+    try:
+        status_code = int(status_code)
+    except ValueError:
+        status_code = -1
+
+    if metric_attributes is None:
+        metric_attributes = {}
+
+    _set_status(
+        span,
+        metric_attributes,
+        status_code,
+        status_code_str,
+        server_span=False,
+        sem_conv_opt_in_mode=sem_conv_opt_in_mode,
+    )
+
+
+def _set_metric_attributes(
+    metric_attributes: dict,
+    instance: urllib3.connectionpool.HTTPConnectionPool,
+    response: urllib3.response.HTTPResponse,
+    method: str,
+    sem_conv_opt_in_mode: _StabilityMode = _StabilityMode.DEFAULT,
+) -> None:
+    # Populate host/scheme/method/peer attributes for the duration and
+    # size histograms recorded after each request.
+    _set_http_host_client(
+        metric_attributes, instance.host, sem_conv_opt_in_mode
+    )
+    _set_http_scheme(metric_attributes, instance.scheme, sem_conv_opt_in_mode)
+    _set_http_method(
+        metric_attributes,
+        method,
sanitize_method(method),
+        sem_conv_opt_in_mode,
+    )
+    _set_http_net_peer_name_client(
+        metric_attributes, instance.host, sem_conv_opt_in_mode
+    )
+    _set_http_peer_port_client(
+        metric_attributes, instance.port, sem_conv_opt_in_mode
+    )
+
+    # urllib3 reports HTTP protocol version as an int (11 -> HTTP/1.1).
+    version = getattr(response, "version")
+    if version:
+        http_version = "1.1" if version == 11 else "1.0"
+        _set_http_network_protocol_version(
+            metric_attributes, http_version, sem_conv_opt_in_mode
+        )
+
+
+def _filter_attributes_semconv(
+    metric_attributes,
+    sem_conv_opt_in_mode: _StabilityMode = _StabilityMode.DEFAULT,
+):
+    """Split *metric_attributes* into the (old, new) semconv attribute sets.
+
+    Either element is ``None`` when the corresponding convention is not
+    being reported under the current opt-in mode.
+    """
+    duration_attrs_old = None
+    duration_attrs_new = None
+    if _report_old(sem_conv_opt_in_mode):
+        duration_attrs_old = _filter_semconv_duration_attrs(
+            metric_attributes,
+            _client_duration_attrs_old,
+            _client_duration_attrs_new,
+            _StabilityMode.DEFAULT,
+        )
+    if _report_new(sem_conv_opt_in_mode):
+        duration_attrs_new = _filter_semconv_duration_attrs(
+            metric_attributes,
+            _client_duration_attrs_old,
+            _client_duration_attrs_new,
+            _StabilityMode.HTTP,
+        )
+
+    return (duration_attrs_old, duration_attrs_new)
+
+
+def _record_metrics(
+    metric_attributes: dict,
+    duration_histogram_old: Histogram,
+    duration_histogram_new: Histogram,
+    request_size_histogram_old: Histogram,
+    request_size_histogram_new: Histogram,
+    response_size_histogram_old: Histogram,
+    response_size_histogram_new: Histogram,
+    duration_s: float,
+    request_size: typing.Optional[int],
+    response_size: int,
+    sem_conv_opt_in_mode: _StabilityMode = _StabilityMode.DEFAULT,
+):
+    """Record duration and body-size histograms for one completed request.
+
+    Histogram arguments are ``None`` when the corresponding semconv
+    generation is disabled; each record call is guarded accordingly.
+    """
+    attrs_old, attrs_new = _filter_attributes_semconv(
+        metric_attributes, sem_conv_opt_in_mode
+    )
+    if duration_histogram_old:
+        # Default behavior is to record the duration in milliseconds
+        duration_histogram_old.record(
+            max(round(duration_s * 1000), 0),
+            attributes=attrs_old,
+        )
+
+    if duration_histogram_new:
+        # New semconv record the duration in seconds
+        duration_histogram_new.record(
+            duration_s,
+            attributes=attrs_new,
+        )
+
+    # request_size is None when the body size could not be determined.
+    if request_size is not 
None:
+        if request_size_histogram_old:
+            request_size_histogram_old.record(
+                request_size, attributes=attrs_old
+            )
+
+        if request_size_histogram_new:
+            request_size_histogram_new.record(
+                request_size, attributes=attrs_new
+            )
+
+    if response_size_histogram_old:
+        response_size_histogram_old.record(response_size, attributes=attrs_old)
+
+    if response_size_histogram_new:
+        response_size_histogram_new.record(response_size, attributes=attrs_new)
+
+
+def _uninstrument():
+    """Remove the wrapper installed on ``HTTPConnectionPool.urlopen``."""
+    unwrap(urllib3.connectionpool.HTTPConnectionPool, "urlopen")
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/urllib3/package.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/urllib3/package.py
new file mode 100644
index 00000000..568120c4
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/urllib3/package.py
@@ -0,0 +1,20 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Dependency specifier returned by
+# URLLib3Instrumentor.instrumentation_dependencies().
+_instruments = ("urllib3 >= 1.0.0, < 3.0.0",)
+
+# NOTE(review): the two flags below are not imported by this package's own
+# modules; they appear to describe capabilities (metrics support, semconv
+# migration state) for external instrumentation tooling — confirm.
+_supports_metrics = True
+
+_semconv_status = "migration"
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/urllib3/version.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/urllib3/version.py
new file mode 100644
index 00000000..7fb5b98b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/urllib3/version.py
@@ -0,0 +1,15 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__version__ = "0.52b1"
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/utils.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/utils.py
new file mode 100644
index 00000000..d5bf5db7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/utils.py
@@ -0,0 +1,226 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +from __future__ import annotations + +import urllib.parse +from contextlib import contextmanager +from importlib import import_module +from re import escape, sub +from typing import Any, Dict, Generator, Sequence + +from wrapt import ObjectProxy + +from opentelemetry import context, trace + +# pylint: disable=E0611 +# FIXME: fix the importing of these private attributes when the location of the _SUPPRESS_HTTP_INSTRUMENTATION_KEY is defined.= +from opentelemetry.context import ( + _SUPPRESS_HTTP_INSTRUMENTATION_KEY, + _SUPPRESS_INSTRUMENTATION_KEY, +) + +# pylint: disable=E0611 +from opentelemetry.propagate import extract +from opentelemetry.trace import StatusCode +from opentelemetry.trace.propagation.tracecontext import ( + TraceContextTextMapPropagator, +) + +propagator = TraceContextTextMapPropagator() + +_SUPPRESS_INSTRUMENTATION_KEY_PLAIN = ( + "suppress_instrumentation" # Set for backward compatibility +) + + +def extract_attributes_from_object( + obj: Any, attributes: Sequence[str], existing: Dict[str, str] | None = None +) -> Dict[str, str]: + extracted: dict[str, str] = {} + if existing: + extracted.update(existing) + for attr in attributes: + value = getattr(obj, attr, None) + if value is not None: + extracted[attr] = str(value) + return extracted + + +def http_status_to_status_code( + status: int, + allow_redirect: bool = True, + server_span: bool = False, +) -> StatusCode: + """Converts an HTTP status code to an OpenTelemetry canonical status code + + Args: + status (int): HTTP status code + """ + # See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md#status + if not isinstance(status, int): + return StatusCode.UNSET + + if status < 100: + return StatusCode.ERROR + if status <= 299: + return StatusCode.UNSET + if status <= 399 and allow_redirect: + return StatusCode.UNSET + if status <= 499 and server_span: + return StatusCode.UNSET + return StatusCode.ERROR + + +def unwrap(obj: 
object, attr: str): + """Given a function that was wrapped by wrapt.wrap_function_wrapper, unwrap it + + The object containing the function to unwrap may be passed as dotted module path string. + + Args: + obj: Object that holds a reference to the wrapped function or dotted import path as string + attr (str): Name of the wrapped function + """ + if isinstance(obj, str): + try: + module_path, class_name = obj.rsplit(".", 1) + except ValueError as exc: + raise ImportError( + f"Cannot parse '{obj}' as dotted import path" + ) from exc + module = import_module(module_path) + try: + obj = getattr(module, class_name) + except AttributeError as exc: + raise ImportError( + f"Cannot import '{class_name}' from '{module}'" + ) from exc + + func = getattr(obj, attr, None) + if func and isinstance(func, ObjectProxy) and hasattr(func, "__wrapped__"): + setattr(obj, attr, func.__wrapped__) + + +def _start_internal_or_server_span( + tracer, + span_name, + start_time, + context_carrier, + context_getter, + attributes=None, +): + """Returns internal or server span along with the token which can be used by caller to reset context + + + Args: + tracer : tracer in use by given instrumentation library + span_name (string): name of the span + start_time : start time of the span + context_carrier : object which contains values that are + used to construct a Context. This object + must be paired with an appropriate getter + which understands how to extract a value from it. + context_getter : an object which contains a get function that can retrieve zero + or more values from the carrier and a keys function that can get all the keys + from carrier. 
+ """ + + token = ctx = span_kind = None + if trace.get_current_span() is trace.INVALID_SPAN: + ctx = extract(context_carrier, getter=context_getter) + token = context.attach(ctx) + span_kind = trace.SpanKind.SERVER + else: + ctx = context.get_current() + span_kind = trace.SpanKind.INTERNAL + span = tracer.start_span( + name=span_name, + context=ctx, + kind=span_kind, + start_time=start_time, + attributes=attributes, + ) + return span, token + + +def _url_quote(s: Any) -> str: # pylint: disable=invalid-name + if not isinstance(s, (str, bytes)): + return s + quoted = urllib.parse.quote(s) + # Since SQL uses '%' as a keyword, '%' is a by-product of url quoting + # e.g. foo,bar --> foo%2Cbar + # thus in our quoting, we need to escape it too to finally give + # foo,bar --> foo%%2Cbar + return quoted.replace("%", "%%") + + +def _get_opentelemetry_values() -> dict[str, Any]: + """ + Return the OpenTelemetry Trace and Span IDs if Span ID is set in the + OpenTelemetry execution context. + """ + # Insert the W3C TraceContext generated + _headers: dict[str, Any] = {} + propagator.inject(_headers) + return _headers + + +def _python_path_without_directory(python_path, directory, path_separator): + return sub( + rf"{escape(directory)}{path_separator}(?!$)", + "", + python_path, + ) + + +def is_instrumentation_enabled() -> bool: + return not ( + context.get_value(_SUPPRESS_INSTRUMENTATION_KEY) + or context.get_value(_SUPPRESS_INSTRUMENTATION_KEY_PLAIN) + ) + + +def is_http_instrumentation_enabled() -> bool: + return is_instrumentation_enabled() and not context.get_value( + _SUPPRESS_HTTP_INSTRUMENTATION_KEY + ) + + +@contextmanager +def _suppress_instrumentation(*keys: str) -> Generator[None]: + """Suppress instrumentation within the context.""" + ctx = context.get_current() + for key in keys: + ctx = context.set_value(key, True, ctx) + token = context.attach(ctx) + try: + yield + finally: + context.detach(token) + + +@contextmanager +def suppress_instrumentation() -> 
Generator[None]:
+    """Suppress instrumentation within the context."""
+    # Sets both the context key and the legacy plain-string key.
+    with _suppress_instrumentation(
+        _SUPPRESS_INSTRUMENTATION_KEY, _SUPPRESS_INSTRUMENTATION_KEY_PLAIN
+    ):
+        yield
+
+
+@contextmanager
+def suppress_http_instrumentation() -> Generator[None]:
+    """Suppress instrumentation within the context."""
+    # HTTP-only variant: sets just _SUPPRESS_HTTP_INSTRUMENTATION_KEY.
+    with _suppress_instrumentation(_SUPPRESS_HTTP_INSTRUMENTATION_KEY):
+        yield
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/version.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/version.py
new file mode 100644
index 00000000..7fb5b98b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/version.py
@@ -0,0 +1,15 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__version__ = "0.52b1"
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/wsgi/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/wsgi/__init__.py
new file mode 100644
index 00000000..a0a2ce9a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/wsgi/__init__.py
@@ -0,0 +1,747 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +This library provides a WSGI middleware that can be used on any WSGI framework +(such as Django / Flask / Web.py) to track requests timing through OpenTelemetry. + +Usage (Flask) +------------- + +.. code-block:: python + + from flask import Flask + from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware + + app = Flask(__name__) + app.wsgi_app = OpenTelemetryMiddleware(app.wsgi_app) + + @app.route("/") + def hello(): + return "Hello!" + + if __name__ == "__main__": + app.run(debug=True) + + +Usage (Django) +-------------- + +Modify the application's ``wsgi.py`` file as shown below. + +.. code-block:: python + + import os + from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware + from django.core.wsgi import get_wsgi_application + + os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'application.settings') + + application = get_wsgi_application() + application = OpenTelemetryMiddleware(application) + +Usage (Web.py) +-------------- + +.. code-block:: python + + import web + from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware + from cheroot import wsgi + + urls = ('/', 'index') + + + class index: + + def GET(self): + return "Hello, world!" 
+
+
+    if __name__ == "__main__":
+        app = web.application(urls, globals())
+        func = app.wsgifunc()
+
+        func = OpenTelemetryMiddleware(func)
+
+        server = wsgi.WSGIServer(
+            ("localhost", 5100), func, server_name="localhost"
+        )
+        server.start()
+
+Configuration
+-------------
+
+Request/Response hooks
+**********************
+
+This instrumentation supports request and response hooks. These are functions that get called
+right after a span is created for a request and right before the span is finished for the response.
+
+- The request hook is called with the internal span and the WSGI environ of the incoming request,
+  right after the span is created.
+- The response hook is called with the internal span, the WSGI environ, the response status and a
+  list of key-value tuples representing the response headers, right before the span is finished.
+
+For example,
+
+.. code-block:: python
+
+    from wsgiref.types import WSGIEnvironment, StartResponse
+    from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware
+
+    def app(environ: WSGIEnvironment, start_response: StartResponse):
+        start_response("200 OK", [("Content-Type", "text/plain"), ("Content-Length", "13")])
+        return [b"Hello, World!"]
+
+    def request_hook(span: Span, environ: WSGIEnvironment):
+        if span and span.is_recording():
+            span.set_attribute("custom_user_attribute_from_request_hook", "some-value")
+
+    def response_hook(span: Span, environ: WSGIEnvironment, status: str, response_headers: list[tuple[str, str]]):
+        if span and span.is_recording():
+            span.set_attribute("custom_user_attribute_from_response_hook", "some-value")
+
+    OpenTelemetryMiddleware(app, request_hook=request_hook, response_hook=response_hook)
+
+Capture HTTP request and response headers
+*****************************************
+You can configure the agent to capture specified HTTP headers as span attributes, according to the
+`semantic convention 
<https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md#http-request-and-response-headers>`_. + +Request headers +*************** +To capture HTTP request headers as span attributes, set the environment variable +``OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST`` to a comma delimited list of HTTP header names. + +For example, +:: + + export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST="content-type,custom_request_header" + +will extract ``content-type`` and ``custom_request_header`` from the request headers and add them as span attributes. + +Request header names in WSGI are case-insensitive and ``-`` characters are replaced by ``_``. So, giving the header +name as ``CUStom_Header`` in the environment variable will capture the header named ``custom-header``. + +Regular expressions may also be used to match multiple headers that correspond to the given pattern. For example: +:: + + export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST="Accept.*,X-.*" + +Would match all request headers that start with ``Accept`` and ``X-``. + +To capture all request headers, set ``OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST`` to ``".*"``. +:: + + export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST=".*" + +The name of the added span attribute will follow the format ``http.request.header.<header_name>`` where ``<header_name>`` +is the normalized HTTP header name (lowercase, with ``-`` replaced by ``_``). The value of the attribute will be a +single item list containing all the header values. + +For example: +``http.request.header.custom_request_header = ["<value1>,<value2>"]`` + +Response headers +**************** +To capture HTTP response headers as span attributes, set the environment variable +``OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE`` to a comma delimited list of HTTP header names. 
+ +For example, +:: + + export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE="content-type,custom_response_header" + +will extract ``content-type`` and ``custom_response_header`` from the response headers and add them as span attributes. + +Response header names in WSGI are case-insensitive. So, giving the header name as ``CUStom-Header`` in the environment +variable will capture the header named ``custom-header``. + +Regular expressions may also be used to match multiple headers that correspond to the given pattern. For example: +:: + + export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE="Content.*,X-.*" + +Would match all response headers that start with ``Content`` and ``X-``. + +To capture all response headers, set ``OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE`` to ``".*"``. +:: + + export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE=".*" + +The name of the added span attribute will follow the format ``http.response.header.<header_name>`` where ``<header_name>`` +is the normalized HTTP header name (lowercase, with ``-`` replaced by ``_``). The value of the attribute will be a +single item list containing all the header values. + +For example: +``http.response.header.custom_response_header = ["<value1>,<value2>"]`` + +Sanitizing headers +****************** +In order to prevent storing sensitive data such as personally identifiable information (PII), session keys, passwords, +etc, set the environment variable ``OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS`` +to a comma delimited list of HTTP header names to be sanitized. Regexes may be used, and all header names will be +matched in a case-insensitive manner. + +For example, +:: + + export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS=".*session.*,set-cookie" + +will replace the value of headers such as ``session-id`` and ``set-cookie`` with ``[REDACTED]`` in the span. 
+ +Note: + The environment variable names used to capture HTTP headers are still experimental, and thus are subject to change. + +Sanitizing methods +****************** +In order to prevent unbound cardinality for HTTP methods by default nonstandard ones are labeled as ``NONSTANDARD``. +To record all of the names set the environment variable ``OTEL_PYTHON_INSTRUMENTATION_HTTP_CAPTURE_ALL_METHODS`` +to a value that evaluates to true, e.g. ``1``. + +API +--- +""" + +from __future__ import annotations + +import functools +import wsgiref.util as wsgiref_util +from timeit import default_timer +from typing import TYPE_CHECKING, Any, Callable, Dict, Iterable, TypeVar, cast + +from opentelemetry import context, trace +from opentelemetry.instrumentation._semconv import ( + _filter_semconv_active_request_count_attr, + _filter_semconv_duration_attrs, + _get_schema_url, + _OpenTelemetrySemanticConventionStability, + _OpenTelemetryStabilitySignalType, + _report_new, + _report_old, + _server_active_requests_count_attrs_new, + _server_active_requests_count_attrs_old, + _server_duration_attrs_new, + _server_duration_attrs_old, + _set_http_flavor_version, + _set_http_method, + _set_http_net_host, + _set_http_net_host_port, + _set_http_net_peer_name_server, + _set_http_peer_ip_server, + _set_http_peer_port_server, + _set_http_scheme, + _set_http_target, + _set_http_user_agent, + _set_status, + _StabilityMode, +) +from opentelemetry.instrumentation.utils import _start_internal_or_server_span +from opentelemetry.instrumentation.wsgi.version import __version__ +from opentelemetry.metrics import MeterProvider, get_meter +from opentelemetry.propagators.textmap import Getter +from opentelemetry.semconv.attributes.error_attributes import ERROR_TYPE +from opentelemetry.semconv.metrics import MetricInstruments +from opentelemetry.semconv.metrics.http_metrics import ( + HTTP_SERVER_REQUEST_DURATION, +) +from opentelemetry.semconv.trace import SpanAttributes +from opentelemetry.trace import 
TracerProvider +from opentelemetry.trace.status import Status, StatusCode +from opentelemetry.util.http import ( + OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS, + OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST, + OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE, + SanitizeValue, + _parse_url_query, + get_custom_headers, + normalise_request_header_name, + normalise_response_header_name, + remove_url_credentials, + sanitize_method, +) + +if TYPE_CHECKING: + from wsgiref.types import StartResponse, WSGIApplication, WSGIEnvironment + + +T = TypeVar("T") +RequestHook = Callable[[trace.Span, "WSGIEnvironment"], None] +ResponseHook = Callable[ + [trace.Span, "WSGIEnvironment", str, "list[tuple[str, str]]"], None +] + +_HTTP_VERSION_PREFIX = "HTTP/" +_CARRIER_KEY_PREFIX = "HTTP_" +_CARRIER_KEY_PREFIX_LEN = len(_CARRIER_KEY_PREFIX) + + +class WSGIGetter(Getter[Dict[str, Any]]): + def get(self, carrier: dict[str, Any], key: str) -> list[str] | None: + """Getter implementation to retrieve a HTTP header value from the + PEP3333-conforming WSGI environ + + Args: + carrier: WSGI environ object + key: header name in environ object + Returns: + A list with a single string with the header value if it exists, + else None. + """ + environ_key = "HTTP_" + key.upper().replace("-", "_") + value = carrier.get(environ_key) + if value is not None: + return [value] + return None + + def keys(self, carrier: dict[str, Any]): + return [ + key[_CARRIER_KEY_PREFIX_LEN:].lower().replace("_", "-") + for key in carrier + if key.startswith(_CARRIER_KEY_PREFIX) + ] + + +wsgi_getter = WSGIGetter() + + +# pylint: disable=too-many-branches +def collect_request_attributes( + environ: WSGIEnvironment, + sem_conv_opt_in_mode: _StabilityMode = _StabilityMode.DEFAULT, +): + """Collects HTTP request attributes from the PEP3333-conforming + WSGI environ and returns a dictionary to be used as span creation attributes. 
+ """ + result: dict[str, str | None] = {} + _set_http_method( + result, + environ.get("REQUEST_METHOD", ""), + sanitize_method(cast(str, environ.get("REQUEST_METHOD", ""))), + sem_conv_opt_in_mode, + ) + # old semconv v1.12.0 + server_name = environ.get("SERVER_NAME") + if _report_old(sem_conv_opt_in_mode): + result[SpanAttributes.HTTP_SERVER_NAME] = server_name + + _set_http_scheme( + result, + environ.get("wsgi.url_scheme"), + sem_conv_opt_in_mode, + ) + + host = environ.get("HTTP_HOST") + host_port = environ.get("SERVER_PORT") + if host: + _set_http_net_host(result, host, sem_conv_opt_in_mode) + # old semconv v1.12.0 + if _report_old(sem_conv_opt_in_mode): + result[SpanAttributes.HTTP_HOST] = host + if host_port: + _set_http_net_host_port( + result, + int(host_port), + sem_conv_opt_in_mode, + ) + + target = environ.get("RAW_URI") + if target is None: # Note: `"" or None is None` + target = environ.get("REQUEST_URI") + if target: + path, query = _parse_url_query(target) + _set_http_target(result, target, path, query, sem_conv_opt_in_mode) + else: + # old semconv v1.20.0 + if _report_old(sem_conv_opt_in_mode): + result[SpanAttributes.HTTP_URL] = remove_url_credentials( + wsgiref_util.request_uri(environ) + ) + + remote_addr = environ.get("REMOTE_ADDR") + if remote_addr: + _set_http_peer_ip_server(result, remote_addr, sem_conv_opt_in_mode) + + peer_port = environ.get("REMOTE_PORT") + if peer_port: + _set_http_peer_port_server(result, peer_port, sem_conv_opt_in_mode) + + remote_host = environ.get("REMOTE_HOST") + if remote_host and remote_host != remote_addr: + _set_http_net_peer_name_server( + result, remote_host, sem_conv_opt_in_mode + ) + + user_agent = environ.get("HTTP_USER_AGENT") + if user_agent is not None and len(user_agent) > 0: + _set_http_user_agent(result, user_agent, sem_conv_opt_in_mode) + + flavor = environ.get("SERVER_PROTOCOL", "") + if flavor.upper().startswith(_HTTP_VERSION_PREFIX): + flavor = flavor[len(_HTTP_VERSION_PREFIX) :] + if flavor: + 
_set_http_flavor_version(result, flavor, sem_conv_opt_in_mode) + + return result + + +def collect_custom_request_headers_attributes(environ: WSGIEnvironment): + """Returns custom HTTP request headers which are configured by the user + from the PEP3333-conforming WSGI environ to be used as span creation attributes as described + in the specification https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md#http-request-and-response-headers + """ + + sanitize = SanitizeValue( + get_custom_headers( + OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS + ) + ) + headers = { + key[_CARRIER_KEY_PREFIX_LEN:].replace("_", "-"): val + for key, val in environ.items() + if key.startswith(_CARRIER_KEY_PREFIX) + } + + return sanitize.sanitize_header_values( + headers, + get_custom_headers( + OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST + ), + normalise_request_header_name, + ) + + +def collect_custom_response_headers_attributes( + response_headers: list[tuple[str, str]], +): + """Returns custom HTTP response headers which are configured by the user from the + PEP3333-conforming WSGI environ as described in the specification + https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md#http-request-and-response-headers + """ + + sanitize = SanitizeValue( + get_custom_headers( + OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS + ) + ) + response_headers_dict: dict[str, str] = {} + if response_headers: + for key, val in response_headers: + key = key.lower() + if key in response_headers_dict: + response_headers_dict[key] += "," + val + else: + response_headers_dict[key] = val + + return sanitize.sanitize_header_values( + response_headers_dict, + get_custom_headers( + OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE + ), + normalise_response_header_name, + ) + + +# TODO: Used only on the 
# `opentelemetry-instrumentation-pyramid` package - It can be moved there.
def _parse_status_code(resp_status: str) -> int | None:
    """Parse the numeric code out of a PEP 3333 status string.

    ``resp_status`` normally looks like ``"200 OK"``; a bare ``"200"``
    with no reason phrase is tolerated as well.

    Returns:
        The status code as an int, or None if it is not an integer.
    """
    # str.partition never fails, unlike unpacking the result of
    # split(" ", 1), which raises ValueError (outside any handler)
    # when the status string carries no reason phrase.
    status_code, _, _ = resp_status.partition(" ")
    try:
        return int(status_code)
    except ValueError:
        return None


def _parse_active_request_count_attrs(
    req_attrs, sem_conv_opt_in_mode: _StabilityMode = _StabilityMode.DEFAULT
):
    """Filter request attributes down to those recorded on the
    active-requests up/down counter for the given opt-in mode."""
    return _filter_semconv_active_request_count_attr(
        req_attrs,
        _server_active_requests_count_attrs_old,
        _server_active_requests_count_attrs_new,
        sem_conv_opt_in_mode,
    )


def _parse_duration_attrs(
    req_attrs: dict[str, str | None],
    sem_conv_opt_in_mode: _StabilityMode = _StabilityMode.DEFAULT,
):
    """Filter request attributes down to those recorded on the request
    duration histogram for the given opt-in mode."""
    return _filter_semconv_duration_attrs(
        req_attrs,
        _server_duration_attrs_old,
        _server_duration_attrs_new,
        sem_conv_opt_in_mode,
    )


def add_response_attributes(
    span: trace.Span,
    start_response_status: str,
    response_headers: list[tuple[str, str]],
    duration_attrs: dict[str, str | None] | None = None,
    sem_conv_opt_in_mode: _StabilityMode = _StabilityMode.DEFAULT,
):  # pylint: disable=unused-argument
    """Adds HTTP response attributes to span using the arguments
    passed to a PEP3333-conforming start_response callable.
    """
    # Tolerate a status line without a reason phrase (e.g. "200"):
    # partition() always yields a first component, whereas unpacking
    # split(" ", 1) would raise ValueError before reaching the
    # int-parsing fallback below.
    status_code_str, _, _ = start_response_status.partition(" ")
    try:
        status_code = int(status_code_str)
    except ValueError:
        # Sentinel for "unparseable status"; consumed by _set_status.
        status_code = -1
    if duration_attrs is None:
        duration_attrs = {}
    _set_status(
        span,
        duration_attrs,
        status_code,
        status_code_str,
        server_span=True,
        sem_conv_opt_in_mode=sem_conv_opt_in_mode,
    )


def get_default_span_name(environ: WSGIEnvironment) -> str:
    """
    Default span name is the HTTP method and URL path, or just the method.
    https://github.com/open-telemetry/opentelemetry-specification/pull/3165
    https://opentelemetry.io/docs/reference/specification/trace/semantic_conventions/http/#name

    Args:
        environ: The WSGI environ object.
    Returns:
        The span name.
    """
    method = sanitize_method(
        cast(str, environ.get("REQUEST_METHOD", "")).strip()
    )
    if method == "_OTHER":
        # Nonstandard methods must not leak into the span name
        # (unbounded cardinality); fall back to a fixed name.
        return "HTTP"
    path = cast(str, environ.get("PATH_INFO", "")).strip()
    if method and path:
        return f"{method} {path}"
    return method


class OpenTelemetryMiddleware:
    """The WSGI application middleware.

    This class is a PEP 3333 conforming WSGI middleware that starts and
    annotates spans for any requests it is invoked with.

    Args:
        wsgi: The WSGI application callable to forward requests to.
        request_hook: Optional callback which is called with the server span and WSGI
                      environ object for every incoming request.
        response_hook: Optional callback which is called with the server span,
                       WSGI environ, status_code and response_headers for every
                       incoming request.
        tracer_provider: Optional tracer provider to use. If omitted the current
                         globally configured one is used.
        meter_provider: Optional meter provider to use. If omitted the current
                        globally configured one is used.
    """

    def __init__(
        self,
        wsgi: WSGIApplication,
        request_hook: RequestHook | None = None,
        response_hook: ResponseHook | None = None,
        tracer_provider: TracerProvider | None = None,
        meter_provider: MeterProvider | None = None,
    ):
        # initialize semantic conventions opt-in if needed
        _OpenTelemetrySemanticConventionStability._initialize()
        sem_conv_opt_in_mode = _OpenTelemetrySemanticConventionStability._get_opentelemetry_stability_opt_in_mode(
            _OpenTelemetryStabilitySignalType.HTTP,
        )
        self.wsgi = wsgi
        self.tracer = trace.get_tracer(
            __name__,
            __version__,
            tracer_provider,
            schema_url=_get_schema_url(sem_conv_opt_in_mode),
        )
        self.meter = get_meter(
            __name__,
            __version__,
            meter_provider,
            schema_url=_get_schema_url(sem_conv_opt_in_mode),
        )
        # Old-semconv duration is milliseconds; new-semconv is seconds.
        # Either histogram may be None depending on the opt-in mode.
        self.duration_histogram_old = None
        if _report_old(sem_conv_opt_in_mode):
            self.duration_histogram_old = self.meter.create_histogram(
                name=MetricInstruments.HTTP_SERVER_DURATION,
                unit="ms",
                description="Measures the duration of inbound HTTP requests.",
            )
        self.duration_histogram_new = None
        if _report_new(sem_conv_opt_in_mode):
            self.duration_histogram_new = self.meter.create_histogram(
                name=HTTP_SERVER_REQUEST_DURATION,
                unit="s",
                description="Duration of HTTP server requests.",
            )
        # We don't need a separate active request counter for old/new semantic conventions
        # because the new attributes are a subset of the old attributes
        self.active_requests_counter = self.meter.create_up_down_counter(
            name=MetricInstruments.HTTP_SERVER_ACTIVE_REQUESTS,
            unit="{request}",
            description="Number of active HTTP server requests.",
        )
        self.request_hook = request_hook
        self.response_hook = response_hook
        self._sem_conv_opt_in_mode = sem_conv_opt_in_mode

    @staticmethod
    def _create_start_response(
        span: trace.Span,
        start_response: StartResponse,
        response_hook: Callable[[str, list[tuple[str, str]]], None] | None,
        duration_attrs: dict[str, str | None],
        sem_conv_opt_in_mode: _StabilityMode,
    ):
        """Wrap the server-supplied start_response so that response
        status/headers are captured on the span before being forwarded."""

        @functools.wraps(start_response)
        def _start_response(
            status: str,
            response_headers: list[tuple[str, str]],
            *args: Any,
            **kwargs: Any,
        ):
            add_response_attributes(
                span,
                status,
                response_headers,
                duration_attrs,
                sem_conv_opt_in_mode,
            )
            # Custom response headers are only captured on SERVER spans.
            if span.is_recording() and span.kind == trace.SpanKind.SERVER:
                custom_attributes = collect_custom_response_headers_attributes(
                    response_headers
                )
                if len(custom_attributes) > 0:
                    span.set_attributes(custom_attributes)
            if response_hook:
                response_hook(status, response_headers)
            return start_response(status, response_headers, *args, **kwargs)

        return _start_response

    # pylint: disable=too-many-branches
    def __call__(
        self, environ: WSGIEnvironment, start_response: StartResponse
    ):
        """The WSGI application

        Args:
            environ: A WSGI environment.
            start_response: The WSGI start_response callable.
        """
        req_attrs = collect_request_attributes(
            environ, self._sem_conv_opt_in_mode
        )
        active_requests_count_attrs = _parse_active_request_count_attrs(
            req_attrs,
            self._sem_conv_opt_in_mode,
        )

        span, token = _start_internal_or_server_span(
            tracer=self.tracer,
            span_name=get_default_span_name(environ),
            start_time=None,
            context_carrier=environ,
            context_getter=wsgi_getter,
            attributes=req_attrs,
        )
        if span.is_recording() and span.kind == trace.SpanKind.SERVER:
            custom_attributes = collect_custom_request_headers_attributes(
                environ
            )
            if len(custom_attributes) > 0:
                span.set_attributes(custom_attributes)

        if self.request_hook:
            self.request_hook(span, environ)

        # Pre-bind span and environ so the start_response wrapper only has
        # to pass the status and headers to the user's hook.
        response_hook = self.response_hook
        if response_hook:
            response_hook = functools.partial(response_hook, span, environ)

        start = default_timer()
        self.active_requests_counter.add(1, active_requests_count_attrs)
        try:
            with trace.use_span(span):
                start_response = self._create_start_response(
                    span,
                    start_response,
                    response_hook,
                    req_attrs,
                    self._sem_conv_opt_in_mode,
                )
                iterable = self.wsgi(environ, start_response)
                # On success the span is ended only after the response
                # iterable is fully consumed (see _end_span_after_iterating).
                return _end_span_after_iterating(iterable, span, token)
        except Exception as ex:
            if _report_new(self._sem_conv_opt_in_mode):
                req_attrs[ERROR_TYPE] = type(ex).__qualname__
                if span.is_recording():
                    span.set_attribute(ERROR_TYPE, type(ex).__qualname__)
            span.set_status(Status(StatusCode.ERROR, str(ex)))
            span.end()
            if token is not None:
                context.detach(token)
            raise
        finally:
            # Runs on both the success and the error path: record duration
            # histogram(s) and decrement the active-requests counter.
            duration_s = default_timer() - start
            if self.duration_histogram_old:
                duration_attrs_old = _parse_duration_attrs(
                    req_attrs, _StabilityMode.DEFAULT
                )
                self.duration_histogram_old.record(
                    max(round(duration_s * 1000), 0), duration_attrs_old
                )
            if self.duration_histogram_new:
                duration_attrs_new = _parse_duration_attrs(
                    req_attrs, _StabilityMode.HTTP
                )
                self.duration_histogram_new.record(
                    max(duration_s, 0), duration_attrs_new
                )
            self.active_requests_counter.add(-1, active_requests_count_attrs)


# Put this in a subfunction to not delay the call to the wrapped
# WSGI application (instrumentation should change the application
# behavior as little as possible).
+def _end_span_after_iterating( + iterable: Iterable[T], span: trace.Span, token: object +) -> Iterable[T]: + try: + with trace.use_span(span): + yield from iterable + finally: + close = getattr(iterable, "close", None) + if close: + close() + span.end() + if token is not None: + context.detach(token) + + +# TODO: inherit from opentelemetry.instrumentation.propagators.Setter +class ResponsePropagationSetter: + def set(self, carrier: list[tuple[str, T]], key: str, value: T): # pylint: disable=no-self-use + carrier.append((key, value)) + + +default_response_propagation_setter = ResponsePropagationSetter() diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/wsgi/package.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/wsgi/package.py new file mode 100644 index 00000000..2dbb1905 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/wsgi/package.py @@ -0,0 +1,21 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +_instruments: tuple[str, ...] 
= tuple() + +_supports_metrics = True + +_semconv_status = "migration" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/wsgi/py.typed b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/wsgi/py.typed new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/wsgi/py.typed diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/wsgi/version.py b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/wsgi/version.py new file mode 100644 index 00000000..7fb5b98b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/instrumentation/wsgi/version.py @@ -0,0 +1,15 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__version__ = "0.52b1" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/metrics/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/metrics/__init__.py new file mode 100644 index 00000000..74284ad6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/metrics/__init__.py @@ -0,0 +1,132 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +The OpenTelemetry metrics API describes the classes used to generate +metrics. + +The :class:`.MeterProvider` provides users access to the :class:`.Meter` which in +turn is used to create :class:`.Instrument` objects. The :class:`.Instrument` objects are +used to record measurements. + +This module provides abstract (i.e. unimplemented) classes required for +metrics, and a concrete no-op implementation :class:`.NoOpMeter` that allows applications +to use the API package alone without a supporting implementation. + +To get a meter, you need to provide the package name from which you are +calling the meter APIs to OpenTelemetry by calling `MeterProvider.get_meter` +with the calling instrumentation name and the version of your package. + +The following code shows how to obtain a meter using the global :class:`.MeterProvider`:: + + from opentelemetry.metrics import get_meter + + meter = get_meter("example-meter") + counter = meter.create_counter("example-counter") + +.. versionadded:: 1.10.0 +.. 
versionchanged:: 1.12.0rc +""" + +from opentelemetry.metrics._internal import ( + Meter, + MeterProvider, + NoOpMeter, + NoOpMeterProvider, + get_meter, + get_meter_provider, + set_meter_provider, +) +from opentelemetry.metrics._internal.instrument import ( + Asynchronous, + CallbackOptions, + CallbackT, + Counter, + Histogram, + Instrument, + NoOpCounter, + NoOpHistogram, + NoOpObservableCounter, + NoOpObservableGauge, + NoOpObservableUpDownCounter, + NoOpUpDownCounter, + ObservableCounter, + ObservableGauge, + ObservableUpDownCounter, + Synchronous, + UpDownCounter, +) +from opentelemetry.metrics._internal.instrument import Gauge as _Gauge +from opentelemetry.metrics._internal.instrument import NoOpGauge as _NoOpGauge +from opentelemetry.metrics._internal.observation import Observation + +for obj in [ + Counter, + Synchronous, + Asynchronous, + CallbackOptions, + _Gauge, + _NoOpGauge, + get_meter_provider, + get_meter, + Histogram, + Meter, + MeterProvider, + Instrument, + NoOpCounter, + NoOpHistogram, + NoOpMeter, + NoOpMeterProvider, + NoOpObservableCounter, + NoOpObservableGauge, + NoOpObservableUpDownCounter, + NoOpUpDownCounter, + ObservableCounter, + ObservableGauge, + ObservableUpDownCounter, + Observation, + set_meter_provider, + UpDownCounter, +]: + obj.__module__ = __name__ + +__all__ = [ + "CallbackOptions", + "MeterProvider", + "NoOpMeterProvider", + "Meter", + "Counter", + "_Gauge", + "_NoOpGauge", + "NoOpCounter", + "UpDownCounter", + "NoOpUpDownCounter", + "Histogram", + "NoOpHistogram", + "ObservableCounter", + "NoOpObservableCounter", + "ObservableUpDownCounter", + "Instrument", + "Synchronous", + "Asynchronous", + "NoOpObservableGauge", + "ObservableGauge", + "NoOpObservableUpDownCounter", + "get_meter", + "get_meter_provider", + "set_meter_provider", + "Observation", + "CallbackT", + "NoOpMeter", +] diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/metrics/_internal/__init__.py 
b/.venv/lib/python3.12/site-packages/opentelemetry/metrics/_internal/__init__.py new file mode 100644 index 00000000..2319d8d1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/metrics/_internal/__init__.py @@ -0,0 +1,889 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# pylint: disable=too-many-ancestors + +""" +The OpenTelemetry metrics API describes the classes used to generate +metrics. + +The :class:`.MeterProvider` provides users access to the :class:`.Meter` which in +turn is used to create :class:`.Instrument` objects. The :class:`.Instrument` objects are +used to record measurements. + +This module provides abstract (i.e. unimplemented) classes required for +metrics, and a concrete no-op implementation :class:`.NoOpMeter` that allows applications +to use the API package alone without a supporting implementation. + +To get a meter, you need to provide the package name from which you are +calling the meter APIs to OpenTelemetry by calling `MeterProvider.get_meter` +with the calling instrumentation name and the version of your package. + +The following code shows how to obtain a meter using the global :class:`.MeterProvider`:: + + from opentelemetry.metrics import get_meter + + meter = get_meter("example-meter") + counter = meter.create_counter("example-counter") + +.. 
versionadded:: 1.10.0 +""" + +import warnings +from abc import ABC, abstractmethod +from dataclasses import dataclass +from logging import getLogger +from os import environ +from threading import Lock +from typing import Dict, List, Optional, Sequence, Union, cast + +from opentelemetry.environment_variables import OTEL_PYTHON_METER_PROVIDER +from opentelemetry.metrics._internal.instrument import ( + CallbackT, + Counter, + Gauge, + Histogram, + NoOpCounter, + NoOpGauge, + NoOpHistogram, + NoOpObservableCounter, + NoOpObservableGauge, + NoOpObservableUpDownCounter, + NoOpUpDownCounter, + ObservableCounter, + ObservableGauge, + ObservableUpDownCounter, + UpDownCounter, + _MetricsHistogramAdvisory, + _ProxyCounter, + _ProxyGauge, + _ProxyHistogram, + _ProxyObservableCounter, + _ProxyObservableGauge, + _ProxyObservableUpDownCounter, + _ProxyUpDownCounter, +) +from opentelemetry.util._once import Once +from opentelemetry.util._providers import _load_provider +from opentelemetry.util.types import ( + Attributes, +) + +_logger = getLogger(__name__) + + +# pylint: disable=invalid-name +_ProxyInstrumentT = Union[ + _ProxyCounter, + _ProxyHistogram, + _ProxyGauge, + _ProxyObservableCounter, + _ProxyObservableGauge, + _ProxyObservableUpDownCounter, + _ProxyUpDownCounter, +] + + +class MeterProvider(ABC): + """ + MeterProvider is the entry point of the API. It provides access to `Meter` instances. + """ + + @abstractmethod + def get_meter( + self, + name: str, + version: Optional[str] = None, + schema_url: Optional[str] = None, + attributes: Optional[Attributes] = None, + ) -> "Meter": + """Returns a `Meter` for use by the given instrumentation library. + + For any two calls it is undefined whether the same or different + `Meter` instances are returned, even for different library names. + + This function may return different `Meter` types (e.g. a no-op meter + vs. a functional meter). + + Args: + name: The name of the instrumenting module. 
+ ``__name__`` may not be used as this can result in + different meter names if the meters are in different files. + It is better to use a fixed string that can be imported where + needed and used consistently as the name of the meter. + + This should *not* be the name of the module that is + instrumented but the name of the module doing the instrumentation. + E.g., instead of ``"requests"``, use + ``"opentelemetry.instrumentation.requests"``. + + version: Optional. The version string of the + instrumenting library. Usually this should be the same as + ``importlib.metadata.version(instrumenting_library_name)``. + + schema_url: Optional. Specifies the Schema URL of the emitted telemetry. + attributes: Optional. Attributes that are associated with the emitted telemetry. + """ + + +class NoOpMeterProvider(MeterProvider): + """The default MeterProvider used when no MeterProvider implementation is available.""" + + def get_meter( + self, + name: str, + version: Optional[str] = None, + schema_url: Optional[str] = None, + attributes: Optional[Attributes] = None, + ) -> "Meter": + """Returns a NoOpMeter.""" + return NoOpMeter(name, version=version, schema_url=schema_url) + + +class _ProxyMeterProvider(MeterProvider): + def __init__(self) -> None: + self._lock = Lock() + self._meters: List[_ProxyMeter] = [] + self._real_meter_provider: Optional[MeterProvider] = None + + def get_meter( + self, + name: str, + version: Optional[str] = None, + schema_url: Optional[str] = None, + attributes: Optional[Attributes] = None, + ) -> "Meter": + with self._lock: + if self._real_meter_provider is not None: + return self._real_meter_provider.get_meter( + name, version, schema_url + ) + + meter = _ProxyMeter(name, version=version, schema_url=schema_url) + self._meters.append(meter) + return meter + + def on_set_meter_provider(self, meter_provider: MeterProvider) -> None: + with self._lock: + self._real_meter_provider = meter_provider + for meter in self._meters: + 
meter.on_set_meter_provider(meter_provider) + + +@dataclass +class _InstrumentRegistrationStatus: + instrument_id: str + already_registered: bool + conflict: bool + current_advisory: Optional[_MetricsHistogramAdvisory] + + +class Meter(ABC): + """Handles instrument creation. + + This class provides methods for creating instruments which are then + used to produce measurements. + """ + + def __init__( + self, + name: str, + version: Optional[str] = None, + schema_url: Optional[str] = None, + ) -> None: + super().__init__() + self._name = name + self._version = version + self._schema_url = schema_url + self._instrument_ids: Dict[ + str, Optional[_MetricsHistogramAdvisory] + ] = {} + self._instrument_ids_lock = Lock() + + @property + def name(self) -> str: + """ + The name of the instrumenting module. + """ + return self._name + + @property + def version(self) -> Optional[str]: + """ + The version string of the instrumenting library. + """ + return self._version + + @property + def schema_url(self) -> Optional[str]: + """ + Specifies the Schema URL of the emitted telemetry + """ + return self._schema_url + + def _register_instrument( + self, + name: str, + type_: type, + unit: str, + description: str, + advisory: Optional[_MetricsHistogramAdvisory] = None, + ) -> _InstrumentRegistrationStatus: + """ + Register an instrument with the name, type, unit and description as + identifying keys and the advisory as value. + + Returns a tuple. The first value is the instrument id. + The second value is an `_InstrumentRegistrationStatus` where + `already_registered` is `True` if the instrument has been registered + already. + If `conflict` is set to True the `current_advisory` attribute contains + the registered instrument advisory. 
+ """ + + instrument_id = ",".join( + [name.strip().lower(), type_.__name__, unit, description] + ) + + already_registered = False + conflict = False + current_advisory = None + + with self._instrument_ids_lock: + # we are not using get because None is a valid value + already_registered = instrument_id in self._instrument_ids + if already_registered: + current_advisory = self._instrument_ids[instrument_id] + conflict = current_advisory != advisory + else: + self._instrument_ids[instrument_id] = advisory + + return _InstrumentRegistrationStatus( + instrument_id=instrument_id, + already_registered=already_registered, + conflict=conflict, + current_advisory=current_advisory, + ) + + @staticmethod + def _log_instrument_registration_conflict( + name: str, + instrumentation_type: str, + unit: str, + description: str, + status: _InstrumentRegistrationStatus, + ) -> None: + _logger.warning( + "An instrument with name %s, type %s, unit %s and " + "description %s has been created already with a " + "different advisory value %s and will be used instead.", + name, + instrumentation_type, + unit, + description, + status.current_advisory, + ) + + @abstractmethod + def create_counter( + self, + name: str, + unit: str = "", + description: str = "", + ) -> Counter: + """Creates a `Counter` instrument + + Args: + name: The name of the instrument to be created + unit: The unit for observations this instrument reports. For + example, ``By`` for bytes. UCUM units are recommended. + description: A description for this instrument and what it measures. + """ + + @abstractmethod + def create_up_down_counter( + self, + name: str, + unit: str = "", + description: str = "", + ) -> UpDownCounter: + """Creates an `UpDownCounter` instrument + + Args: + name: The name of the instrument to be created + unit: The unit for observations this instrument reports. For + example, ``By`` for bytes. UCUM units are recommended. + description: A description for this instrument and what it measures. 
+ """ + + @abstractmethod + def create_observable_counter( + self, + name: str, + callbacks: Optional[Sequence[CallbackT]] = None, + unit: str = "", + description: str = "", + ) -> ObservableCounter: + """Creates an `ObservableCounter` instrument + + An observable counter observes a monotonically increasing count by calling provided + callbacks which accept a :class:`~opentelemetry.metrics.CallbackOptions` and return + multiple :class:`~opentelemetry.metrics.Observation`. + + For example, an observable counter could be used to report system CPU + time periodically. Here is a basic implementation:: + + def cpu_time_callback(options: CallbackOptions) -> Iterable[Observation]: + observations = [] + with open("/proc/stat") as procstat: + procstat.readline() # skip the first line + for line in procstat: + if not line.startswith("cpu"): break + cpu, *states = line.split() + observations.append(Observation(int(states[0]) // 100, {"cpu": cpu, "state": "user"})) + observations.append(Observation(int(states[1]) // 100, {"cpu": cpu, "state": "nice"})) + observations.append(Observation(int(states[2]) // 100, {"cpu": cpu, "state": "system"})) + # ... other states + return observations + + meter.create_observable_counter( + "system.cpu.time", + callbacks=[cpu_time_callback], + unit="s", + description="CPU time" + ) + + To reduce memory usage, you can use generator callbacks instead of + building the full list:: + + def cpu_time_callback(options: CallbackOptions) -> Iterable[Observation]: + with open("/proc/stat") as procstat: + procstat.readline() # skip the first line + for line in procstat: + if not line.startswith("cpu"): break + cpu, *states = line.split() + yield Observation(int(states[0]) // 100, {"cpu": cpu, "state": "user"}) + yield Observation(int(states[1]) // 100, {"cpu": cpu, "state": "nice"}) + # ... 
other states + + Alternatively, you can pass a sequence of generators directly instead of a sequence of + callbacks, which each should return iterables of :class:`~opentelemetry.metrics.Observation`:: + + def cpu_time_callback(states_to_include: set[str]) -> Iterable[Iterable[Observation]]: + # accept options sent in from OpenTelemetry + options = yield + while True: + observations = [] + with open("/proc/stat") as procstat: + procstat.readline() # skip the first line + for line in procstat: + if not line.startswith("cpu"): break + cpu, *states = line.split() + if "user" in states_to_include: + observations.append(Observation(int(states[0]) // 100, {"cpu": cpu, "state": "user"})) + if "nice" in states_to_include: + observations.append(Observation(int(states[1]) // 100, {"cpu": cpu, "state": "nice"})) + # ... other states + # yield the observations and receive the options for next iteration + options = yield observations + + meter.create_observable_counter( + "system.cpu.time", + callbacks=[cpu_time_callback({"user", "system"})], + unit="s", + description="CPU time" + ) + + The :class:`~opentelemetry.metrics.CallbackOptions` contain a timeout which the + callback should respect. For example if the callback does asynchronous work, like + making HTTP requests, it should respect the timeout:: + + def scrape_http_callback(options: CallbackOptions) -> Iterable[Observation]: + r = requests.get('http://scrapethis.com', timeout=options.timeout_millis / 10**3) + for value in r.json(): + yield Observation(value) + + Args: + name: The name of the instrument to be created + callbacks: A sequence of callbacks that return an iterable of + :class:`~opentelemetry.metrics.Observation`. Alternatively, can be a sequence of generators that each + yields iterables of :class:`~opentelemetry.metrics.Observation`. + unit: The unit for observations this instrument reports. For + example, ``By`` for bytes. UCUM units are recommended. 
+ description: A description for this instrument and what it measures. + """ + + @abstractmethod + def create_histogram( + self, + name: str, + unit: str = "", + description: str = "", + *, + explicit_bucket_boundaries_advisory: Optional[Sequence[float]] = None, + ) -> Histogram: + """Creates a :class:`~opentelemetry.metrics.Histogram` instrument + + Args: + name: The name of the instrument to be created + unit: The unit for observations this instrument reports. For + example, ``By`` for bytes. UCUM units are recommended. + description: A description for this instrument and what it measures. + """ + + def create_gauge( # type: ignore # pylint: disable=no-self-use + self, + name: str, + unit: str = "", + description: str = "", + ) -> Gauge: # pyright: ignore[reportReturnType] + """Creates a ``Gauge`` instrument + + Args: + name: The name of the instrument to be created + unit: The unit for observations this instrument reports. For + example, ``By`` for bytes. UCUM units are recommended. + description: A description for this instrument and what it measures. + """ + warnings.warn("create_gauge() is not implemented and will be a no-op") + + @abstractmethod + def create_observable_gauge( + self, + name: str, + callbacks: Optional[Sequence[CallbackT]] = None, + unit: str = "", + description: str = "", + ) -> ObservableGauge: + """Creates an `ObservableGauge` instrument + + Args: + name: The name of the instrument to be created + callbacks: A sequence of callbacks that return an iterable of + :class:`~opentelemetry.metrics.Observation`. Alternatively, can be a generator that yields iterables + of :class:`~opentelemetry.metrics.Observation`. + unit: The unit for observations this instrument reports. For + example, ``By`` for bytes. UCUM units are recommended. + description: A description for this instrument and what it measures. 
+ """ + + @abstractmethod + def create_observable_up_down_counter( + self, + name: str, + callbacks: Optional[Sequence[CallbackT]] = None, + unit: str = "", + description: str = "", + ) -> ObservableUpDownCounter: + """Creates an `ObservableUpDownCounter` instrument + + Args: + name: The name of the instrument to be created + callbacks: A sequence of callbacks that return an iterable of + :class:`~opentelemetry.metrics.Observation`. Alternatively, can be a generator that yields iterables + of :class:`~opentelemetry.metrics.Observation`. + unit: The unit for observations this instrument reports. For + example, ``By`` for bytes. UCUM units are recommended. + description: A description for this instrument and what it measures. + """ + + +class _ProxyMeter(Meter): + def __init__( + self, + name: str, + version: Optional[str] = None, + schema_url: Optional[str] = None, + ) -> None: + super().__init__(name, version=version, schema_url=schema_url) + self._lock = Lock() + self._instruments: List[_ProxyInstrumentT] = [] + self._real_meter: Optional[Meter] = None + + def on_set_meter_provider(self, meter_provider: MeterProvider) -> None: + """Called when a real meter provider is set on the creating _ProxyMeterProvider + + Creates a real backing meter for this instance and notifies all created + instruments so they can create real backing instruments. 
+ """ + real_meter = meter_provider.get_meter( + self._name, self._version, self._schema_url + ) + + with self._lock: + self._real_meter = real_meter + # notify all proxy instruments of the new meter so they can create + # real instruments to back themselves + for instrument in self._instruments: + instrument.on_meter_set(real_meter) + + def create_counter( + self, + name: str, + unit: str = "", + description: str = "", + ) -> Counter: + with self._lock: + if self._real_meter: + return self._real_meter.create_counter(name, unit, description) + proxy = _ProxyCounter(name, unit, description) + self._instruments.append(proxy) + return proxy + + def create_up_down_counter( + self, + name: str, + unit: str = "", + description: str = "", + ) -> UpDownCounter: + with self._lock: + if self._real_meter: + return self._real_meter.create_up_down_counter( + name, unit, description + ) + proxy = _ProxyUpDownCounter(name, unit, description) + self._instruments.append(proxy) + return proxy + + def create_observable_counter( + self, + name: str, + callbacks: Optional[Sequence[CallbackT]] = None, + unit: str = "", + description: str = "", + ) -> ObservableCounter: + with self._lock: + if self._real_meter: + return self._real_meter.create_observable_counter( + name, callbacks, unit, description + ) + proxy = _ProxyObservableCounter( + name, callbacks, unit=unit, description=description + ) + self._instruments.append(proxy) + return proxy + + def create_histogram( + self, + name: str, + unit: str = "", + description: str = "", + *, + explicit_bucket_boundaries_advisory: Optional[Sequence[float]] = None, + ) -> Histogram: + with self._lock: + if self._real_meter: + return self._real_meter.create_histogram( + name, + unit, + description, + explicit_bucket_boundaries_advisory=explicit_bucket_boundaries_advisory, + ) + proxy = _ProxyHistogram( + name, unit, description, explicit_bucket_boundaries_advisory + ) + self._instruments.append(proxy) + return proxy + + def create_gauge( + self, 
+ name: str, + unit: str = "", + description: str = "", + ) -> Gauge: + with self._lock: + if self._real_meter: + return self._real_meter.create_gauge(name, unit, description) + proxy = _ProxyGauge(name, unit, description) + self._instruments.append(proxy) + return proxy + + def create_observable_gauge( + self, + name: str, + callbacks: Optional[Sequence[CallbackT]] = None, + unit: str = "", + description: str = "", + ) -> ObservableGauge: + with self._lock: + if self._real_meter: + return self._real_meter.create_observable_gauge( + name, callbacks, unit, description + ) + proxy = _ProxyObservableGauge( + name, callbacks, unit=unit, description=description + ) + self._instruments.append(proxy) + return proxy + + def create_observable_up_down_counter( + self, + name: str, + callbacks: Optional[Sequence[CallbackT]] = None, + unit: str = "", + description: str = "", + ) -> ObservableUpDownCounter: + with self._lock: + if self._real_meter: + return self._real_meter.create_observable_up_down_counter( + name, + callbacks, + unit, + description, + ) + proxy = _ProxyObservableUpDownCounter( + name, callbacks, unit=unit, description=description + ) + self._instruments.append(proxy) + return proxy + + +class NoOpMeter(Meter): + """The default Meter used when no Meter implementation is available. + + All operations are no-op. 
+ """ + + def create_counter( + self, + name: str, + unit: str = "", + description: str = "", + ) -> Counter: + """Returns a no-op Counter.""" + status = self._register_instrument( + name, NoOpCounter, unit, description + ) + if status.conflict: + self._log_instrument_registration_conflict( + name, + Counter.__name__, + unit, + description, + status, + ) + + return NoOpCounter(name, unit=unit, description=description) + + def create_gauge( + self, + name: str, + unit: str = "", + description: str = "", + ) -> Gauge: + """Returns a no-op Gauge.""" + status = self._register_instrument(name, NoOpGauge, unit, description) + if status.conflict: + self._log_instrument_registration_conflict( + name, + Gauge.__name__, + unit, + description, + status, + ) + return NoOpGauge(name, unit=unit, description=description) + + def create_up_down_counter( + self, + name: str, + unit: str = "", + description: str = "", + ) -> UpDownCounter: + """Returns a no-op UpDownCounter.""" + status = self._register_instrument( + name, NoOpUpDownCounter, unit, description + ) + if status.conflict: + self._log_instrument_registration_conflict( + name, + UpDownCounter.__name__, + unit, + description, + status, + ) + return NoOpUpDownCounter(name, unit=unit, description=description) + + def create_observable_counter( + self, + name: str, + callbacks: Optional[Sequence[CallbackT]] = None, + unit: str = "", + description: str = "", + ) -> ObservableCounter: + """Returns a no-op ObservableCounter.""" + status = self._register_instrument( + name, NoOpObservableCounter, unit, description + ) + if status.conflict: + self._log_instrument_registration_conflict( + name, + ObservableCounter.__name__, + unit, + description, + status, + ) + return NoOpObservableCounter( + name, + callbacks, + unit=unit, + description=description, + ) + + def create_histogram( + self, + name: str, + unit: str = "", + description: str = "", + *, + explicit_bucket_boundaries_advisory: Optional[Sequence[float]] = None, + ) -> 
Histogram: + """Returns a no-op Histogram.""" + status = self._register_instrument( + name, + NoOpHistogram, + unit, + description, + _MetricsHistogramAdvisory( + explicit_bucket_boundaries=explicit_bucket_boundaries_advisory + ), + ) + if status.conflict: + self._log_instrument_registration_conflict( + name, + Histogram.__name__, + unit, + description, + status, + ) + return NoOpHistogram( + name, + unit=unit, + description=description, + explicit_bucket_boundaries_advisory=explicit_bucket_boundaries_advisory, + ) + + def create_observable_gauge( + self, + name: str, + callbacks: Optional[Sequence[CallbackT]] = None, + unit: str = "", + description: str = "", + ) -> ObservableGauge: + """Returns a no-op ObservableGauge.""" + status = self._register_instrument( + name, NoOpObservableGauge, unit, description + ) + if status.conflict: + self._log_instrument_registration_conflict( + name, + ObservableGauge.__name__, + unit, + description, + status, + ) + return NoOpObservableGauge( + name, + callbacks, + unit=unit, + description=description, + ) + + def create_observable_up_down_counter( + self, + name: str, + callbacks: Optional[Sequence[CallbackT]] = None, + unit: str = "", + description: str = "", + ) -> ObservableUpDownCounter: + """Returns a no-op ObservableUpDownCounter.""" + status = self._register_instrument( + name, NoOpObservableUpDownCounter, unit, description + ) + if status.conflict: + self._log_instrument_registration_conflict( + name, + ObservableUpDownCounter.__name__, + unit, + description, + status, + ) + return NoOpObservableUpDownCounter( + name, + callbacks, + unit=unit, + description=description, + ) + + +_METER_PROVIDER_SET_ONCE = Once() +_METER_PROVIDER: Optional[MeterProvider] = None +_PROXY_METER_PROVIDER = _ProxyMeterProvider() + + +def get_meter( + name: str, + version: str = "", + meter_provider: Optional[MeterProvider] = None, + schema_url: Optional[str] = None, + attributes: Optional[Attributes] = None, +) -> "Meter": + """Returns a 
`Meter` for use by the given instrumentation library. + + This function is a convenience wrapper for + `opentelemetry.metrics.MeterProvider.get_meter`. + + If meter_provider is omitted the current configured one is used. + """ + if meter_provider is None: + meter_provider = get_meter_provider() + return meter_provider.get_meter(name, version, schema_url, attributes) + + +def _set_meter_provider(meter_provider: MeterProvider, log: bool) -> None: + def set_mp() -> None: + global _METER_PROVIDER # pylint: disable=global-statement + _METER_PROVIDER = meter_provider + + # gives all proxies real instruments off the newly set meter provider + _PROXY_METER_PROVIDER.on_set_meter_provider(meter_provider) + + did_set = _METER_PROVIDER_SET_ONCE.do_once(set_mp) + + if log and not did_set: + _logger.warning("Overriding of current MeterProvider is not allowed") + + +def set_meter_provider(meter_provider: MeterProvider) -> None: + """Sets the current global :class:`~.MeterProvider` object. + + This can only be done once, a warning will be logged if any further attempt + is made. 
+ """ + _set_meter_provider(meter_provider, log=True) + + +def get_meter_provider() -> MeterProvider: + """Gets the current global :class:`~.MeterProvider` object.""" + + if _METER_PROVIDER is None: + if OTEL_PYTHON_METER_PROVIDER not in environ: + return _PROXY_METER_PROVIDER + + meter_provider: MeterProvider = _load_provider( # type: ignore + OTEL_PYTHON_METER_PROVIDER, "meter_provider" + ) + _set_meter_provider(meter_provider, log=False) + + # _METER_PROVIDER will have been set by one thread + return cast("MeterProvider", _METER_PROVIDER) diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/metrics/_internal/instrument.py b/.venv/lib/python3.12/site-packages/opentelemetry/metrics/_internal/instrument.py new file mode 100644 index 00000000..0d5ec951 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/metrics/_internal/instrument.py @@ -0,0 +1,530 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# pylint: disable=too-many-ancestors + + +from abc import ABC, abstractmethod +from dataclasses import dataclass +from logging import getLogger +from re import compile as re_compile +from typing import ( + Callable, + Dict, + Generator, + Generic, + Iterable, + Optional, + Sequence, + TypeVar, + Union, +) + +# pylint: disable=unused-import; needed for typing and sphinx +from opentelemetry import metrics +from opentelemetry.context import Context +from opentelemetry.metrics._internal.observation import Observation +from opentelemetry.util.types import ( + Attributes, +) + +_logger = getLogger(__name__) + +_name_regex = re_compile(r"[a-zA-Z][-_./a-zA-Z0-9]{0,254}") +_unit_regex = re_compile(r"[\x00-\x7F]{0,63}") + + +@dataclass(frozen=True) +class _MetricsHistogramAdvisory: + explicit_bucket_boundaries: Optional[Sequence[float]] = None + + +@dataclass(frozen=True) +class CallbackOptions: + """Options for the callback + + Args: + timeout_millis: Timeout for the callback's execution. If the callback does asynchronous + work (e.g. HTTP requests), it should respect this timeout. + """ + + timeout_millis: float = 10_000 + + +InstrumentT = TypeVar("InstrumentT", bound="Instrument") +# pylint: disable=invalid-name +CallbackT = Union[ + Callable[[CallbackOptions], Iterable[Observation]], + Generator[Iterable[Observation], CallbackOptions, None], +] + + +class Instrument(ABC): + """Abstract class that serves as base for all instruments.""" + + @abstractmethod + def __init__( + self, + name: str, + unit: str = "", + description: str = "", + ) -> None: + pass + + @staticmethod + def _check_name_unit_description( + name: str, unit: str, description: str + ) -> Dict[str, Optional[str]]: + """ + Checks the following instrument name, unit and description for + compliance with the spec. + + Returns a dict with keys "name", "unit" and "description", the + corresponding values will be the checked strings or `None` if the value + is invalid. 
If valid, the checked strings should be used instead of the + original values. + """ + + result: Dict[str, Optional[str]] = {} + + if _name_regex.fullmatch(name) is not None: + result["name"] = name + else: + result["name"] = None + + if unit is None: + unit = "" + if _unit_regex.fullmatch(unit) is not None: + result["unit"] = unit + else: + result["unit"] = None + + if description is None: + result["description"] = "" + else: + result["description"] = description + + return result + + +class _ProxyInstrument(ABC, Generic[InstrumentT]): + def __init__( + self, + name: str, + unit: str = "", + description: str = "", + ) -> None: + self._name = name + self._unit = unit + self._description = description + self._real_instrument: Optional[InstrumentT] = None + + def on_meter_set(self, meter: "metrics.Meter") -> None: + """Called when a real meter is set on the creating _ProxyMeter""" + + # We don't need any locking on proxy instruments because it's OK if some + # measurements get dropped while a real backing instrument is being + # created. + self._real_instrument = self._create_real_instrument(meter) + + @abstractmethod + def _create_real_instrument(self, meter: "metrics.Meter") -> InstrumentT: + """Create an instance of the real instrument. 
Implement this.""" + + +class _ProxyAsynchronousInstrument(_ProxyInstrument[InstrumentT]): + def __init__( + self, + name: str, + callbacks: Optional[Sequence[CallbackT]] = None, + unit: str = "", + description: str = "", + ) -> None: + super().__init__(name, unit, description) + self._callbacks = callbacks + + +class Synchronous(Instrument): + """Base class for all synchronous instruments""" + + +class Asynchronous(Instrument): + """Base class for all asynchronous instruments""" + + @abstractmethod + def __init__( + self, + name: str, + callbacks: Optional[Sequence[CallbackT]] = None, + unit: str = "", + description: str = "", + ) -> None: + super().__init__(name, unit=unit, description=description) + + +class Counter(Synchronous): + """A Counter is a synchronous `Instrument` which supports non-negative increments.""" + + @abstractmethod + def add( + self, + amount: Union[int, float], + attributes: Optional[Attributes] = None, + context: Optional[Context] = None, + ) -> None: + pass + + +class NoOpCounter(Counter): + """No-op implementation of `Counter`.""" + + def __init__( + self, + name: str, + unit: str = "", + description: str = "", + ) -> None: + super().__init__(name, unit=unit, description=description) + + def add( + self, + amount: Union[int, float], + attributes: Optional[Attributes] = None, + context: Optional[Context] = None, + ) -> None: + return super().add(amount, attributes=attributes, context=context) + + +class _ProxyCounter(_ProxyInstrument[Counter], Counter): + def add( + self, + amount: Union[int, float], + attributes: Optional[Attributes] = None, + context: Optional[Context] = None, + ) -> None: + if self._real_instrument: + self._real_instrument.add(amount, attributes, context) + + def _create_real_instrument(self, meter: "metrics.Meter") -> Counter: + return meter.create_counter( + self._name, + self._unit, + self._description, + ) + + +class UpDownCounter(Synchronous): + """An UpDownCounter is a synchronous `Instrument` which supports 
increments and decrements.""" + + @abstractmethod + def add( + self, + amount: Union[int, float], + attributes: Optional[Attributes] = None, + context: Optional[Context] = None, + ) -> None: + pass + + +class NoOpUpDownCounter(UpDownCounter): + """No-op implementation of `UpDownCounter`.""" + + def __init__( + self, + name: str, + unit: str = "", + description: str = "", + ) -> None: + super().__init__(name, unit=unit, description=description) + + def add( + self, + amount: Union[int, float], + attributes: Optional[Attributes] = None, + context: Optional[Context] = None, + ) -> None: + return super().add(amount, attributes=attributes, context=context) + + +class _ProxyUpDownCounter(_ProxyInstrument[UpDownCounter], UpDownCounter): + def add( + self, + amount: Union[int, float], + attributes: Optional[Attributes] = None, + context: Optional[Context] = None, + ) -> None: + if self._real_instrument: + self._real_instrument.add(amount, attributes, context) + + def _create_real_instrument(self, meter: "metrics.Meter") -> UpDownCounter: + return meter.create_up_down_counter( + self._name, + self._unit, + self._description, + ) + + +class ObservableCounter(Asynchronous): + """An ObservableCounter is an asynchronous `Instrument` which reports monotonically + increasing value(s) when the instrument is being observed. 
+ """ + + +class NoOpObservableCounter(ObservableCounter): + """No-op implementation of `ObservableCounter`.""" + + def __init__( + self, + name: str, + callbacks: Optional[Sequence[CallbackT]] = None, + unit: str = "", + description: str = "", + ) -> None: + super().__init__( + name, + callbacks, + unit=unit, + description=description, + ) + + +class _ProxyObservableCounter( + _ProxyAsynchronousInstrument[ObservableCounter], ObservableCounter +): + def _create_real_instrument( + self, meter: "metrics.Meter" + ) -> ObservableCounter: + return meter.create_observable_counter( + self._name, + self._callbacks, + self._unit, + self._description, + ) + + +class ObservableUpDownCounter(Asynchronous): + """An ObservableUpDownCounter is an asynchronous `Instrument` which reports additive value(s) (e.g. + the process heap size - it makes sense to report the heap size from multiple processes and sum them + up, so we get the total heap usage) when the instrument is being observed. + """ + + +class NoOpObservableUpDownCounter(ObservableUpDownCounter): + """No-op implementation of `ObservableUpDownCounter`.""" + + def __init__( + self, + name: str, + callbacks: Optional[Sequence[CallbackT]] = None, + unit: str = "", + description: str = "", + ) -> None: + super().__init__( + name, + callbacks, + unit=unit, + description=description, + ) + + +class _ProxyObservableUpDownCounter( + _ProxyAsynchronousInstrument[ObservableUpDownCounter], + ObservableUpDownCounter, +): + def _create_real_instrument( + self, meter: "metrics.Meter" + ) -> ObservableUpDownCounter: + return meter.create_observable_up_down_counter( + self._name, + self._callbacks, + self._unit, + self._description, + ) + + +class Histogram(Synchronous): + """Histogram is a synchronous `Instrument` which can be used to report arbitrary values + that are likely to be statistically meaningful. It is intended for statistics such as + histograms, summaries, and percentile. 
+ """ + + @abstractmethod + def __init__( + self, + name: str, + unit: str = "", + description: str = "", + explicit_bucket_boundaries_advisory: Optional[Sequence[float]] = None, + ) -> None: + pass + + @abstractmethod + def record( + self, + amount: Union[int, float], + attributes: Optional[Attributes] = None, + context: Optional[Context] = None, + ) -> None: + pass + + +class NoOpHistogram(Histogram): + """No-op implementation of `Histogram`.""" + + def __init__( + self, + name: str, + unit: str = "", + description: str = "", + explicit_bucket_boundaries_advisory: Optional[Sequence[float]] = None, + ) -> None: + super().__init__( + name, + unit=unit, + description=description, + explicit_bucket_boundaries_advisory=explicit_bucket_boundaries_advisory, + ) + + def record( + self, + amount: Union[int, float], + attributes: Optional[Attributes] = None, + context: Optional[Context] = None, + ) -> None: + return super().record(amount, attributes=attributes, context=context) + + +class _ProxyHistogram(_ProxyInstrument[Histogram], Histogram): + def __init__( + self, + name: str, + unit: str = "", + description: str = "", + explicit_bucket_boundaries_advisory: Optional[Sequence[float]] = None, + ) -> None: + super().__init__(name, unit=unit, description=description) + self._explicit_bucket_boundaries_advisory = ( + explicit_bucket_boundaries_advisory + ) + + def record( + self, + amount: Union[int, float], + attributes: Optional[Attributes] = None, + context: Optional[Context] = None, + ) -> None: + if self._real_instrument: + self._real_instrument.record(amount, attributes, context) + + def _create_real_instrument(self, meter: "metrics.Meter") -> Histogram: + return meter.create_histogram( + self._name, + self._unit, + self._description, + explicit_bucket_boundaries_advisory=self._explicit_bucket_boundaries_advisory, + ) + + +class ObservableGauge(Asynchronous): + """Asynchronous Gauge is an asynchronous `Instrument` which reports non-additive value(s) (e.g. 
+ the room temperature - it makes no sense to report the temperature value from multiple rooms + and sum them up) when the instrument is being observed. + """ + + +class NoOpObservableGauge(ObservableGauge): + """No-op implementation of `ObservableGauge`.""" + + def __init__( + self, + name: str, + callbacks: Optional[Sequence[CallbackT]] = None, + unit: str = "", + description: str = "", + ) -> None: + super().__init__( + name, + callbacks, + unit=unit, + description=description, + ) + + +class _ProxyObservableGauge( + _ProxyAsynchronousInstrument[ObservableGauge], + ObservableGauge, +): + def _create_real_instrument( + self, meter: "metrics.Meter" + ) -> ObservableGauge: + return meter.create_observable_gauge( + self._name, + self._callbacks, + self._unit, + self._description, + ) + + +class Gauge(Synchronous): + """A Gauge is a synchronous `Instrument` which can be used to record non-additive values as they occur.""" + + @abstractmethod + def set( + self, + amount: Union[int, float], + attributes: Optional[Attributes] = None, + context: Optional[Context] = None, + ) -> None: + pass + + +class NoOpGauge(Gauge): + """No-op implementation of ``Gauge``.""" + + def __init__( + self, + name: str, + unit: str = "", + description: str = "", + ) -> None: + super().__init__(name, unit=unit, description=description) + + def set( + self, + amount: Union[int, float], + attributes: Optional[Attributes] = None, + context: Optional[Context] = None, + ) -> None: + return super().set(amount, attributes=attributes, context=context) + + +class _ProxyGauge( + _ProxyInstrument[Gauge], + Gauge, +): + def set( + self, + amount: Union[int, float], + attributes: Optional[Attributes] = None, + context: Optional[Context] = None, + ) -> None: + if self._real_instrument: + self._real_instrument.set(amount, attributes, context) + + def _create_real_instrument(self, meter: "metrics.Meter") -> Gauge: + return meter.create_gauge( + self._name, + self._unit, + self._description, + ) diff --git 
a/.venv/lib/python3.12/site-packages/opentelemetry/metrics/_internal/observation.py b/.venv/lib/python3.12/site-packages/opentelemetry/metrics/_internal/observation.py new file mode 100644 index 00000000..ffc254b2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/metrics/_internal/observation.py @@ -0,0 +1,63 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Optional, Union + +from opentelemetry.context import Context +from opentelemetry.util.types import Attributes + + +class Observation: + """A measurement observed in an asynchronous instrument + + Return/yield instances of this class from asynchronous instrument callbacks. 
+ + Args: + value: The float or int measured value + attributes: The measurement's attributes + context: The measurement's context + """ + + def __init__( + self, + value: Union[int, float], + attributes: Attributes = None, + context: Optional[Context] = None, + ) -> None: + self._value = value + self._attributes = attributes + self._context = context + + @property + def value(self) -> Union[float, int]: + return self._value + + @property + def attributes(self) -> Attributes: + return self._attributes + + @property + def context(self) -> Optional[Context]: + return self._context + + def __eq__(self, other: object) -> bool: + return ( + isinstance(other, Observation) + and self.value == other.value + and self.attributes == other.attributes + and self.context == other.context + ) + + def __repr__(self) -> str: + return f"Observation(value={self.value}, attributes={self.attributes}, context={self.context})" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/metrics/py.typed b/.venv/lib/python3.12/site-packages/opentelemetry/metrics/py.typed new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/metrics/py.typed diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/propagate/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/propagate/__init__.py new file mode 100644 index 00000000..afe406fb --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/propagate/__init__.py @@ -0,0 +1,167 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +""" +API for propagation of context. + +The propagators for the +``opentelemetry.propagators.composite.CompositePropagator`` can be defined +via configuration in the ``OTEL_PROPAGATORS`` environment variable. This +variable should be set to a comma-separated string of names of values for the +``opentelemetry_propagator`` entry point. For example, setting +``OTEL_PROPAGATORS`` to ``tracecontext,baggage`` (which is the default value) +would instantiate +``opentelemetry.propagators.composite.CompositePropagator`` with 2 +propagators, one of type +``opentelemetry.trace.propagation.tracecontext.TraceContextTextMapPropagator`` +and other of type ``opentelemetry.baggage.propagation.W3CBaggagePropagator``. +Notice that these propagator classes are defined as +``opentelemetry_propagator`` entry points in the ``pyproject.toml`` file of +``opentelemetry``. + +Example:: + + import flask + import requests + from opentelemetry import propagate + + + PROPAGATOR = propagate.get_global_textmap() + + + def get_header_from_flask_request(request, key): + return request.headers.get_all(key) + + def set_header_into_requests_request(request: requests.Request, + key: str, value: str): + request.headers[key] = value + + def example_route(): + context = PROPAGATOR.extract( + get_header_from_flask_request, + flask.request + ) + request_to_downstream = requests.Request( + "GET", "http://httpbin.org/get" + ) + PROPAGATOR.inject( + set_header_into_requests_request, + request_to_downstream, + context=context + ) + session = requests.Session() + session.send(request_to_downstream.prepare()) + + +.. 
_Propagation API Specification: + https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/context/api-propagators.md +""" + +from logging import getLogger +from os import environ +from typing import Optional + +from opentelemetry.context.context import Context +from opentelemetry.environment_variables import OTEL_PROPAGATORS +from opentelemetry.propagators import composite, textmap +from opentelemetry.util._importlib_metadata import entry_points + +logger = getLogger(__name__) + + +def extract( + carrier: textmap.CarrierT, + context: Optional[Context] = None, + getter: textmap.Getter[textmap.CarrierT] = textmap.default_getter, +) -> Context: + """Uses the configured propagator to extract a Context from the carrier. + + Args: + getter: an object which contains a get function that can retrieve zero + or more values from the carrier and a keys function that can get all the keys + from carrier. + carrier: and object which contains values that are + used to construct a Context. This object + must be paired with an appropriate getter + which understands how to extract a value from it. + context: an optional Context to use. Defaults to root + context if not set. + """ + return get_global_textmap().extract(carrier, context, getter=getter) + + +def inject( + carrier: textmap.CarrierT, + context: Optional[Context] = None, + setter: textmap.Setter[textmap.CarrierT] = textmap.default_setter, +) -> None: + """Uses the configured propagator to inject a Context into the carrier. + + Args: + carrier: the medium used by Propagators to read + values from and write values to. + Should be paired with setter, which + should know how to set header values on the carrier. + context: An optional Context to use. Defaults to current + context if not set. + setter: An optional `Setter` object that can set values + on the carrier. 
+ """ + get_global_textmap().inject(carrier, context=context, setter=setter) + + +propagators = [] + +# Single use variable here to hack black and make lint pass +environ_propagators = environ.get( + OTEL_PROPAGATORS, + "tracecontext,baggage", +) + + +for propagator in environ_propagators.split(","): + propagator = propagator.strip() + + try: + propagators.append( # type: ignore + next( # type: ignore + iter( # type: ignore + entry_points( # type: ignore + group="opentelemetry_propagator", + name=propagator, + ) + ) + ).load()() + ) + except StopIteration: + raise ValueError( + f"Propagator {propagator} not found. It is either misspelled or not installed." + ) + except Exception: # pylint: disable=broad-exception-caught + logger.exception("Failed to load propagator: %s", propagator) + raise + + +_HTTP_TEXT_FORMAT = composite.CompositePropagator(propagators) # type: ignore + + +def get_global_textmap() -> textmap.TextMapPropagator: + return _HTTP_TEXT_FORMAT + + +def set_global_textmap( + http_text_format: textmap.TextMapPropagator, +) -> None: + global _HTTP_TEXT_FORMAT # pylint:disable=global-statement + _HTTP_TEXT_FORMAT = http_text_format # type: ignore diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/propagate/py.typed b/.venv/lib/python3.12/site-packages/opentelemetry/propagate/py.typed new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/propagate/py.typed diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/propagators/composite.py b/.venv/lib/python3.12/site-packages/opentelemetry/propagators/composite.py new file mode 100644 index 00000000..77330d94 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/propagators/composite.py @@ -0,0 +1,91 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import typing

from deprecated import deprecated

from opentelemetry.context.context import Context
from opentelemetry.propagators import textmap

logger = logging.getLogger(__name__)


class CompositePropagator(textmap.TextMapPropagator):
    """Combines several propagators behind the single
    `textmap.TextMapPropagator` interface.

    Args:
        propagators: the list of propagators to use
    """

    def __init__(
        self, propagators: typing.Sequence[textmap.TextMapPropagator]
    ) -> None:
        self._propagators = propagators

    def extract(
        self,
        carrier: textmap.CarrierT,
        context: typing.Optional[Context] = None,
        getter: textmap.Getter[textmap.CarrierT] = textmap.default_getter,
    ) -> Context:
        """Feed the carrier through every configured propagator, in order.

        Each propagator receives the context produced by the previous one,
        so when two propagators write the same context key, the one later
        in the list wins.

        See `opentelemetry.propagators.textmap.TextMapPropagator.extract`
        """
        current = context
        for prop in self._propagators:
            current = prop.extract(carrier, current, getter=getter)
        return current  # type: ignore

    def inject(
        self,
        carrier: textmap.CarrierT,
        context: typing.Optional[Context] = None,
        setter: textmap.Setter[textmap.CarrierT] = textmap.default_setter,
    ) -> None:
        """Let every configured propagator write into the carrier, in order.

        When two propagators set the same carrier key, the one later in
        the list wins.

        See `opentelemetry.propagators.textmap.TextMapPropagator.inject`
        """
        for prop in self._propagators:
            prop.inject(carrier, context, setter=setter)

    @property
    def fields(self) -> typing.Set[str]:
        """The union of the carrier fields written by all configured
        propagators' `inject`.

        See
        `opentelemetry.propagators.textmap.TextMapPropagator.fields`
        """
        return {
            field
            for prop in self._propagators
            for field in prop.fields
        }


@deprecated(version="1.2.0", reason="You should use CompositePropagator")  # type: ignore
class CompositeHTTPPropagator(CompositePropagator):
    """CompositeHTTPPropagator provides a mechanism for combining multiple
    propagators into a single one.
    """
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import abc
import typing

from opentelemetry.context.context import Context

# Type of the object the propagator reads from / writes to, e.g. a dict of
# HTTP headers or a framework request object.
CarrierT = typing.TypeVar("CarrierT")
# pylint: disable=invalid-name
# A single carrier entry may hold one header value or a list of values.
CarrierValT = typing.Union[typing.List[str], str]


class Getter(abc.ABC, typing.Generic[CarrierT]):
    """This class implements a Getter that enables extracting propagated
    fields from a carrier.
    """

    @abc.abstractmethod
    def get(
        self, carrier: CarrierT, key: str
    ) -> typing.Optional[typing.List[str]]:
        """Function that can retrieve zero
        or more values from the carrier. In the case that
        the value does not exist, returns None.

        Args:
            carrier: An object which contains values that are used to
                construct a Context.
            key: key of a field in carrier.
        Returns: the values of the propagation key as a list, or None if
            the key doesn't exist.
        """

    @abc.abstractmethod
    def keys(self, carrier: CarrierT) -> typing.List[str]:
        """Function that can retrieve all the keys in a carrier object.

        Args:
            carrier: An object which contains values that are
                used to construct a Context.
        Returns:
            list of keys from the carrier.
        """


class Setter(abc.ABC, typing.Generic[CarrierT]):
    """This class implements a Setter that enables injecting propagated
    fields into a carrier.
    """

    @abc.abstractmethod
    def set(self, carrier: CarrierT, key: str, value: str) -> None:
        """Function that can set a value into a carrier.

        Args:
            carrier: An object which contains values that are used to
                construct a Context.
            key: key of a field in carrier.
            value: value for a field in carrier.
        """


class DefaultGetter(Getter[typing.Mapping[str, CarrierValT]]):
    # Getter for plain mapping carriers (e.g. a dict of headers).
    def get(
        self, carrier: typing.Mapping[str, CarrierValT], key: str
    ) -> typing.Optional[typing.List[str]]:
        """Getter implementation to retrieve a value from a dictionary.

        Args:
            carrier: dictionary in which to get value
            key: the key used to get the value
        Returns:
            The value normalized to a list: a non-string iterable value is
            converted with list(), a plain string is wrapped in a
            one-element list. None if the key does not exist.
        """
        val = carrier.get(key, None)
        if val is None:
            return None
        if isinstance(val, typing.Iterable) and not isinstance(val, str):
            return list(val)
        return [val]

    def keys(
        self, carrier: typing.Mapping[str, CarrierValT]
    ) -> typing.List[str]:
        """Keys implementation that returns all keys from a dictionary."""
        return list(carrier.keys())


default_getter: Getter[CarrierT] = DefaultGetter()  # type: ignore


class DefaultSetter(Setter[typing.MutableMapping[str, CarrierValT]]):
    # Setter for plain mutable mapping carriers (e.g. a dict of headers).
    def set(
        self,
        carrier: typing.MutableMapping[str, CarrierValT],
        key: str,
        value: CarrierValT,
    ) -> None:
        """Setter implementation to set a value into a dictionary.

        Args:
            carrier: dictionary in which to set value
            key: the key used to set the value
            value: the value to set
        """
        carrier[key] = value


default_setter: Setter[CarrierT] = DefaultSetter()  # type: ignore


class TextMapPropagator(abc.ABC):
    """This class provides an interface that enables extracting and injecting
    context into headers of HTTP requests. HTTP frameworks and clients
    can integrate with TextMapPropagator by providing the object containing the
    headers, and a getter and setter function for the extraction and
    injection of values, respectively.

    """

    @abc.abstractmethod
    def extract(
        self,
        carrier: CarrierT,
        context: typing.Optional[Context] = None,
        getter: Getter[CarrierT] = default_getter,
    ) -> Context:
        """Create a Context from values in the carrier.

        The extract function should retrieve values from the carrier
        object using getter, and use values to populate a
        Context value and return it.

        Args:
            getter: a function that can retrieve zero
                or more values from the carrier. In the case that
                the value does not exist, return an empty list.
            carrier: an object which contains values that are
                used to construct a Context. This object
                must be paired with an appropriate getter
                which understands how to extract a value from it.
            context: an optional Context to use. Defaults to root
                context if not set.
        Returns:
            A Context with configuration found in the carrier.

        """

    @abc.abstractmethod
    def inject(
        self,
        carrier: CarrierT,
        context: typing.Optional[Context] = None,
        setter: Setter[CarrierT] = default_setter,
    ) -> None:
        """Inject values from a Context into a carrier.

        inject enables the propagation of values into HTTP clients or
        other objects which perform an HTTP request. Implementations
        should use the `Setter` 's set method to set values on the
        carrier.

        Args:
            carrier: An object that serves as a place to define HTTP
                headers. Should be paired with setter, which should
                know how to set header values on the carrier.
            context: an optional Context to use. Defaults to current
                context if not set.
            setter: An optional `Setter` object that can set values
                on the carrier.

        """

    @property
    @abc.abstractmethod
    def fields(self) -> typing.Set[str]:
        """
        Gets the fields set in the carrier by the `inject` method.

        If the carrier is reused, its fields that correspond with the ones
        present in this attribute should be deleted before calling `inject`.

        Returns:
            A set with the fields set in `inject`.
        """
+ +# pylint: disable=import-error + +from .app_service import AzureAppServiceResourceDetector +from .functions import AzureFunctionsResourceDetector +from .version import __version__ +from .vm import AzureVMResourceDetector + +__all__ = [ + "AzureAppServiceResourceDetector", + "AzureFunctionsResourceDetector", + "AzureVMResourceDetector", + "__version__", +] diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/resource/detector/azure/_constants.py b/.venv/lib/python3.12/site-packages/opentelemetry/resource/detector/azure/_constants.py new file mode 100644 index 00000000..3a6415e0 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/resource/detector/azure/_constants.py @@ -0,0 +1,74 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
from opentelemetry.semconv.resource import ResourceAttributes

# cSpell:disable

# Azure Kubernetes

# Present on AKS nodes; its mere presence is used to detect AKS.
_AKS_ARM_NAMESPACE_ID = "AKS_ARM_NAMESPACE_ID"

# AppService

# Non-semconv attribute for the App Service scale-unit ("stamp") name.
_AZURE_APP_SERVICE_STAMP_RESOURCE_ATTRIBUTE = "azure.app.service.stamp"
_REGION_NAME = "REGION_NAME"
_WEBSITE_HOME_STAMPNAME = "WEBSITE_HOME_STAMPNAME"
_WEBSITE_HOSTNAME = "WEBSITE_HOSTNAME"
_WEBSITE_INSTANCE_ID = "WEBSITE_INSTANCE_ID"
# The part before any '+' is used as the subscription id (see _utils).
_WEBSITE_OWNER_NAME = "WEBSITE_OWNER_NAME"
_WEBSITE_RESOURCE_GROUP = "WEBSITE_RESOURCE_GROUP"
# Presence of this env var is used to detect App Service.
_WEBSITE_SITE_NAME = "WEBSITE_SITE_NAME"
_WEBSITE_SLOT_NAME = "WEBSITE_SLOT_NAME"

# Resource attribute -> App Service env var supplying its value.
_APP_SERVICE_ATTRIBUTE_ENV_VARS = {
    ResourceAttributes.CLOUD_REGION: _REGION_NAME,
    ResourceAttributes.DEPLOYMENT_ENVIRONMENT: _WEBSITE_SLOT_NAME,
    ResourceAttributes.HOST_ID: _WEBSITE_HOSTNAME,
    ResourceAttributes.SERVICE_INSTANCE_ID: _WEBSITE_INSTANCE_ID,
    _AZURE_APP_SERVICE_STAMP_RESOURCE_ATTRIBUTE: _WEBSITE_HOME_STAMPNAME,
}

# Functions

# Presence of this env var is used to detect Azure Functions.
_FUNCTIONS_WORKER_RUNTIME = "FUNCTIONS_WORKER_RUNTIME"
_WEBSITE_MEMORY_LIMIT_MB = "WEBSITE_MEMORY_LIMIT_MB"

# Resource attribute -> Functions env var supplying its value.
_FUNCTIONS_ATTRIBUTE_ENV_VARS = {
    ResourceAttributes.FAAS_INSTANCE: _WEBSITE_INSTANCE_ID,
    ResourceAttributes.FAAS_MAX_MEMORY: _WEBSITE_MEMORY_LIMIT_MB,
}

# Vm

# Azure Instance Metadata Service (IMDS) compute endpoint; link-local, only
# reachable from inside an Azure VM.
_AZURE_VM_METADATA_ENDPOINT = "http://169.254.169.254/metadata/instance/compute?api-version=2021-12-13&format=json"
_AZURE_VM_SCALE_SET_NAME_ATTRIBUTE = "azure.vm.scaleset.name"
_AZURE_VM_SKU_ATTRIBUTE = "azure.vm.sku"

# Attributes the VM detector fills in from IMDS metadata (see vm.py).
_EXPECTED_AZURE_AMS_ATTRIBUTES = [
    _AZURE_VM_SCALE_SET_NAME_ATTRIBUTE,
    _AZURE_VM_SKU_ATTRIBUTE,
    ResourceAttributes.CLOUD_PLATFORM,
    ResourceAttributes.CLOUD_PROVIDER,
    ResourceAttributes.CLOUD_REGION,
    ResourceAttributes.CLOUD_RESOURCE_ID,
    ResourceAttributes.HOST_ID,
    ResourceAttributes.HOST_NAME,
    ResourceAttributes.HOST_TYPE,
    ResourceAttributes.OS_TYPE,
    ResourceAttributes.OS_VERSION,
    ResourceAttributes.SERVICE_INSTANCE_ID,
]

# cSpell:enable
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os import environ
from typing import Optional

from ._constants import (
    _AKS_ARM_NAMESPACE_ID,
    _FUNCTIONS_WORKER_RUNTIME,
    _WEBSITE_OWNER_NAME,
    _WEBSITE_RESOURCE_GROUP,
    _WEBSITE_SITE_NAME,
)


def _is_on_aks() -> bool:
    """True when the AKS marker env var is present."""
    return _AKS_ARM_NAMESPACE_ID in environ


def _is_on_app_service() -> bool:
    """True when the App Service site-name env var is present."""
    return _WEBSITE_SITE_NAME in environ


def _is_on_functions() -> bool:
    """True when the Azure Functions worker-runtime env var is present."""
    return _FUNCTIONS_WORKER_RUNTIME in environ


def _can_ignore_vm_detect() -> bool:
    """True when a more specific Azure platform is already detected, so the
    (networked) VM metadata probe can be skipped."""
    return any(
        check() for check in (_is_on_aks, _is_on_app_service, _is_on_functions)
    )


def _get_azure_resource_uri() -> Optional[str]:
    """Build the ARM resource URI for the current App Service site.

    Returns None unless site name, resource group and subscription id (the
    portion of WEBSITE_OWNER_NAME before any '+') are all available.
    """
    site_name = environ.get(_WEBSITE_SITE_NAME)
    resource_group = environ.get(_WEBSITE_RESOURCE_GROUP)
    owner_name = environ.get(_WEBSITE_OWNER_NAME)

    subscription_id = owner_name
    if owner_name and "+" in owner_name:
        subscription_id = owner_name.partition("+")[0]

    if not (site_name and resource_group and subscription_id):
        return None

    return (
        f"/subscriptions/{subscription_id}"
        f"/resourceGroups/{resource_group}"
        f"/providers/Microsoft.Web/sites/{site_name}"
    )
from typing import Optional
from os import environ

from opentelemetry.sdk.resources import Resource, ResourceDetector
from opentelemetry.semconv.resource import (
    CloudPlatformValues,
    CloudProviderValues,
    ResourceAttributes,
)
from opentelemetry.resource.detector.azure._utils import _get_azure_resource_uri

from ._constants import (
    _APP_SERVICE_ATTRIBUTE_ENV_VARS,
    _WEBSITE_SITE_NAME,
)

from opentelemetry.resource.detector.azure._utils import _is_on_functions


class AzureAppServiceResourceDetector(ResourceDetector):
    """Builds a Resource from Azure App Service environment variables."""

    def detect(self) -> Resource:
        """Return a Resource describing the current App Service site.

        Returns an empty Resource when WEBSITE_SITE_NAME is unset, i.e.
        when not running on App Service.
        """
        attributes = {}
        website_site_name = environ.get(_WEBSITE_SITE_NAME)
        if website_site_name:
            # Functions resource detector takes priority with `service.name` and `cloud.platform`
            if not _is_on_functions():
                attributes[ResourceAttributes.SERVICE_NAME] = website_site_name
                attributes[ResourceAttributes.CLOUD_PLATFORM] = (
                    CloudPlatformValues.AZURE_APP_SERVICE.value
                )
            # Provider is azure for both App Service and Functions, so it is
            # set unconditionally.
            attributes[ResourceAttributes.CLOUD_PROVIDER] = (
                CloudProviderValues.AZURE.value
            )

            azure_resource_uri = _get_azure_resource_uri()
            if azure_resource_uri:
                attributes[ResourceAttributes.CLOUD_RESOURCE_ID] = (
                    azure_resource_uri
                )
            # Straight env-var-to-attribute copies (region, slot, host id, ...);
            # unset vars are simply omitted.
            for key, env_var in _APP_SERVICE_ATTRIBUTE_ENV_VARS.items():
                value = environ.get(env_var)
                if value:
                    attributes[key] = value

        return Resource(attributes)
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from os import environ, getpid

from opentelemetry.sdk.resources import Resource, ResourceDetector
from opentelemetry.semconv.resource import (
    CloudPlatformValues,
    CloudProviderValues,
    ResourceAttributes,
)

from ._constants import (
    _FUNCTIONS_ATTRIBUTE_ENV_VARS,
    _REGION_NAME,
    _WEBSITE_SITE_NAME,
)
from opentelemetry.resource.detector.azure._utils import (
    _get_azure_resource_uri,
    _is_on_functions,
)


class AzureFunctionsResourceDetector(ResourceDetector):
    """Builds a Resource from Azure Functions environment variables."""

    def detect(self) -> Resource:
        """Return a Resource describing the current Functions app.

        Returns an empty Resource when FUNCTIONS_WORKER_RUNTIME is unset,
        i.e. when not running on Azure Functions.
        """
        attributes = {}
        if _is_on_functions():
            website_site_name = environ.get(_WEBSITE_SITE_NAME)
            if website_site_name:
                attributes[ResourceAttributes.SERVICE_NAME] = website_site_name
            attributes[ResourceAttributes.PROCESS_PID] = getpid()
            attributes[ResourceAttributes.CLOUD_PROVIDER] = (
                CloudProviderValues.AZURE.value
            )
            attributes[ResourceAttributes.CLOUD_PLATFORM] = (
                CloudPlatformValues.AZURE_FUNCTIONS.value
            )
            cloud_region = environ.get(_REGION_NAME)
            if cloud_region:
                attributes[ResourceAttributes.CLOUD_REGION] = cloud_region
            azure_resource_uri = _get_azure_resource_uri()
            if azure_resource_uri:
                attributes[ResourceAttributes.CLOUD_RESOURCE_ID] = (
                    azure_resource_uri
                )
            for key, env_var in _FUNCTIONS_ATTRIBUTE_ENV_VARS.items():
                value = environ.get(env_var)
                if value:
                    # faas.max_memory must be an int; unparseable values are
                    # skipped rather than recorded as strings.
                    if key == ResourceAttributes.FAAS_MAX_MEMORY:
                        try:
                            value = int(value)
                        except ValueError:
                            continue
                    attributes[key] = value

        return Resource(attributes)
a/.venv/lib/python3.12/site-packages/opentelemetry/resource/detector/azure/version.py b/.venv/lib/python3.12/site-packages/opentelemetry/resource/detector/azure/version.py new file mode 100644 index 00000000..fac29d77 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/resource/detector/azure/version.py @@ -0,0 +1,15 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__version__ = "0.1.5" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/resource/detector/azure/vm.py b/.venv/lib/python3.12/site-packages/opentelemetry/resource/detector/azure/vm.py new file mode 100644 index 00000000..21122829 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/resource/detector/azure/vm.py @@ -0,0 +1,103 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
from json import loads
from logging import getLogger
from urllib.error import URLError
from urllib.request import Request, urlopen

from opentelemetry.context import (
    _SUPPRESS_INSTRUMENTATION_KEY,
    attach,
    detach,
    set_value,
)
from opentelemetry.sdk.resources import Resource, ResourceDetector
from opentelemetry.semconv.resource import (
    CloudPlatformValues,
    CloudProviderValues,
    ResourceAttributes,
)

from ._constants import (
    _AZURE_VM_METADATA_ENDPOINT,
    _AZURE_VM_SCALE_SET_NAME_ATTRIBUTE,
    _AZURE_VM_SKU_ATTRIBUTE,
    _EXPECTED_AZURE_AMS_ATTRIBUTES,
)
from ._utils import _can_ignore_vm_detect

_logger = getLogger(__name__)


class AzureVMResourceDetector(ResourceDetector):
    """Queries the Azure Instance Metadata Service to describe the host VM."""

    # pylint: disable=no-self-use
    def detect(self) -> "Resource":
        """Return a Resource built from IMDS compute metadata.

        Returns an empty Resource when a more specific Azure platform was
        already detected or the metadata endpoint is unreachable.
        """
        attributes = {}
        if not _can_ignore_vm_detect():
            # Suppress instrumentation so the metadata HTTP request does not
            # generate telemetry of its own.
            token = attach(set_value(_SUPPRESS_INSTRUMENTATION_KEY, True))
            # BUG FIX: detach() was previously skipped on the early return
            # below, permanently leaking the suppression context; the
            # try/finally guarantees it runs on every path.
            try:
                metadata_json = _get_azure_vm_metadata()
                if not metadata_json:
                    return Resource(attributes)
                for attribute_key in _EXPECTED_AZURE_AMS_ATTRIBUTES:
                    attributes[attribute_key] = _get_attribute_from_metadata(
                        metadata_json, attribute_key
                    )
            finally:
                detach(token)
        return Resource(attributes)


def _get_azure_vm_metadata():
    """Fetch the IMDS compute document; returns None when not on an Azure VM
    or on any fetch/parse failure."""
    request = Request(_AZURE_VM_METADATA_ENDPOINT)
    request.add_header("Metadata", "True")
    try:
        # VM metadata service should not take more than 200ms on success case
        with urlopen(request, timeout=0.2) as response:
            return loads(response.read())
    except URLError:
        # Not on Azure VM
        return None
    except Exception as e:  # pylint: disable=broad-except,invalid-name
        _logger.exception("Failed to receive Azure VM metadata: %s", e)
        return None


def _get_attribute_from_metadata(metadata_json, attribute_key):
    """Map one expected resource attribute key to its IMDS metadata value.

    Uses dict.get with an empty-string default so a metadata document
    missing a field yields "" instead of raising KeyError.
    """
    ams_value = ""
    if attribute_key == _AZURE_VM_SCALE_SET_NAME_ATTRIBUTE:
        ams_value = metadata_json.get("vmScaleSetName", "")
    elif attribute_key == _AZURE_VM_SKU_ATTRIBUTE:
        ams_value = metadata_json.get("sku", "")
    elif attribute_key == ResourceAttributes.CLOUD_PLATFORM:
        ams_value = CloudPlatformValues.AZURE_VM.value
    elif attribute_key == ResourceAttributes.CLOUD_PROVIDER:
        ams_value = CloudProviderValues.AZURE.value
    elif attribute_key == ResourceAttributes.CLOUD_REGION:
        ams_value = metadata_json.get("location", "")
    elif attribute_key == ResourceAttributes.CLOUD_RESOURCE_ID:
        ams_value = metadata_json.get("resourceId", "")
    elif attribute_key in (
        ResourceAttributes.HOST_ID,
        ResourceAttributes.SERVICE_INSTANCE_ID,
    ):
        ams_value = metadata_json.get("vmId", "")
    elif attribute_key == ResourceAttributes.HOST_NAME:
        ams_value = metadata_json.get("name", "")
    elif attribute_key == ResourceAttributes.HOST_TYPE:
        ams_value = metadata_json.get("vmSize", "")
    elif attribute_key == ResourceAttributes.OS_TYPE:
        ams_value = metadata_json.get("osType", "")
    elif attribute_key == ResourceAttributes.OS_VERSION:
        ams_value = metadata_json.get("version", "")
    return ams_value
+ +""" +The OpenTelemetry SDK package is an implementation of the OpenTelemetry +API +""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/_configuration/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/_configuration/__init__.py new file mode 100644 index 00000000..c1852edd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/_configuration/__init__.py @@ -0,0 +1,460 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +""" +OpenTelemetry SDK Configurator for Easy Instrumentation with Distros +""" + +from __future__ import annotations + +import logging +import os +from abc import ABC, abstractmethod +from os import environ +from typing import Callable, Sequence, Type, Union + +from typing_extensions import Literal + +from opentelemetry._events import set_event_logger_provider +from opentelemetry._logs import set_logger_provider +from opentelemetry.environment_variables import ( + OTEL_LOGS_EXPORTER, + OTEL_METRICS_EXPORTER, + OTEL_PYTHON_ID_GENERATOR, + OTEL_TRACES_EXPORTER, +) +from opentelemetry.metrics import set_meter_provider +from opentelemetry.sdk._events import EventLoggerProvider +from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler +from opentelemetry.sdk._logs.export import BatchLogRecordProcessor, LogExporter +from opentelemetry.sdk.environment_variables import ( + _OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED, + OTEL_EXPORTER_OTLP_LOGS_PROTOCOL, + OTEL_EXPORTER_OTLP_METRICS_PROTOCOL, + OTEL_EXPORTER_OTLP_PROTOCOL, + OTEL_EXPORTER_OTLP_TRACES_PROTOCOL, + OTEL_TRACES_SAMPLER, + OTEL_TRACES_SAMPLER_ARG, +) +from opentelemetry.sdk.metrics import MeterProvider +from opentelemetry.sdk.metrics.export import ( + MetricExporter, + MetricReader, + PeriodicExportingMetricReader, +) +from opentelemetry.sdk.resources import Attributes, Resource +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import BatchSpanProcessor, SpanExporter +from opentelemetry.sdk.trace.id_generator import IdGenerator +from opentelemetry.sdk.trace.sampling import Sampler +from opentelemetry.semconv.resource import ResourceAttributes +from opentelemetry.trace import set_tracer_provider +from opentelemetry.util._importlib_metadata import entry_points + +_EXPORTER_OTLP = "otlp" +_EXPORTER_OTLP_PROTO_GRPC = "otlp_proto_grpc" +_EXPORTER_OTLP_PROTO_HTTP = "otlp_proto_http" + +_EXPORTER_BY_OTLP_PROTOCOL = { + "grpc": _EXPORTER_OTLP_PROTO_GRPC, + 
"http/protobuf": _EXPORTER_OTLP_PROTO_HTTP, +} + +_EXPORTER_ENV_BY_SIGNAL_TYPE = { + "traces": OTEL_TRACES_EXPORTER, + "metrics": OTEL_METRICS_EXPORTER, + "logs": OTEL_LOGS_EXPORTER, +} + +_PROTOCOL_ENV_BY_SIGNAL_TYPE = { + "traces": OTEL_EXPORTER_OTLP_TRACES_PROTOCOL, + "metrics": OTEL_EXPORTER_OTLP_METRICS_PROTOCOL, + "logs": OTEL_EXPORTER_OTLP_LOGS_PROTOCOL, +} + +_RANDOM_ID_GENERATOR = "random" +_DEFAULT_ID_GENERATOR = _RANDOM_ID_GENERATOR + +_OTEL_SAMPLER_ENTRY_POINT_GROUP = "opentelemetry_traces_sampler" + +_logger = logging.getLogger(__name__) + + +def _import_config_components( + selected_components: list[str], entry_point_name: str +) -> Sequence[tuple[str, object]]: + component_implementations = [] + + for selected_component in selected_components: + try: + component_implementations.append( + ( + selected_component, + next( + iter( + entry_points( + group=entry_point_name, name=selected_component + ) + ) + ).load(), + ) + ) + except KeyError: + raise RuntimeError( + f"Requested entry point '{entry_point_name}' not found" + ) + + except StopIteration: + raise RuntimeError( + f"Requested component '{selected_component}' not found in " + f"entry point '{entry_point_name}'" + ) + + return component_implementations + + +def _get_sampler() -> str | None: + return environ.get(OTEL_TRACES_SAMPLER, None) + + +def _get_id_generator() -> str: + return environ.get(OTEL_PYTHON_ID_GENERATOR, _DEFAULT_ID_GENERATOR) + + +def _get_exporter_entry_point( + exporter_name: str, signal_type: Literal["traces", "metrics", "logs"] +): + if exporter_name not in ( + _EXPORTER_OTLP, + _EXPORTER_OTLP_PROTO_GRPC, + _EXPORTER_OTLP_PROTO_HTTP, + ): + return exporter_name + + # Checking env vars for OTLP protocol (grpc/http). 
def _get_exporter_names(
    signal_type: Literal["traces", "metrics", "logs"],
) -> Sequence[str]:
    """Resolve exporter entry-point names for *signal_type* from the env.

    Reads the signal's ``OTEL_<SIGNAL>_EXPORTER`` variable; an unset or
    empty value, or the literal ``"none"``, yields an empty list. Each
    comma-separated name is normalized through
    ``_get_exporter_entry_point``.
    """
    env_var = _EXPORTER_ENV_BY_SIGNAL_TYPE.get(signal_type, "")
    raw_value = environ.get(env_var)

    if not raw_value or raw_value.lower().strip() == "none":
        return []

    resolved = []
    for name in raw_value.split(","):
        resolved.append(
            _get_exporter_entry_point(name.strip(), signal_type)
        )
    return resolved
def _init_logging(
    exporters: dict[str, Type[LogExporter]],
    resource: Resource | None = None,
    setup_logging_handler: bool = True,
):
    """Install the global logger and event-logger providers.

    Each exporter class is instantiated and wrapped in a
    ``BatchLogRecordProcessor``. When *setup_logging_handler* is true, a
    ``LoggingHandler`` is attached to the root stdlib logger so standard
    ``logging`` records flow into OpenTelemetry.
    """
    logger_provider = LoggerProvider(resource=resource)
    set_logger_provider(logger_provider)

    for exporter_class in exporters.values():
        logger_provider.add_log_record_processor(
            BatchLogRecordProcessor(exporter_class())
        )

    set_event_logger_provider(
        EventLoggerProvider(logger_provider=logger_provider)
    )

    if setup_logging_handler:
        # NOTSET defers level filtering to the individual loggers.
        logging.getLogger().addHandler(
            LoggingHandler(
                level=logging.NOTSET, logger_provider=logger_provider
            )
        )
def _import_sampler(sampler_name: str) -> Sampler | None:
    """Build the sampler configured by *sampler_name*, or ``None``.

    Any failure (missing entry point, bad sampler argument, a factory that
    returns a non-``Sampler``) is logged and swallowed so the SDK falls
    back to its default sampler.
    """
    if not sampler_name:
        return None
    try:
        factory = _import_sampler_factory(sampler_name)

        raw_arg = os.getenv(OTEL_TRACES_SAMPLER_ARG)
        if sampler_name in ("traceidratio", "parentbased_traceidratio"):
            # Ratio samplers need a float; an unset or unparsable value
            # falls back to 1.0 (sample everything).
            try:
                factory_arg = float(raw_arg)
            except (ValueError, TypeError):
                _logger.warning(
                    "Could not convert TRACES_SAMPLER_ARG to float. Using default value 1.0."
                )
                factory_arg = 1.0
        else:
            factory_arg = raw_arg

        sampler = factory(factory_arg)
        if not isinstance(sampler, Sampler):
            message = f"Sampler factory, {factory}, produced output, {sampler}, which is not a Sampler."
            _logger.warning(message)
            raise ValueError(message)
        return sampler
    except Exception as exc:  # pylint: disable=broad-exception-caught
        _logger.warning(
            "Using default sampler. Failed to initialize sampler, %s: %s",
            sampler_name,
            exc,
        )
        return None
class _BaseConfigurator(ABC):
    """An ABC for configurators

    Configurators are used to configure
    SDKs (i.e. TracerProvider, MeterProvider, Processors...)
    to reduce the amount of manual configuration required.

    Instances are per-class singletons: the first instantiation of a
    concrete subclass is cached and returned for all later calls.
    """

    # Cached singleton instance (set on the concrete subclass).
    _instance = None
    _is_instrumented = False

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            # Bug fix: object.__new__ must be called with the class only.
            # Forwarding *args/**kwargs raises TypeError on Python 3 as
            # soon as a subclass is instantiated with arguments without
            # overriding __init__.
            cls._instance = object.__new__(cls)

        return cls._instance

    @abstractmethod
    def _configure(self, **kwargs):
        """Configure the SDK"""

    def configure(self, **kwargs):
        """Configure the SDK"""
        self._configure(**kwargs)
+ """ + + def _configure(self, **kwargs): + _initialize_components(**kwargs) diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/_events/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/_events/__init__.py new file mode 100644 index 00000000..ae163025 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/_events/__init__.py @@ -0,0 +1,89 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import logging +from time import time_ns +from typing import Optional + +from opentelemetry import trace +from opentelemetry._events import Event +from opentelemetry._events import EventLogger as APIEventLogger +from opentelemetry._events import EventLoggerProvider as APIEventLoggerProvider +from opentelemetry._logs import NoOpLogger, SeverityNumber, get_logger_provider +from opentelemetry.sdk._logs import Logger, LoggerProvider, LogRecord +from opentelemetry.util.types import Attributes + +_logger = logging.getLogger(__name__) + + +class EventLogger(APIEventLogger): + def __init__( + self, + logger_provider: LoggerProvider, + name: str, + version: Optional[str] = None, + schema_url: Optional[str] = None, + attributes: Optional[Attributes] = None, + ): + super().__init__( + name=name, + version=version, + schema_url=schema_url, + attributes=attributes, + ) + self._logger: Logger = logger_provider.get_logger( + name, version, schema_url, attributes + ) + + def emit(self, event: Event) -> None: + 
class EventLoggerProvider(APIEventLoggerProvider):
    """SDK implementation of the event-logger provider.

    Creates :class:`EventLogger` instances backed by *logger_provider*,
    falling back to the globally configured logger provider when none is
    supplied.
    """

    def __init__(self, logger_provider: Optional[LoggerProvider] = None):
        self._logger_provider = logger_provider or get_logger_provider()

    def get_event_logger(
        self,
        name: str,
        version: Optional[str] = None,
        schema_url: Optional[str] = None,
        attributes: Optional[Attributes] = None,
    ) -> EventLogger:
        """Return an :class:`EventLogger` for the given scope.

        A falsy *name* is permitted (matching the API contract) but logged
        as a warning.
        """
        if not name:
            _logger.warning("EventLogger created with invalid name: %s", name)
        return EventLogger(
            self._logger_provider, name, version, schema_url, attributes
        )

    def shutdown(self):
        """Shut down the underlying logger provider."""
        self._logger_provider.shutdown()

    def force_flush(self, timeout_millis: int = 30000) -> bool:
        """Force flush the underlying logger provider.

        Bug fix: the delegate's result was previously discarded, so this
        method always returned ``None`` despite its ``bool`` annotation;
        it now propagates the flush outcome.
        """
        return self._logger_provider.force_flush(timeout_millis)
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from opentelemetry.sdk._logs._internal import ( + LogData, + LogDroppedAttributesWarning, + Logger, + LoggerProvider, + LoggingHandler, + LogLimits, + LogRecord, + LogRecordProcessor, +) + +__all__ = [ + "LogData", + "Logger", + "LoggerProvider", + "LoggingHandler", + "LogLimits", + "LogRecord", + "LogRecordProcessor", + "LogDroppedAttributesWarning", +] diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/_logs/_internal/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/_logs/_internal/__init__.py new file mode 100644 index 00000000..302ca1ed --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/_logs/_internal/__init__.py @@ -0,0 +1,712 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
class LogLimits:
    """This class is based on a SpanLimits class in the Tracing module.

    This class represents the limits that should be enforced on recorded data such as events, links, attributes etc.

    This class does not enforce any limits itself. It only provides a way to read limits from env,
    default values and from user provided arguments.

    All limit arguments must be either a non-negative integer, ``None`` or ``LogLimits.UNSET``.

    - All limit arguments are optional.
    - If a limit argument is not set, the class will try to read its value from the corresponding
      environment variable.
    - If the environment variable is not set, the default value, if any, will be used.

    Limit precedence:

    - If a model specific limit is set, it will be used.
    - Else if the corresponding global limit is set, it will be used.
    - Else if the model specific limit has a default value, the default value will be used.
    - Else if the global limit has a default value, the default value will be used.

    Args:
        max_attributes: Maximum number of attributes that can be added to a span, event, and link.
            Environment variable: ``OTEL_ATTRIBUTE_COUNT_LIMIT``
            Default: 128
        max_attribute_length: Maximum length an attribute value can have. Values longer than
            the specified length will be truncated.
    """

    # Sentinel meaning "explicitly unlimited" — distinct from None, which
    # means "not provided; fall back to env var / default".
    UNSET = -1

    def __init__(
        self,
        max_attributes: int | None = None,
        max_attribute_length: int | None = None,
    ):
        # attribute count
        global_max_attributes = self._from_env_if_absent(
            max_attributes, OTEL_ATTRIBUTE_COUNT_LIMIT
        )
        self.max_attributes = (
            global_max_attributes
            if global_max_attributes is not None
            else _DEFAULT_OTEL_ATTRIBUTE_COUNT_LIMIT
        )

        # attribute length (no default: None means unlimited)
        self.max_attribute_length = self._from_env_if_absent(
            max_attribute_length,
            OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT,
        )

    def __repr__(self):
        return f"{type(self).__name__}(max_attributes={self.max_attributes}, max_attribute_length={self.max_attribute_length})"

    @classmethod
    def _from_env_if_absent(
        cls, value: int | None, env_var: str, default: int | None = None
    ) -> int | None:
        """Resolve a limit: explicit value wins, then env var, then default.

        ``LogLimits.UNSET`` (or an empty env-var value) resolves to
        ``None``, i.e. no limit.

        Raises:
            ValueError: if the value (explicit or from env) is negative or
                not an integer.
        """
        if value == cls.UNSET:
            return None

        err_msg = "{} must be a non-negative integer but got {}"

        # if no value is provided for the limit, try to load it from env
        if value is None:
            # return default value if env var is not set
            if env_var not in environ:
                return default

            str_value = environ.get(env_var, "").strip().lower()
            if str_value == _ENV_VALUE_UNSET:
                return None

            try:
                value = int(str_value)
            except ValueError:
                # Re-raise with the env-var name for easier diagnosis;
                # "from None" drops the redundant int() traceback.
                raise ValueError(
                    err_msg.format(env_var, str_value)
                ) from None

        if value < 0:
            raise ValueError(err_msg.format(env_var, value))
        return value
+ """ + + def __init__( + self, + timestamp: int | None = None, + observed_timestamp: int | None = None, + trace_id: int | None = None, + span_id: int | None = None, + trace_flags: TraceFlags | None = None, + severity_text: str | None = None, + severity_number: SeverityNumber | None = None, + body: AnyValue | None = None, + resource: Resource | None = None, + attributes: Attributes | None = None, + limits: LogLimits | None = _UnsetLogLimits, + ): + super().__init__( + **{ + "timestamp": timestamp, + "observed_timestamp": observed_timestamp, + "trace_id": trace_id, + "span_id": span_id, + "trace_flags": trace_flags, + "severity_text": severity_text, + "severity_number": severity_number, + "body": body, + "attributes": BoundedAttributes( + maxlen=limits.max_attributes, + attributes=attributes if bool(attributes) else None, + immutable=False, + max_value_len=limits.max_attribute_length, + ), + } + ) + self.resource = ( + resource if isinstance(resource, Resource) else Resource.create({}) + ) + if self.dropped_attributes > 0: + warnings.warn( + "Log record attributes were dropped due to limits", + LogDroppedAttributesWarning, + stacklevel=2, + ) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, LogRecord): + return NotImplemented + return self.__dict__ == other.__dict__ + + def to_json(self, indent: int | None = 4) -> str: + return json.dumps( + { + "body": self.body, + "severity_number": self.severity_number.value + if self.severity_number is not None + else None, + "severity_text": self.severity_text, + "attributes": ( + dict(self.attributes) if bool(self.attributes) else None + ), + "dropped_attributes": self.dropped_attributes, + "timestamp": ns_to_iso_str(self.timestamp), + "observed_timestamp": ns_to_iso_str(self.observed_timestamp), + "trace_id": ( + f"0x{format_trace_id(self.trace_id)}" + if self.trace_id is not None + else "" + ), + "span_id": ( + f"0x{format_span_id(self.span_id)}" + if self.span_id is not None + else "" + ), + 
"trace_flags": self.trace_flags, + "resource": json.loads(self.resource.to_json()), + }, + indent=indent, + ) + + @property + def dropped_attributes(self) -> int: + if self.attributes: + return self.attributes.dropped + return 0 + + +class LogData: + """Readable LogRecord data plus associated InstrumentationLibrary.""" + + def __init__( + self, + log_record: LogRecord, + instrumentation_scope: InstrumentationScope, + ): + self.log_record = log_record + self.instrumentation_scope = instrumentation_scope + + +class LogRecordProcessor(abc.ABC): + """Interface to hook the log record emitting action. + + Log processors can be registered directly using + :func:`LoggerProvider.add_log_record_processor` and they are invoked + in the same order as they were registered. + """ + + @abc.abstractmethod + def emit(self, log_data: LogData): + """Emits the `LogData`""" + + @abc.abstractmethod + def shutdown(self): + """Called when a :class:`opentelemetry.sdk._logs.Logger` is shutdown""" + + @abc.abstractmethod + def force_flush(self, timeout_millis: int = 30000): + """Export all the received logs to the configured Exporter that have not yet + been exported. + + Args: + timeout_millis: The maximum amount of time to wait for logs to be + exported. + + Returns: + False if the timeout is exceeded, True otherwise. + """ + + +# Temporary fix until https://github.com/PyCQA/pylint/issues/4098 is resolved +# pylint:disable=no-member +class SynchronousMultiLogRecordProcessor(LogRecordProcessor): + """Implementation of class:`LogRecordProcessor` that forwards all received + events to a list of log processors sequentially. + + The underlying log processors are called in sequential order as they were + added. + """ + + def __init__(self): + # use a tuple to avoid race conditions when adding a new log and + # iterating through it on "emit". + self._log_record_processors = () # type: Tuple[LogRecordProcessor, ...] 
+ self._lock = threading.Lock() + + def add_log_record_processor( + self, log_record_processor: LogRecordProcessor + ) -> None: + """Adds a Logprocessor to the list of log processors handled by this instance""" + with self._lock: + self._log_record_processors += (log_record_processor,) + + def emit(self, log_data: LogData) -> None: + for lp in self._log_record_processors: + lp.emit(log_data) + + def shutdown(self) -> None: + """Shutdown the log processors one by one""" + for lp in self._log_record_processors: + lp.shutdown() + + def force_flush(self, timeout_millis: int = 30000) -> bool: + """Force flush the log processors one by one + + Args: + timeout_millis: The maximum amount of time to wait for logs to be + exported. If the first n log processors exceeded the timeout + then remaining log processors will not be flushed. + + Returns: + True if all the log processors flushes the logs within timeout, + False otherwise. + """ + deadline_ns = time_ns() + timeout_millis * 1000000 + for lp in self._log_record_processors: + current_ts = time_ns() + if current_ts >= deadline_ns: + return False + + if not lp.force_flush((deadline_ns - current_ts) // 1000000): + return False + + return True + + +class ConcurrentMultiLogRecordProcessor(LogRecordProcessor): + """Implementation of :class:`LogRecordProcessor` that forwards all received + events to a list of log processors in parallel. + + Calls to the underlying log processors are forwarded in parallel by + submitting them to a thread pool executor and waiting until each log + processor finished its work. + + Args: + max_workers: The number of threads managed by the thread pool executor + and thus defining how many log processors can work in parallel. + """ + + def __init__(self, max_workers: int = 2): + # use a tuple to avoid race conditions when adding a new log and + # iterating through it on "emit". + self._log_record_processors = () # type: Tuple[LogRecordProcessor, ...] 
+ self._lock = threading.Lock() + self._executor = concurrent.futures.ThreadPoolExecutor( + max_workers=max_workers + ) + + def add_log_record_processor( + self, log_record_processor: LogRecordProcessor + ): + with self._lock: + self._log_record_processors += (log_record_processor,) + + def _submit_and_wait( + self, + func: Callable[[LogRecordProcessor], Callable[..., None]], + *args: Any, + **kwargs: Any, + ): + futures = [] + for lp in self._log_record_processors: + future = self._executor.submit(func(lp), *args, **kwargs) + futures.append(future) + for future in futures: + future.result() + + def emit(self, log_data: LogData): + self._submit_and_wait(lambda lp: lp.emit, log_data) + + def shutdown(self): + self._submit_and_wait(lambda lp: lp.shutdown) + + def force_flush(self, timeout_millis: int = 30000) -> bool: + """Force flush the log processors in parallel. + + Args: + timeout_millis: The maximum amount of time to wait for logs to be + exported. + + Returns: + True if all the log processors flushes the logs within timeout, + False otherwise. 
    def __init__(
        self,
        level=logging.NOTSET,
        logger_provider=None,
    ) -> None:
        """Create the handler.

        Args:
            level: stdlib logging threshold for this handler; NOTSET (the
                default) defers filtering to the individual loggers.
            logger_provider: provider used to obtain OTel loggers; falls
                back to the globally configured provider when None.
        """
        super().__init__(level=level)
        self._logger_provider = logger_provider or get_logger_provider()
+ attributes[SpanAttributes.CODE_FILEPATH] = record.pathname + attributes[SpanAttributes.CODE_FUNCTION] = record.funcName + attributes[SpanAttributes.CODE_LINENO] = record.lineno + + if record.exc_info: + exctype, value, tb = record.exc_info + if exctype is not None: + attributes[SpanAttributes.EXCEPTION_TYPE] = exctype.__name__ + if value is not None and value.args: + attributes[SpanAttributes.EXCEPTION_MESSAGE] = str( + value.args[0] + ) + if tb is not None: + # https://github.com/open-telemetry/opentelemetry-specification/blob/9fa7c656b26647b27e485a6af7e38dc716eba98a/specification/trace/semantic_conventions/exceptions.md#stacktrace-representation + attributes[SpanAttributes.EXCEPTION_STACKTRACE] = "".join( + traceback.format_exception(*record.exc_info) + ) + return attributes + + def _translate(self, record: logging.LogRecord) -> LogRecord: + timestamp = int(record.created * 1e9) + observered_timestamp = time_ns() + span_context = get_current_span().get_span_context() + attributes = self._get_attributes(record) + severity_number = std_to_otel(record.levelno) + if self.formatter: + body = self.format(record) + else: + # `record.getMessage()` uses `record.msg` as a template to format + # `record.args` into. There is a special case in `record.getMessage()` + # where it will only attempt formatting if args are provided, + # otherwise, it just stringifies `record.msg`. + # + # Since the OTLP body field has a type of 'any' and the logging module + # is sometimes used in such a way that objects incorrectly end up + # set as record.msg, in those cases we would like to bypass + # `record.getMessage()` completely and set the body to the object + # itself instead of its string representation. 
+ # For more background, see: https://github.com/open-telemetry/opentelemetry-python/pull/4216 + if not record.args and not isinstance(record.msg, str): + # no args are provided so it's *mostly* safe to use the message template as the body + body = record.msg + else: + body = record.getMessage() + + # related to https://github.com/open-telemetry/opentelemetry-python/issues/3548 + # Severity Text = WARN as defined in https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md#displaying-severity. + level_name = ( + "WARN" if record.levelname == "WARNING" else record.levelname + ) + + logger = get_logger(record.name, logger_provider=self._logger_provider) + return LogRecord( + timestamp=timestamp, + observed_timestamp=observered_timestamp, + trace_id=span_context.trace_id, + span_id=span_context.span_id, + trace_flags=span_context.trace_flags, + severity_text=level_name, + severity_number=severity_number, + body=body, + resource=logger.resource, + attributes=attributes, + ) + + def emit(self, record: logging.LogRecord) -> None: + """ + Emit a record. Skip emitting if logger is NoOp. + + The record is translated to OTel format, and then sent across the pipeline. + """ + logger = get_logger(record.name, logger_provider=self._logger_provider) + if not isinstance(logger, NoOpLogger): + logger.emit(self._translate(record)) + + def flush(self) -> None: + """ + Flushes the logging output. Skip flushing if logging_provider has no force_flush method. 
class Logger(APILogger):
    """SDK logger that stamps records with instrumentation-scope info and
    forwards them to the registered log record processors."""

    def __init__(
        self,
        resource: Resource,
        multi_log_record_processor: Union[
            SynchronousMultiLogRecordProcessor,
            ConcurrentMultiLogRecordProcessor,
        ],
        instrumentation_scope: InstrumentationScope,
    ):
        scope = instrumentation_scope
        super().__init__(
            scope.name,
            scope.version,
            scope.schema_url,
            scope.attributes,
        )
        self._resource = resource
        self._multi_log_record_processor = multi_log_record_processor
        self._instrumentation_scope = scope

    @property
    def resource(self):
        # The Resource this logger was created with (read-only view).
        return self._resource

    def emit(self, record: LogRecord):
        """Emits the :class:`LogData` by associating :class:`LogRecord`
        and instrumentation info.
        """
        self._multi_log_record_processor.emit(
            LogData(record, self._instrumentation_scope)
        )
    def _get_logger_cached(
        self,
        name: str,
        version: str | None = None,
        schema_url: str | None = None,
    ) -> Logger:
        """Return a cached Logger for (name, version, schema_url),
        creating and caching it on first use.
        """
        # Lock guards both the membership test and the insert so two threads
        # asking for the same scope get the same Logger instance.
        with self._logger_cache_lock:
            key = (name, version, schema_url)
            if key in self._logger_cache:
                return self._logger_cache[key]

            self._logger_cache[key] = self._get_logger_no_cache(
                name, version, schema_url
            )
            return self._logger_cache[key]

    def get_logger(
        self,
        name: str,
        version: str | None = None,
        schema_url: str | None = None,
        attributes: Attributes | None = None,
    ) -> Logger:
        """Return a Logger for the given instrumentation scope.

        Returns a NoOpLogger when the SDK is disabled (OTEL_SDK_DISABLED).
        Loggers requested without attributes are cached; passing attributes
        bypasses the cache because attributes are not part of the cache key.
        """
        if self._disabled:
            return NoOpLogger(
                name,
                version=version,
                schema_url=schema_url,
                attributes=attributes,
            )
        if attributes is None:
            return self._get_logger_cached(name, version, schema_url)
        return self._get_logger_no_cache(name, version, schema_url, attributes)

    def add_log_record_processor(
        self, log_record_processor: LogRecordProcessor
    ):
        """Registers a new :class:`LogRecordProcessor` for this `LoggerProvider` instance.

        The log processors are invoked in the same order they are registered.
        """
        self._multi_log_record_processor.add_log_record_processor(
            log_record_processor
        )

    def shutdown(self):
        """Shuts down the log processors."""
        self._multi_log_record_processor.shutdown()
        # Unregister the atexit hook so shutdown does not run a second time
        # at interpreter exit.
        if self._at_exit_handler is not None:
            atexit.unregister(self._at_exit_handler)
            self._at_exit_handler = None
+ """ + return self._multi_log_record_processor.force_flush(timeout_millis) diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/_logs/_internal/export/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/_logs/_internal/export/__init__.py new file mode 100644 index 00000000..434dc745 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/_logs/_internal/export/__init__.py @@ -0,0 +1,462 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import abc +import collections +import enum +import logging +import os +import sys +import threading +from os import environ, linesep +from time import time_ns +from typing import IO, Callable, Deque, List, Optional, Sequence + +from opentelemetry.context import ( + _SUPPRESS_INSTRUMENTATION_KEY, + attach, + detach, + set_value, +) +from opentelemetry.sdk._logs import LogData, LogRecord, LogRecordProcessor +from opentelemetry.sdk.environment_variables import ( + OTEL_BLRP_EXPORT_TIMEOUT, + OTEL_BLRP_MAX_EXPORT_BATCH_SIZE, + OTEL_BLRP_MAX_QUEUE_SIZE, + OTEL_BLRP_SCHEDULE_DELAY, +) +from opentelemetry.util._once import Once + +_DEFAULT_SCHEDULE_DELAY_MILLIS = 5000 +_DEFAULT_MAX_EXPORT_BATCH_SIZE = 512 +_DEFAULT_EXPORT_TIMEOUT_MILLIS = 30000 +_DEFAULT_MAX_QUEUE_SIZE = 2048 +_ENV_VAR_INT_VALUE_ERROR_MESSAGE = ( + "Unable to parse value for %s as integer. Defaulting to %s." 
_logger = logging.getLogger(__name__)


class LogExportResult(enum.Enum):
    SUCCESS = 0
    FAILURE = 1


class LogExporter(abc.ABC):
    """Interface for exporting logs.

    Interface to be implemented by services that want to export logs received
    in their own format.

    To export data this MUST be registered to the
    :class:`opentelemetry.sdk._logs.Logger` using a log processor.
    """

    @abc.abstractmethod
    def export(self, batch: Sequence[LogData]):
        """Exports a batch of logs.

        Args:
            batch: The list of `LogData` objects to be exported

        Returns:
            The result of the export
        """

    @abc.abstractmethod
    def shutdown(self):
        """Shuts down the exporter.

        Called when the SDK is shut down.
        """


class ConsoleLogExporter(LogExporter):
    """:class:`LogExporter` that writes log records to a console stream.

    Meant for diagnostics: each exported record is formatted (by default via
    ``LogRecord.to_json()`` plus a line separator) and written to STDOUT.
    """

    def __init__(
        self,
        out: IO = sys.stdout,
        formatter: Callable[[LogRecord], str] = lambda record: record.to_json()
        + linesep,
    ):
        self.out = out
        self.formatter = formatter

    def export(self, batch: Sequence[LogData]):
        # Hoist attribute lookups out of the loop; flush once at the end.
        write = self.out.write
        render = self.formatter
        for item in batch:
            write(render(item.log_record))
        self.out.flush()
        return LogExportResult.SUCCESS

    def shutdown(self):
        pass
+ """ + + def __init__(self, exporter: LogExporter): + self._exporter = exporter + self._shutdown = False + + def emit(self, log_data: LogData): + if self._shutdown: + _logger.warning("Processor is already shutdown, ignoring call") + return + token = attach(set_value(_SUPPRESS_INSTRUMENTATION_KEY, True)) + try: + self._exporter.export((log_data,)) + except Exception: # pylint: disable=broad-exception-caught + _logger.exception("Exception while exporting logs.") + detach(token) + + def shutdown(self): + self._shutdown = True + self._exporter.shutdown() + + def force_flush(self, timeout_millis: int = 30000) -> bool: # pylint: disable=no-self-use + return True + + +class _FlushRequest: + __slots__ = ["event", "num_log_records"] + + def __init__(self): + self.event = threading.Event() + self.num_log_records = 0 + + +_BSP_RESET_ONCE = Once() + + +class BatchLogRecordProcessor(LogRecordProcessor): + """This is an implementation of LogRecordProcessor which creates batches of + received logs in the export-friendly LogData representation and + send to the configured LogExporter, as soon as they are emitted. 
    `BatchLogRecordProcessor` is configurable with the following environment
    variables which correspond to constructor parameters:

    - :envvar:`OTEL_BLRP_SCHEDULE_DELAY`
    - :envvar:`OTEL_BLRP_MAX_QUEUE_SIZE`
    - :envvar:`OTEL_BLRP_MAX_EXPORT_BATCH_SIZE`
    - :envvar:`OTEL_BLRP_EXPORT_TIMEOUT`
    """

    # Bounded buffer of pending logs; deque maxlen silently drops the oldest
    # entries when the queue is full.
    _queue: Deque[LogData]
    _flush_request: _FlushRequest | None
    # Reusable scratch list for assembling export batches.
    _log_records: List[LogData | None]

    def __init__(
        self,
        exporter: LogExporter,
        schedule_delay_millis: float | None = None,
        max_export_batch_size: int | None = None,
        export_timeout_millis: float | None = None,
        max_queue_size: int | None = None,
    ):
        """Start a daemon worker thread that exports queued logs in batches.

        Any parameter left as None falls back to its OTEL_BLRP_* environment
        variable, then to the hard-coded default.
        """
        if max_queue_size is None:
            max_queue_size = BatchLogRecordProcessor._default_max_queue_size()

        if schedule_delay_millis is None:
            schedule_delay_millis = (
                BatchLogRecordProcessor._default_schedule_delay_millis()
            )

        if max_export_batch_size is None:
            max_export_batch_size = (
                BatchLogRecordProcessor._default_max_export_batch_size()
            )

        if export_timeout_millis is None:
            export_timeout_millis = (
                BatchLogRecordProcessor._default_export_timeout_millis()
            )

        BatchLogRecordProcessor._validate_arguments(
            max_queue_size, schedule_delay_millis, max_export_batch_size
        )

        self._exporter = exporter
        self._max_queue_size = max_queue_size
        self._schedule_delay_millis = schedule_delay_millis
        self._max_export_batch_size = max_export_batch_size
        self._export_timeout_millis = export_timeout_millis
        self._queue = collections.deque([], max_queue_size)
        self._worker_thread = threading.Thread(
            name="OtelBatchLogRecordProcessor",
            target=self.worker,
            daemon=True,
        )
        self._condition = threading.Condition(threading.Lock())
        self._shutdown = False
        self._flush_request = None
        self._log_records = [None] * self._max_export_batch_size
        self._worker_thread.start()
        # Locks and threads do not survive fork(); re-create them in the child.
        if hasattr(os, "register_at_fork"):
            os.register_at_fork(after_in_child=self._at_fork_reinit)  # pylint: disable=protected-access
        self._pid = os.getpid()

    def _at_fork_reinit(self):
        """Re-create the condition, drop inherited queue contents, and restart
        the worker thread in a forked child process.
        """
        self._condition = threading.Condition(threading.Lock())
        self._queue.clear()
        self._worker_thread = threading.Thread(
            name="OtelBatchLogRecordProcessor",
            target=self.worker,
            daemon=True,
        )
        self._worker_thread.start()
        self._pid = os.getpid()

    def worker(self):
        """Worker loop: wait until a batch is ready, a flush is requested, or
        the schedule delay elapses, then export; drain the queue on shutdown.
        """
        timeout = self._schedule_delay_millis / 1e3
        flush_request: Optional[_FlushRequest] = None
        while not self._shutdown:
            with self._condition:
                if self._shutdown:
                    # shutdown may have been called, avoid further processing
                    break
                flush_request = self._get_and_unset_flush_request()
                if (
                    len(self._queue) < self._max_export_batch_size
                    and flush_request is None
                ):
                    self._condition.wait(timeout)

                    flush_request = self._get_and_unset_flush_request()
                    if not self._queue:
                        # woke up with nothing to do; reset the full delay
                        timeout = self._schedule_delay_millis / 1e3
                        self._notify_flush_request_finished(flush_request)
                        flush_request = None
                        continue
                    if self._shutdown:
                        break

            start_ns = time_ns()
            self._export(flush_request)
            end_ns = time_ns()
            # subtract the duration of this export call to the next timeout
            timeout = self._schedule_delay_millis / 1e3 - (
                (end_ns - start_ns) / 1e9
            )

            self._notify_flush_request_finished(flush_request)
            flush_request = None

        # there might have been a new flush request while export was running
        # and before the done flag switched to true
        with self._condition:
            shutdown_flush_request = self._get_and_unset_flush_request()

        # flush the remaining logs
        self._drain_queue()
        self._notify_flush_request_finished(flush_request)
        self._notify_flush_request_finished(shutdown_flush_request)

    def _export(self, flush_request: Optional[_FlushRequest] = None):
        """Exports logs considering the given flush_request.

        If flush_request is not None then logs are exported in batches
        until the number of exported logs reached or exceeded the num of logs in
        flush_request, otherwise exports at max max_export_batch_size logs.
        """
        if flush_request is None:
            self._export_batch()
            return

        num_log_records = flush_request.num_log_records
        while self._queue:
            exported = self._export_batch()
            num_log_records -= exported

            if num_log_records <= 0:
                break

    def _export_batch(self) -> int:
        """Exports at most max_export_batch_size logs and returns the number of
        exported logs.
        """
        idx = 0
        while idx < self._max_export_batch_size and self._queue:
            record = self._queue.pop()
            self._log_records[idx] = record
            idx += 1
        # Suppress instrumentation so the exporter's own I/O is not logged back
        # into this pipeline (would recurse).
        token = attach(set_value(_SUPPRESS_INSTRUMENTATION_KEY, True))
        try:
            self._exporter.export(self._log_records[:idx])  # type: ignore
        except Exception:  # pylint: disable=broad-exception-caught
            _logger.exception("Exception while exporting logs.")
        # NOTE(review): detach is not in a finally; a BaseException raised by
        # export would leak the context token — confirm intent.
        detach(token)

        for index in range(idx):
            self._log_records[index] = None
        return idx

    def _drain_queue(self):
        """Export all elements until queue is empty.

        Can only be called from the worker thread context because it invokes
        `export` that is not thread safe.
        """
        while self._queue:
            self._export_batch()

    def _get_and_unset_flush_request(self) -> Optional[_FlushRequest]:
        """Atomically take ownership of the pending flush request (if any),
        snapshotting the current queue length into it.
        """
        flush_request = self._flush_request
        self._flush_request = None
        if flush_request is not None:
            flush_request.num_log_records = len(self._queue)
        return flush_request

    @staticmethod
    def _notify_flush_request_finished(
        flush_request: Optional[_FlushRequest] = None,
    ):
        """Wake any thread blocked in force_flush() on this request."""
        if flush_request is not None:
            flush_request.event.set()

    def _get_or_create_flush_request(self) -> _FlushRequest:
        """Return the pending flush request, creating one if none is pending;
        concurrent force_flush() callers share a single request.
        """
        if self._flush_request is None:
            self._flush_request = _FlushRequest()
        return self._flush_request

    def emit(self, log_data: LogData) -> None:
        """Adds the `LogData` to queue and notifies the waiting threads
        when size of queue reaches max_export_batch_size.
        """
        if self._shutdown:
            return
        # After a fork the inherited worker thread is gone; reinit exactly once.
        if self._pid != os.getpid():
            _BSP_RESET_ONCE.do_once(self._at_fork_reinit)

        self._queue.appendleft(log_data)
        if len(self._queue) >= self._max_export_batch_size:
            with self._condition:
                self._condition.notify()

    def shutdown(self):
        """Stop the worker thread, drain remaining logs, and shut down the
        exporter.
        """
        self._shutdown = True
        with self._condition:
            self._condition.notify_all()
        self._worker_thread.join()
        self._exporter.shutdown()

    def force_flush(self, timeout_millis: Optional[int] = None) -> bool:
        """Request an immediate export of queued logs and wait for it.

        Returns True if the flush completed within the timeout, False
        otherwise (a warning is logged on timeout).
        """
        if timeout_millis is None:
            timeout_millis = self._export_timeout_millis
        if self._shutdown:
            return True

        with self._condition:
            flush_request = self._get_or_create_flush_request()
            self._condition.notify_all()

        ret = flush_request.event.wait(timeout_millis / 1e3)
        if not ret:
            _logger.warning("Timeout was exceeded in force_flush().")
        return ret

    @staticmethod
    def _default_max_queue_size():
        """Max queue size from OTEL_BLRP_MAX_QUEUE_SIZE, defaulting on
        unset or unparsable values.
        """
        try:
            return int(
                environ.get(OTEL_BLRP_MAX_QUEUE_SIZE, _DEFAULT_MAX_QUEUE_SIZE)
            )
        except ValueError:
            _logger.exception(
                _ENV_VAR_INT_VALUE_ERROR_MESSAGE,
                OTEL_BLRP_MAX_QUEUE_SIZE,
                _DEFAULT_MAX_QUEUE_SIZE,
            )
            return _DEFAULT_MAX_QUEUE_SIZE

    @staticmethod
    def _default_schedule_delay_millis():
        """Schedule delay from OTEL_BLRP_SCHEDULE_DELAY, defaulting on
        unset or unparsable values.
        """
        try:
            return int(
                environ.get(
                    OTEL_BLRP_SCHEDULE_DELAY, _DEFAULT_SCHEDULE_DELAY_MILLIS
                )
            )
        except ValueError:
            _logger.exception(
                _ENV_VAR_INT_VALUE_ERROR_MESSAGE,
                OTEL_BLRP_SCHEDULE_DELAY,
                _DEFAULT_SCHEDULE_DELAY_MILLIS,
            )
            return _DEFAULT_SCHEDULE_DELAY_MILLIS

    @staticmethod
    def _default_max_export_batch_size():
        """Max export batch size from OTEL_BLRP_MAX_EXPORT_BATCH_SIZE,
        defaulting on unset or unparsable values.
        """
        try:
            return int(
                environ.get(
                    OTEL_BLRP_MAX_EXPORT_BATCH_SIZE,
                    _DEFAULT_MAX_EXPORT_BATCH_SIZE,
                )
            )
        except ValueError:
            _logger.exception(
                _ENV_VAR_INT_VALUE_ERROR_MESSAGE,
                OTEL_BLRP_MAX_EXPORT_BATCH_SIZE,
                _DEFAULT_MAX_EXPORT_BATCH_SIZE,
            )
            return _DEFAULT_MAX_EXPORT_BATCH_SIZE

    @staticmethod
    def _default_export_timeout_millis():
        """Export timeout from OTEL_BLRP_EXPORT_TIMEOUT, defaulting on
        unset or unparsable values.
        """
        try:
            return int(
                environ.get(
                    OTEL_BLRP_EXPORT_TIMEOUT,
                    _DEFAULT_EXPORT_TIMEOUT_MILLIS,
                )
            )
        except ValueError:
            _logger.exception(
                _ENV_VAR_INT_VALUE_ERROR_MESSAGE,
                OTEL_BLRP_EXPORT_TIMEOUT,
                _DEFAULT_EXPORT_TIMEOUT_MILLIS,
            )
            return _DEFAULT_EXPORT_TIMEOUT_MILLIS

    @staticmethod
    def _validate_arguments(
        max_queue_size, schedule_delay_millis, max_export_batch_size
    ):
        """Raise ValueError for non-positive sizes/delays or a batch size
        exceeding the queue size.
        """
        if max_queue_size <= 0:
            raise ValueError("max_queue_size must be a positive integer.")

        if schedule_delay_millis <= 0:
            raise ValueError("schedule_delay_millis must be positive.")

        if max_export_batch_size <= 0:
            raise ValueError(
                "max_export_batch_size must be a positive integer."
            )

        if max_export_batch_size > max_queue_size:
            raise ValueError(
                "max_export_batch_size must be less than or equal to max_queue_size."
            )
class InMemoryLogExporter(LogExporter):
    """Implementation of :class:`.LogExporter` that stores logs in memory.

    This class can be used for testing purposes. It stores the exported logs
    in a list in memory that can be retrieved using the
    :func:`.get_finished_logs` method.
    """

    def __init__(self):
        self._lock = threading.Lock()
        self._logs = []
        self._stopped = False

    def clear(self) -> None:
        """Discard all stored logs."""
        with self._lock:
            del self._logs[:]

    def get_finished_logs(self) -> typing.Tuple[LogData, ...]:
        """Return an immutable snapshot of everything exported so far."""
        with self._lock:
            return tuple(self._logs)

    def export(self, batch: typing.Sequence[LogData]) -> LogExportResult:
        # After shutdown() the exporter refuses new data.
        if self._stopped:
            return LogExportResult.FAILURE
        with self._lock:
            self._logs.extend(batch)
        return LogExportResult.SUCCESS

    def shutdown(self) -> None:
        self._stopped = True
+ +from opentelemetry.sdk._logs._internal.export import ( + BatchLogRecordProcessor, + ConsoleLogExporter, + LogExporter, + LogExportResult, + SimpleLogRecordProcessor, +) + +# The point module is not in the export directory to avoid a circular import. +from opentelemetry.sdk._logs._internal.export.in_memory_log_exporter import ( + InMemoryLogExporter, +) + +__all__ = [ + "BatchLogRecordProcessor", + "ConsoleLogExporter", + "LogExporter", + "LogExportResult", + "SimpleLogRecordProcessor", + "InMemoryLogExporter", +] diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/environment_variables/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/environment_variables/__init__.py new file mode 100644 index 00000000..f0980754 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/environment_variables/__init__.py @@ -0,0 +1,782 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +OTEL_SDK_DISABLED = "OTEL_SDK_DISABLED" +""" +.. envvar:: OTEL_SDK_DISABLED + +The :envvar:`OTEL_SDK_DISABLED` environment variable disables the SDK for all signals +Default: "false" +""" + +OTEL_RESOURCE_ATTRIBUTES = "OTEL_RESOURCE_ATTRIBUTES" +""" +.. envvar:: OTEL_RESOURCE_ATTRIBUTES + +The :envvar:`OTEL_RESOURCE_ATTRIBUTES` environment variable allows resource +attributes to be passed to the SDK at process invocation. 
The attributes from +:envvar:`OTEL_RESOURCE_ATTRIBUTES` are merged with those passed to +`Resource.create`, meaning :envvar:`OTEL_RESOURCE_ATTRIBUTES` takes *lower* +priority. Attributes should be in the format ``key1=value1,key2=value2``. +Additional details are available `in the specification +<https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/sdk.md#specifying-resource-information-via-an-environment-variable>`__. + +.. code-block:: console + + $ OTEL_RESOURCE_ATTRIBUTES="service.name=shoppingcard,will_be_overridden=foo" python - <<EOF + import pprint + from opentelemetry.sdk.resources import Resource + pprint.pprint(Resource.create({"will_be_overridden": "bar"}).attributes) + EOF + {'service.name': 'shoppingcard', + 'telemetry.sdk.language': 'python', + 'telemetry.sdk.name': 'opentelemetry', + 'telemetry.sdk.version': '0.13.dev0', + 'will_be_overridden': 'bar'} +""" + +OTEL_LOG_LEVEL = "OTEL_LOG_LEVEL" +""" +.. envvar:: OTEL_LOG_LEVEL + +The :envvar:`OTEL_LOG_LEVEL` environment variable sets the log level used by the SDK logger +Default: "info" +""" + +OTEL_TRACES_SAMPLER = "OTEL_TRACES_SAMPLER" +""" +.. envvar:: OTEL_TRACES_SAMPLER + +The :envvar:`OTEL_TRACES_SAMPLER` environment variable sets the sampler to be used for traces. +Sampling is a mechanism to control the noise introduced by OpenTelemetry by reducing the number +of traces collected and sent to the backend +Default: "parentbased_always_on" +""" + +OTEL_TRACES_SAMPLER_ARG = "OTEL_TRACES_SAMPLER_ARG" +""" +.. envvar:: OTEL_TRACES_SAMPLER_ARG + +The :envvar:`OTEL_TRACES_SAMPLER_ARG` environment variable will only be used if OTEL_TRACES_SAMPLER is set. +Each Sampler type defines its own expected input, if any. +Invalid or unrecognized input is ignored, +i.e. the SDK behaves as if OTEL_TRACES_SAMPLER_ARG is not set. +""" + +OTEL_BLRP_SCHEDULE_DELAY = "OTEL_BLRP_SCHEDULE_DELAY" +""" +.. 
envvar:: OTEL_BLRP_SCHEDULE_DELAY + +The :envvar:`OTEL_BLRP_SCHEDULE_DELAY` represents the delay interval between two consecutive exports of the BatchLogRecordProcessor. +Default: 5000 +""" + +OTEL_BLRP_EXPORT_TIMEOUT = "OTEL_BLRP_EXPORT_TIMEOUT" +""" +.. envvar:: OTEL_BLRP_EXPORT_TIMEOUT + +The :envvar:`OTEL_BLRP_EXPORT_TIMEOUT` represents the maximum allowed time to export data from the BatchLogRecordProcessor. +Default: 30000 +""" + +OTEL_BLRP_MAX_QUEUE_SIZE = "OTEL_BLRP_MAX_QUEUE_SIZE" +""" +.. envvar:: OTEL_BLRP_MAX_QUEUE_SIZE + +The :envvar:`OTEL_BLRP_MAX_QUEUE_SIZE` represents the maximum queue size for the data export of the BatchLogRecordProcessor. +Default: 2048 +""" + +OTEL_BLRP_MAX_EXPORT_BATCH_SIZE = "OTEL_BLRP_MAX_EXPORT_BATCH_SIZE" +""" +.. envvar:: OTEL_BLRP_MAX_EXPORT_BATCH_SIZE + +The :envvar:`OTEL_BLRP_MAX_EXPORT_BATCH_SIZE` represents the maximum batch size for the data export of the BatchLogRecordProcessor. +Default: 512 +""" + +OTEL_BSP_SCHEDULE_DELAY = "OTEL_BSP_SCHEDULE_DELAY" +""" +.. envvar:: OTEL_BSP_SCHEDULE_DELAY + +The :envvar:`OTEL_BSP_SCHEDULE_DELAY` represents the delay interval between two consecutive exports of the BatchSpanProcessor. +Default: 5000 +""" + +OTEL_BSP_EXPORT_TIMEOUT = "OTEL_BSP_EXPORT_TIMEOUT" +""" +.. envvar:: OTEL_BSP_EXPORT_TIMEOUT + +The :envvar:`OTEL_BSP_EXPORT_TIMEOUT` represents the maximum allowed time to export data from the BatchSpanProcessor. +Default: 30000 +""" + +OTEL_BSP_MAX_QUEUE_SIZE = "OTEL_BSP_MAX_QUEUE_SIZE" +""" +.. envvar:: OTEL_BSP_MAX_QUEUE_SIZE + +The :envvar:`OTEL_BSP_MAX_QUEUE_SIZE` represents the maximum queue size for the data export of the BatchSpanProcessor. +Default: 2048 +""" + +OTEL_BSP_MAX_EXPORT_BATCH_SIZE = "OTEL_BSP_MAX_EXPORT_BATCH_SIZE" +""" +.. envvar:: OTEL_BSP_MAX_EXPORT_BATCH_SIZE + +The :envvar:`OTEL_BSP_MAX_EXPORT_BATCH_SIZE` represents the maximum batch size for the data export of the BatchSpanProcessor. 
+Default: 512 +""" + +OTEL_ATTRIBUTE_COUNT_LIMIT = "OTEL_ATTRIBUTE_COUNT_LIMIT" +""" +.. envvar:: OTEL_ATTRIBUTE_COUNT_LIMIT + +The :envvar:`OTEL_ATTRIBUTE_COUNT_LIMIT` represents the maximum allowed attribute count for spans, events and links. +This limit is overridden by model specific limits such as OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT. +Default: 128 +""" + +OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT = "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT" +""" +.. envvar:: OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT + +The :envvar:`OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT` represents the maximum allowed attribute length. +""" + +OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT = "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT" +""" +.. envvar:: OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT + +The :envvar:`OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT` represents the maximum allowed event attribute count. +Default: 128 +""" + +OTEL_LINK_ATTRIBUTE_COUNT_LIMIT = "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT" +""" +.. envvar:: OTEL_LINK_ATTRIBUTE_COUNT_LIMIT + +The :envvar:`OTEL_LINK_ATTRIBUTE_COUNT_LIMIT` represents the maximum allowed link attribute count. +Default: 128 +""" + +OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT = "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT" +""" +.. envvar:: OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT + +The :envvar:`OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT` represents the maximum allowed span attribute count. +Default: 128 +""" + +OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT = ( + "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT" +) +""" +.. envvar:: OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT + +The :envvar:`OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT` represents the maximum allowed length +span attribute values can have. This takes precedence over :envvar:`OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT`. +""" + +OTEL_SPAN_EVENT_COUNT_LIMIT = "OTEL_SPAN_EVENT_COUNT_LIMIT" +""" +.. envvar:: OTEL_SPAN_EVENT_COUNT_LIMIT + +The :envvar:`OTEL_SPAN_EVENT_COUNT_LIMIT` represents the maximum allowed span event count. +Default: 128 +""" + +OTEL_SPAN_LINK_COUNT_LIMIT = "OTEL_SPAN_LINK_COUNT_LIMIT" +""" +.. 
envvar:: OTEL_SPAN_LINK_COUNT_LIMIT + +The :envvar:`OTEL_SPAN_LINK_COUNT_LIMIT` represents the maximum allowed span link count. +Default: 128 +""" + +OTEL_EXPORTER_JAEGER_AGENT_HOST = "OTEL_EXPORTER_JAEGER_AGENT_HOST" +""" +.. envvar:: OTEL_EXPORTER_JAEGER_AGENT_HOST + +The :envvar:`OTEL_EXPORTER_JAEGER_AGENT_HOST` represents the hostname for the Jaeger agent. +Default: "localhost" +""" + +OTEL_EXPORTER_JAEGER_AGENT_PORT = "OTEL_EXPORTER_JAEGER_AGENT_PORT" +""" +.. envvar:: OTEL_EXPORTER_JAEGER_AGENT_PORT + +The :envvar:`OTEL_EXPORTER_JAEGER_AGENT_PORT` represents the port for the Jaeger agent. +Default: 6831 +""" + +OTEL_EXPORTER_JAEGER_ENDPOINT = "OTEL_EXPORTER_JAEGER_ENDPOINT" +""" +.. envvar:: OTEL_EXPORTER_JAEGER_ENDPOINT + +The :envvar:`OTEL_EXPORTER_JAEGER_ENDPOINT` represents the HTTP endpoint for Jaeger traces. +Default: "http://localhost:14250" +""" + +OTEL_EXPORTER_JAEGER_USER = "OTEL_EXPORTER_JAEGER_USER" +""" +.. envvar:: OTEL_EXPORTER_JAEGER_USER + +The :envvar:`OTEL_EXPORTER_JAEGER_USER` represents the username to be used for HTTP basic authentication. +""" + +OTEL_EXPORTER_JAEGER_PASSWORD = "OTEL_EXPORTER_JAEGER_PASSWORD" +""" +.. envvar:: OTEL_EXPORTER_JAEGER_PASSWORD + +The :envvar:`OTEL_EXPORTER_JAEGER_PASSWORD` represents the password to be used for HTTP basic authentication. +""" + +OTEL_EXPORTER_JAEGER_TIMEOUT = "OTEL_EXPORTER_JAEGER_TIMEOUT" +""" +.. envvar:: OTEL_EXPORTER_JAEGER_TIMEOUT + +Maximum time the Jaeger exporter will wait for each batch export. +Default: 10 +""" + +OTEL_EXPORTER_ZIPKIN_ENDPOINT = "OTEL_EXPORTER_ZIPKIN_ENDPOINT" +""" +.. envvar:: OTEL_EXPORTER_ZIPKIN_ENDPOINT + +Zipkin collector endpoint to which the exporter will send data. This may +include a path (e.g. ``http://example.com:9411/api/v2/spans``). +""" + +OTEL_EXPORTER_ZIPKIN_TIMEOUT = "OTEL_EXPORTER_ZIPKIN_TIMEOUT" +""" +.. envvar:: OTEL_EXPORTER_ZIPKIN_TIMEOUT + +Maximum time (in seconds) the Zipkin exporter will wait for each batch export. 
+Default: 10 +""" + +OTEL_EXPORTER_OTLP_PROTOCOL = "OTEL_EXPORTER_OTLP_PROTOCOL" +""" +.. envvar:: OTEL_EXPORTER_OTLP_PROTOCOL + +The :envvar:`OTEL_EXPORTER_OTLP_PROTOCOL` represents the transport protocol for the +OTLP exporter. +""" + +OTEL_EXPORTER_OTLP_TRACES_PROTOCOL = "OTEL_EXPORTER_OTLP_TRACES_PROTOCOL" +""" +.. envvar:: OTEL_EXPORTER_OTLP_TRACES_PROTOCOL + +The :envvar:`OTEL_EXPORTER_OTLP_TRACES_PROTOCOL` represents the transport protocol for spans. +""" + +OTEL_EXPORTER_OTLP_METRICS_PROTOCOL = "OTEL_EXPORTER_OTLP_METRICS_PROTOCOL" +""" +.. envvar:: OTEL_EXPORTER_OTLP_METRICS_PROTOCOL + +The :envvar:`OTEL_EXPORTER_OTLP_METRICS_PROTOCOL` represents the transport protocol for metrics. +""" + +OTEL_EXPORTER_OTLP_LOGS_PROTOCOL = "OTEL_EXPORTER_OTLP_LOGS_PROTOCOL" +""" +.. envvar:: OTEL_EXPORTER_OTLP_LOGS_PROTOCOL + +The :envvar:`OTEL_EXPORTER_OTLP_LOGS_PROTOCOL` represents the transport protocol for logs. +""" + +OTEL_EXPORTER_OTLP_CERTIFICATE = "OTEL_EXPORTER_OTLP_CERTIFICATE" +""" +.. envvar:: OTEL_EXPORTER_OTLP_CERTIFICATE + +The :envvar:`OTEL_EXPORTER_OTLP_CERTIFICATE` stores the path to the certificate file for +TLS credentials of gRPC client. Should only be used for a secure connection. +""" + +OTEL_EXPORTER_OTLP_HEADERS = "OTEL_EXPORTER_OTLP_HEADERS" +""" +.. envvar:: OTEL_EXPORTER_OTLP_HEADERS + +The :envvar:`OTEL_EXPORTER_OTLP_HEADERS` contains the key-value pairs to be used as headers +associated with gRPC or HTTP requests. +""" + + +OTEL_EXPORTER_OTLP_COMPRESSION = "OTEL_EXPORTER_OTLP_COMPRESSION" +""" +.. envvar:: OTEL_EXPORTER_OTLP_COMPRESSION + +Specifies a gRPC compression method to be used in the OTLP exporters. +Possible values are: + +- ``gzip`` corresponding to `grpc.Compression.Gzip`. +- ``deflate`` corresponding to `grpc.Compression.Deflate`. + +If no ``OTEL_EXPORTER_OTLP_*COMPRESSION`` environment variable is present or +``compression`` argument passed to the exporter, the default +`grpc.Compression.NoCompression` will be used. 
Additional details are +available `in the specification +<https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md#opentelemetry-protocol-exporter>`__. +""" + +OTEL_EXPORTER_OTLP_TIMEOUT = "OTEL_EXPORTER_OTLP_TIMEOUT" +""" +.. envvar:: OTEL_EXPORTER_OTLP_TIMEOUT + +The :envvar:`OTEL_EXPORTER_OTLP_TIMEOUT` is the maximum time the OTLP exporter will wait for each batch export. +Default: 10 +""" + +OTEL_EXPORTER_OTLP_ENDPOINT = "OTEL_EXPORTER_OTLP_ENDPOINT" +""" +.. envvar:: OTEL_EXPORTER_OTLP_ENDPOINT + +The :envvar:`OTEL_EXPORTER_OTLP_ENDPOINT` target to which the exporter is going to send spans or metrics. +The endpoint MUST be a valid URL host, and MAY contain a scheme (http or https), port and path. +A scheme of https indicates a secure connection and takes precedence over the insecure configuration setting. +Default: "http://localhost:4317" +""" + +OTEL_EXPORTER_OTLP_INSECURE = "OTEL_EXPORTER_OTLP_INSECURE" +""" +.. envvar:: OTEL_EXPORTER_OTLP_INSECURE + +The :envvar:`OTEL_EXPORTER_OTLP_INSECURE` represents whether to enable client transport security for gRPC requests. +A scheme of https takes precedence over this configuration setting. +Default: False +""" + +OTEL_EXPORTER_OTLP_TRACES_INSECURE = "OTEL_EXPORTER_OTLP_TRACES_INSECURE" +""" +.. envvar:: OTEL_EXPORTER_OTLP_TRACES_INSECURE + +The :envvar:`OTEL_EXPORTER_OTLP_TRACES_INSECURE` represents whether to enable client transport security +for gRPC requests for spans. A scheme of https takes precedence over the this configuration setting. +Default: False +""" + + +OTEL_EXPORTER_OTLP_TRACES_ENDPOINT = "OTEL_EXPORTER_OTLP_TRACES_ENDPOINT" +""" +.. envvar:: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT + +The :envvar:`OTEL_EXPORTER_OTLP_TRACES_ENDPOINT` target to which the span exporter is going to send spans. +The endpoint MUST be a valid URL host, and MAY contain a scheme (http or https), port and path. 
+A scheme of https indicates a secure connection and takes precedence over this configuration setting. +""" + +OTEL_EXPORTER_OTLP_METRICS_ENDPOINT = "OTEL_EXPORTER_OTLP_METRICS_ENDPOINT" +""" +.. envvar:: OTEL_EXPORTER_OTLP_METRICS_ENDPOINT + +The :envvar:`OTEL_EXPORTER_OTLP_METRICS_ENDPOINT` target to which the metrics exporter is going to send metrics. +The endpoint MUST be a valid URL host, and MAY contain a scheme (http or https), port and path. +A scheme of https indicates a secure connection and takes precedence over this configuration setting. +""" + +OTEL_EXPORTER_OTLP_LOGS_ENDPOINT = "OTEL_EXPORTER_OTLP_LOGS_ENDPOINT" +""" +.. envvar:: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT + +The :envvar:`OTEL_EXPORTER_OTLP_LOGS_ENDPOINT` target to which the log exporter is going to send logs. +The endpoint MUST be a valid URL host, and MAY contain a scheme (http or https), port and path. +A scheme of https indicates a secure connection and takes precedence over this configuration setting. +""" + +OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE = "OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE" +""" +.. envvar:: OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE + +The :envvar:`OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE` stores the path to the certificate file for +TLS credentials of gRPC client for traces. Should only be used for a secure connection for tracing. +""" + +OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE = ( + "OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE" +) +""" +.. envvar:: OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE + +The :envvar:`OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE` stores the path to the certificate file for +TLS credentials of gRPC client for metrics. Should only be used for a secure connection for exporting metrics. +""" + +OTEL_EXPORTER_OTLP_CLIENT_KEY = "OTEL_EXPORTER_OTLP_CLIENT_KEY" +""" +.. envvar:: OTEL_EXPORTER_OTLP_CLIENT_KEY + +The :envvar:`OTEL_EXPORTER_OTLP_CLIENT_KEY` stores the path to the client private key to use +in mTLS communication in PEM format. 
+""" + +OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY = "OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY" +""" +.. envvar:: OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY + +The :envvar:`OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY` stores the path to the client private key to use +in mTLS communication in PEM format for traces. +""" + +OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY = "OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY" +""" +.. envvar:: OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY + +The :envvar:`OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY` stores the path to the client private key to use +in mTLS communication in PEM format for metrics. +""" + +OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY = "OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY" +""" +.. envvar:: OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY + +The :envvar:`OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY` stores the path to the client private key to use +in mTLS communication in PEM format for logs. +""" + +OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE = "OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE" +""" +.. envvar:: OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE + +The :envvar:`OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE` stores the path to the client certificate/chain trust for +clients private key to use in mTLS communication in PEM format. +""" + +OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE = ( + "OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE" +) +""" +.. envvar:: OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE + +The :envvar:`OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE` stores the path to the client certificate/chain trust for +clients private key to use in mTLS communication in PEM format for traces. +""" + +OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE = ( + "OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE" +) +""" +.. envvar:: OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE + +The :envvar:`OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE` stores the path to the client certificate/chain trust for +clients private key to use in mTLS communication in PEM format for metrics. 
+""" + +OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE = ( + "OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE" +) +""" +.. envvar:: OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE + +The :envvar:`OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE` stores the path to the client certificate/chain trust for +clients private key to use in mTLS communication in PEM format for logs. +""" + +OTEL_EXPORTER_OTLP_TRACES_HEADERS = "OTEL_EXPORTER_OTLP_TRACES_HEADERS" +""" +.. envvar:: OTEL_EXPORTER_OTLP_TRACES_HEADERS + +The :envvar:`OTEL_EXPORTER_OTLP_TRACES_HEADERS` contains the key-value pairs to be used as headers for spans +associated with gRPC or HTTP requests. +""" + +OTEL_EXPORTER_OTLP_METRICS_HEADERS = "OTEL_EXPORTER_OTLP_METRICS_HEADERS" +""" +.. envvar:: OTEL_EXPORTER_OTLP_METRICS_HEADERS + +The :envvar:`OTEL_EXPORTER_OTLP_METRICS_HEADERS` contains the key-value pairs to be used as headers for metrics +associated with gRPC or HTTP requests. +""" + +OTEL_EXPORTER_OTLP_LOGS_HEADERS = "OTEL_EXPORTER_OTLP_LOGS_HEADERS" +""" +.. envvar:: OTEL_EXPORTER_OTLP_LOGS_HEADERS + +The :envvar:`OTEL_EXPORTER_OTLP_LOGS_HEADERS` contains the key-value pairs to be used as headers for logs +associated with gRPC or HTTP requests. +""" + +OTEL_EXPORTER_OTLP_TRACES_COMPRESSION = "OTEL_EXPORTER_OTLP_TRACES_COMPRESSION" +""" +.. envvar:: OTEL_EXPORTER_OTLP_TRACES_COMPRESSION + +Same as :envvar:`OTEL_EXPORTER_OTLP_COMPRESSION` but only for the span +exporter. If both are present, this takes higher precedence. +""" + +OTEL_EXPORTER_OTLP_METRICS_COMPRESSION = ( + "OTEL_EXPORTER_OTLP_METRICS_COMPRESSION" +) +""" +.. envvar:: OTEL_EXPORTER_OTLP_METRICS_COMPRESSION + +Same as :envvar:`OTEL_EXPORTER_OTLP_COMPRESSION` but only for the metric +exporter. If both are present, this takes higher precedence. +""" + +OTEL_EXPORTER_OTLP_LOGS_COMPRESSION = "OTEL_EXPORTER_OTLP_LOGS_COMPRESSION" +""" +.. 
envvar:: OTEL_EXPORTER_OTLP_LOGS_COMPRESSION

Same as :envvar:`OTEL_EXPORTER_OTLP_COMPRESSION` but only for the log
exporter. If both are present, this takes higher precedence.
"""

OTEL_EXPORTER_OTLP_TRACES_TIMEOUT = "OTEL_EXPORTER_OTLP_TRACES_TIMEOUT"
"""
.. envvar:: OTEL_EXPORTER_OTLP_TRACES_TIMEOUT

The :envvar:`OTEL_EXPORTER_OTLP_TRACES_TIMEOUT` is the maximum time the OTLP exporter will
wait for each batch export for spans.
"""

OTEL_EXPORTER_OTLP_METRICS_TIMEOUT = "OTEL_EXPORTER_OTLP_METRICS_TIMEOUT"
"""
.. envvar:: OTEL_EXPORTER_OTLP_METRICS_TIMEOUT

The :envvar:`OTEL_EXPORTER_OTLP_METRICS_TIMEOUT` is the maximum time the OTLP exporter will
wait for each batch export for metrics.
"""

OTEL_EXPORTER_OTLP_METRICS_INSECURE = "OTEL_EXPORTER_OTLP_METRICS_INSECURE"
"""
.. envvar:: OTEL_EXPORTER_OTLP_METRICS_INSECURE

The :envvar:`OTEL_EXPORTER_OTLP_METRICS_INSECURE` represents whether to enable client transport security
for gRPC requests for metrics. A scheme of https takes precedence over this configuration setting.
Default: False
"""

OTEL_EXPORTER_OTLP_LOGS_INSECURE = "OTEL_EXPORTER_OTLP_LOGS_INSECURE"
"""
.. envvar:: OTEL_EXPORTER_OTLP_LOGS_INSECURE

The :envvar:`OTEL_EXPORTER_OTLP_LOGS_INSECURE` represents whether to enable client transport security
for gRPC requests for logs. A scheme of https takes precedence over this configuration setting.
Default: False
"""

# NOTE(review): duplicate re-definition — OTEL_EXPORTER_OTLP_METRICS_ENDPOINT is
# already assigned the same value earlier in this module. Harmless (same value),
# but the redundant assignment and docstring could be removed.
OTEL_EXPORTER_OTLP_METRICS_ENDPOINT = "OTEL_EXPORTER_OTLP_METRICS_ENDPOINT"
"""
.. envvar:: OTEL_EXPORTER_OTLP_METRICS_ENDPOINT

The :envvar:`OTEL_EXPORTER_OTLP_METRICS_ENDPOINT` target to which the metric exporter is going to send metrics.
The endpoint MUST be a valid URL host, and MAY contain a scheme (http or https), port and path.
A scheme of https indicates a secure connection and takes precedence over this configuration setting.
"""

# NOTE(review): duplicate re-definition of OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE
# (same value as the earlier definition); consider removing.
OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE = (
    "OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE"
)
"""
..
envvar:: OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE + +The :envvar:`OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE` stores the path to the certificate file for +TLS credentials of gRPC client for metrics. Should only be used for a secure connection for metrics. +""" + +OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE = "OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE" +""" +.. envvar:: OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE + +The :envvar:`OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE` stores the path to the certificate file for +TLS credentials of gRPC client for logs. Should only be used for a secure connection for logs. +""" + +OTEL_EXPORTER_OTLP_METRICS_HEADERS = "OTEL_EXPORTER_OTLP_METRICS_HEADERS" +""" +.. envvar:: OTEL_EXPORTER_OTLP_METRICS_HEADERS + +The :envvar:`OTEL_EXPORTER_OTLP_METRICS_HEADERS` contains the key-value pairs to be used as headers for metrics +associated with gRPC or HTTP requests. +""" + +OTEL_EXPORTER_OTLP_METRICS_TIMEOUT = "OTEL_EXPORTER_OTLP_METRICS_TIMEOUT" +""" +.. envvar:: OTEL_EXPORTER_OTLP_METRICS_TIMEOUT + +The :envvar:`OTEL_EXPORTER_OTLP_METRICS_TIMEOUT` is the maximum time the OTLP exporter will +wait for each batch export for metrics. +""" + +OTEL_EXPORTER_OTLP_LOGS_TIMEOUT = "OTEL_EXPORTER_OTLP_LOGS_TIMEOUT" +""" +.. envvar:: OTEL_EXPORTER_OTLP_LOGS_TIMEOUT + +The :envvar:`OTEL_EXPORTER_OTLP_LOGS_TIMEOUT` is the maximum time the OTLP exporter will +wait for each batch export for logs. +""" + +OTEL_EXPORTER_OTLP_METRICS_COMPRESSION = ( + "OTEL_EXPORTER_OTLP_METRICS_COMPRESSION" +) +""" +.. envvar:: OTEL_EXPORTER_OTLP_METRICS_COMPRESSION + +Same as :envvar:`OTEL_EXPORTER_OTLP_COMPRESSION` but only for the metric +exporter. If both are present, this takes higher precedence. +""" + +OTEL_EXPORTER_JAEGER_CERTIFICATE = "OTEL_EXPORTER_JAEGER_CERTIFICATE" +""" +.. envvar:: OTEL_EXPORTER_JAEGER_CERTIFICATE + +The :envvar:`OTEL_EXPORTER_JAEGER_CERTIFICATE` stores the path to the certificate file for +TLS credentials of gRPC client for Jaeger. 
Should only be used for a secure connection with Jaeger.
"""

OTEL_EXPORTER_JAEGER_AGENT_SPLIT_OVERSIZED_BATCHES = (
    "OTEL_EXPORTER_JAEGER_AGENT_SPLIT_OVERSIZED_BATCHES"
)
"""
.. envvar:: OTEL_EXPORTER_JAEGER_AGENT_SPLIT_OVERSIZED_BATCHES

The :envvar:`OTEL_EXPORTER_JAEGER_AGENT_SPLIT_OVERSIZED_BATCHES` is a boolean flag to determine whether
to split a large span batch to adhere to the udp packet size limit.
"""

OTEL_SERVICE_NAME = "OTEL_SERVICE_NAME"
"""
.. envvar:: OTEL_SERVICE_NAME

Convenience environment variable for setting the service name resource attribute.
The following two environment variables have the same effect

.. code-block:: console

    OTEL_SERVICE_NAME=my-python-service

    OTEL_RESOURCE_ATTRIBUTES=service.name=my-python-service


If both are set, :envvar:`OTEL_SERVICE_NAME` takes precedence.
"""


_OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED = (
    "OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED"
)
"""
.. envvar:: OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED

The :envvar:`OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED` environment variable allows users to
enable/disable the auto instrumentation for the python logging module.
Default: False

Note: Logs SDK and its related settings are experimental.
"""


OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE = (
    "OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE"
)
"""
.. envvar:: OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE

The :envvar:`OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE` environment
variable allows users to set the default aggregation temporality policy to use
on the basis of instrument kind. The valid (case-insensitive) values are:

``CUMULATIVE``: Use ``CUMULATIVE`` aggregation temporality for all instrument kinds.
``DELTA``: Use ``DELTA`` aggregation temporality for ``Counter``, ``Asynchronous Counter`` and ``Histogram``.
Use ``CUMULATIVE`` aggregation temporality for ``UpDownCounter`` and ``Asynchronous UpDownCounter``.
``LOWMEMORY``: Use ``DELTA`` aggregation temporality for ``Counter`` and ``Histogram``.
Use ``CUMULATIVE`` aggregation temporality for ``UpDownCounter``, ``AsynchronousCounter`` and ``Asynchronous UpDownCounter``.
"""

OTEL_EXPORTER_JAEGER_GRPC_INSECURE = "OTEL_EXPORTER_JAEGER_GRPC_INSECURE"
"""
.. envvar:: OTEL_EXPORTER_JAEGER_GRPC_INSECURE

The :envvar:`OTEL_EXPORTER_JAEGER_GRPC_INSECURE` is a boolean flag; set it to True if the collector has no encryption or authentication.
"""

OTEL_METRIC_EXPORT_INTERVAL = "OTEL_METRIC_EXPORT_INTERVAL"
"""
.. envvar:: OTEL_METRIC_EXPORT_INTERVAL

The :envvar:`OTEL_METRIC_EXPORT_INTERVAL` is the time interval (in milliseconds) between the start of two export attempts.
"""

OTEL_METRIC_EXPORT_TIMEOUT = "OTEL_METRIC_EXPORT_TIMEOUT"
"""
.. envvar:: OTEL_METRIC_EXPORT_TIMEOUT

The :envvar:`OTEL_METRIC_EXPORT_TIMEOUT` is the maximum allowed time (in milliseconds) to export data.
"""

# NOTE(review): OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY is already defined earlier
# in this module with the same value; this re-definition is harmless but redundant.
OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY = "OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY"
"""
.. envvar:: OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY

The :envvar:`OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY` is the clients private key to use in mTLS communication in PEM format.
"""

OTEL_METRICS_EXEMPLAR_FILTER = "OTEL_METRICS_EXEMPLAR_FILTER"
"""
.. envvar:: OTEL_METRICS_EXEMPLAR_FILTER

The :envvar:`OTEL_METRICS_EXEMPLAR_FILTER` is the filter for which measurements can become Exemplars.
"""

OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION = (
    "OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION"
)
"""
.. envvar:: OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION

The :envvar:`OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION` is the default aggregation to use for histogram instruments.
"""

OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE = (
    "OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE"
)
"""
..
envvar:: OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE + +The :envvar:`OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE` is the client certificate/chain trust for clients private key to use in mTLS communication in PEM format. +""" + +OTEL_EXPERIMENTAL_RESOURCE_DETECTORS = "OTEL_EXPERIMENTAL_RESOURCE_DETECTORS" +""" +.. envvar:: OTEL_EXPERIMENTAL_RESOURCE_DETECTORS + +The :envvar:`OTEL_EXPERIMENTAL_RESOURCE_DETECTORS` is a comma-separated string +of names of resource detectors. These names must be the same as the names of +entry points for the ```opentelemetry_resource_detector``` entry point. This is an +experimental feature and the name of this variable and its behavior can change +in a non-backwards compatible way. +""" + +OTEL_EXPORTER_PROMETHEUS_HOST = "OTEL_EXPORTER_PROMETHEUS_HOST" +""" +.. envvar:: OTEL_EXPORTER_PROMETHEUS_HOST + +The :envvar:`OTEL_EXPORTER_PROMETHEUS_HOST` environment variable configures the host used by +the Prometheus exporter. +Default: "localhost" + +This is an experimental environment variable and the name of this variable and its behavior can +change in a non-backwards compatible way. +""" + +OTEL_EXPORTER_PROMETHEUS_PORT = "OTEL_EXPORTER_PROMETHEUS_PORT" +""" +.. envvar:: OTEL_EXPORTER_PROMETHEUS_PORT + +The :envvar:`OTEL_EXPORTER_PROMETHEUS_PORT` environment variable configures the port used by +the Prometheus exporter. +Default: 9464 + +This is an experimental environment variable and the name of this variable and its behavior can +change in a non-backwards compatible way. 
+""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/error_handler/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/error_handler/__init__.py new file mode 100644 index 00000000..3e0e778f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/error_handler/__init__.py @@ -0,0 +1,143 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Global Error Handler + +This module provides a global error handler and an interface that allows +error handlers to be registered with the global error handler via entry points. +A default error handler is also provided. + +To use this feature, users can create an error handler that is registered +using the ``opentelemetry_error_handler`` entry point. A class is to be +registered in this entry point, this class must inherit from the +``opentelemetry.sdk.error_handler.ErrorHandler`` class and implement the +corresponding ``handle`` method. This method will receive the exception object +that is to be handled. The error handler class should also inherit from the +exception classes it wants to handle. For example, this would be an error +handler that handles ``ZeroDivisionError``: + +.. 
code:: python + + from opentelemetry.sdk.error_handler import ErrorHandler + from logging import getLogger + + logger = getLogger(__name__) + + + class ErrorHandler0(ErrorHandler, ZeroDivisionError): + + def _handle(self, error: Exception, *args, **kwargs): + + logger.exception("ErrorHandler0 handling a ZeroDivisionError") + +To use the global error handler, just instantiate it as a context manager where +you want exceptions to be handled: + + +.. code:: python + + from opentelemetry.sdk.error_handler import GlobalErrorHandler + + with GlobalErrorHandler(): + 1 / 0 + +If the class of the exception raised in the scope of the ``GlobalErrorHandler`` +object is not parent of any registered error handler, then the default error +handler will handle the exception. This default error handler will only log the +exception to standard logging, the exception won't be raised any further. +""" + +from abc import ABC, abstractmethod +from logging import getLogger + +from opentelemetry.util._importlib_metadata import entry_points + +logger = getLogger(__name__) + + +class ErrorHandler(ABC): + @abstractmethod + def _handle(self, error: Exception, *args, **kwargs): + """ + Handle an exception + """ + + +class _DefaultErrorHandler(ErrorHandler): + """ + Default error handler + + This error handler just logs the exception using standard logging. + """ + + # pylint: disable=useless-return + def _handle(self, error: Exception, *args, **kwargs): + logger.exception("Error handled by default error handler: ") + return None + + +class GlobalErrorHandler: + """ + Global error handler + + This is a singleton class that can be instantiated anywhere to get the + global error handler. This object provides a handle method that receives + an exception object that will be handled by the registered error handlers. 
    """

    # Shared singleton instance; lazily created on first instantiation.
    _instance = None

    def __new__(cls) -> "GlobalErrorHandler":
        # Singleton: every `GlobalErrorHandler()` call returns the same object.
        if cls._instance is None:
            cls._instance = super().__new__(cls)

        return cls._instance

    def __enter__(self):
        # Nothing to set up; returns None, so `with GlobalErrorHandler() as x`
        # binds x to None — the handler is only meaningful via __exit__.
        pass

    # pylint: disable=no-self-use
    def __exit__(self, exc_type, exc_value, traceback):
        # No exception raised in the `with` block: nothing to handle.
        if exc_value is None:
            return None

        plugin_handled = False

        # Discover registered error handlers via the
        # "opentelemetry_error_handler" entry-point group.
        error_handler_entry_points = entry_points(
            group="opentelemetry_error_handler"
        )

        for error_handler_entry_point in error_handler_entry_points:
            error_handler_class = error_handler_entry_point.load()

            # A handler opts into an exception type by inheriting from it
            # (see the module docstring), hence the issubclass check against
            # the raised exception's class.
            if issubclass(error_handler_class, exc_value.__class__):
                try:
                    error_handler_class()._handle(exc_value)
                    plugin_handled = True

                # pylint: disable=broad-exception-caught
                except Exception as error_handling_error:
                    # A failing handler must not propagate its own error;
                    # log it and keep trying the remaining handlers.
                    logger.exception(
                        "%s error while handling error"
                        " %s by error handler %s",
                        error_handling_error.__class__.__name__,
                        exc_value.__class__.__name__,
                        error_handler_class.__name__,
                    )

        # Fall back to the default (log-only) handler when no plugin matched.
        if not plugin_handled:
            _DefaultErrorHandler()._handle(exc_value)

        # Truthy return tells the interpreter the exception has been handled:
        # it is suppressed and NOT re-raised past the `with` block.
        return True
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/__init__.py
new file mode 100644
index 00000000..b022f129
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/__init__.py
@@ -0,0 +1,57 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


# Public re-export surface of the metrics SDK: the concrete implementations
# live in the private `_internal` package; this module defines the stable API.
from opentelemetry.sdk.metrics._internal import Meter, MeterProvider
from opentelemetry.sdk.metrics._internal.exceptions import MetricsTimeoutError
from opentelemetry.sdk.metrics._internal.exemplar import (
    AlignedHistogramBucketExemplarReservoir,
    AlwaysOffExemplarFilter,
    AlwaysOnExemplarFilter,
    Exemplar,
    ExemplarFilter,
    ExemplarReservoir,
    SimpleFixedSizeExemplarReservoir,
    TraceBasedExemplarFilter,
)
from opentelemetry.sdk.metrics._internal.instrument import (
    Counter,
    Histogram,
    ObservableCounter,
    ObservableGauge,
    ObservableUpDownCounter,
    UpDownCounter,
)
# NOTE(review): Gauge is deliberately re-exported under the private alias
# `_Gauge` (and listed as such in __all__) — presumably because the Gauge API
# is not yet considered stable; confirm before exposing it publicly.
from opentelemetry.sdk.metrics._internal.instrument import Gauge as _Gauge

__all__ = [
    "AlignedHistogramBucketExemplarReservoir",
    "AlwaysOnExemplarFilter",
    "AlwaysOffExemplarFilter",
    "Exemplar",
    "ExemplarFilter",
    "ExemplarReservoir",
    "Meter",
    "MeterProvider",
    "MetricsTimeoutError",
    "Counter",
    "Histogram",
    "_Gauge",
    "ObservableCounter",
    "ObservableGauge",
    "ObservableUpDownCounter",
    "SimpleFixedSizeExemplarReservoir",
    "UpDownCounter",
    "TraceBasedExemplarFilter",
]
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/__init__.py
new file mode 100644
index 00000000..faa0959f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/__init__.py
@@ -0,0 +1,582 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. + +import weakref +from atexit import register, unregister +from logging import getLogger +from os import environ +from threading import Lock +from time import time_ns +from typing import Optional, Sequence + +# This kind of import is needed to avoid Sphinx errors. +import opentelemetry.sdk.metrics +from opentelemetry.metrics import Counter as APICounter +from opentelemetry.metrics import Histogram as APIHistogram +from opentelemetry.metrics import Meter as APIMeter +from opentelemetry.metrics import MeterProvider as APIMeterProvider +from opentelemetry.metrics import NoOpMeter +from opentelemetry.metrics import ObservableCounter as APIObservableCounter +from opentelemetry.metrics import ObservableGauge as APIObservableGauge +from opentelemetry.metrics import ( + ObservableUpDownCounter as APIObservableUpDownCounter, +) +from opentelemetry.metrics import UpDownCounter as APIUpDownCounter +from opentelemetry.metrics import _Gauge as APIGauge +from opentelemetry.sdk.environment_variables import ( + OTEL_METRICS_EXEMPLAR_FILTER, + OTEL_SDK_DISABLED, +) +from opentelemetry.sdk.metrics._internal.exceptions import MetricsTimeoutError +from opentelemetry.sdk.metrics._internal.exemplar import ( + AlwaysOffExemplarFilter, + AlwaysOnExemplarFilter, + ExemplarFilter, + TraceBasedExemplarFilter, +) +from opentelemetry.sdk.metrics._internal.instrument import ( + _Counter, + _Gauge, + _Histogram, + _ObservableCounter, + _ObservableGauge, + _ObservableUpDownCounter, + _UpDownCounter, +) +from opentelemetry.sdk.metrics._internal.measurement_consumer import ( + MeasurementConsumer, + SynchronousMeasurementConsumer, +) +from opentelemetry.sdk.metrics._internal.sdk_configuration import ( + SdkConfiguration, +) +from opentelemetry.sdk.resources import Resource +from opentelemetry.sdk.util.instrumentation import InstrumentationScope +from opentelemetry.util._once import Once +from 
opentelemetry.util.types import ( + Attributes, +) + +_logger = getLogger(__name__) + + +class Meter(APIMeter): + """See `opentelemetry.metrics.Meter`.""" + + def __init__( + self, + instrumentation_scope: InstrumentationScope, + measurement_consumer: MeasurementConsumer, + ): + super().__init__( + name=instrumentation_scope.name, + version=instrumentation_scope.version, + schema_url=instrumentation_scope.schema_url, + ) + self._instrumentation_scope = instrumentation_scope + self._measurement_consumer = measurement_consumer + self._instrument_id_instrument = {} + self._instrument_id_instrument_lock = Lock() + + def create_counter(self, name, unit="", description="") -> APICounter: + status = self._register_instrument(name, _Counter, unit, description) + + if status.conflict: + # FIXME #2558 go through all views here and check if this + # instrument registration conflict can be fixed. If it can be, do + # not log the following warning. + self._log_instrument_registration_conflict( + name, + APICounter.__name__, + unit, + description, + status, + ) + if status.already_registered: + with self._instrument_id_instrument_lock: + return self._instrument_id_instrument[status.instrument_id] + + instrument = _Counter( + name, + self._instrumentation_scope, + self._measurement_consumer, + unit, + description, + ) + + with self._instrument_id_instrument_lock: + self._instrument_id_instrument[status.instrument_id] = instrument + return instrument + + def create_up_down_counter( + self, name, unit="", description="" + ) -> APIUpDownCounter: + status = self._register_instrument( + name, _UpDownCounter, unit, description + ) + + if status.conflict: + # FIXME #2558 go through all views here and check if this + # instrument registration conflict can be fixed. If it can be, do + # not log the following warning. 
+ self._log_instrument_registration_conflict( + name, + APIUpDownCounter.__name__, + unit, + description, + status, + ) + if status.already_registered: + with self._instrument_id_instrument_lock: + return self._instrument_id_instrument[status.instrument_id] + + instrument = _UpDownCounter( + name, + self._instrumentation_scope, + self._measurement_consumer, + unit, + description, + ) + + with self._instrument_id_instrument_lock: + self._instrument_id_instrument[status.instrument_id] = instrument + return instrument + + def create_observable_counter( + self, + name, + callbacks=None, + unit="", + description="", + ) -> APIObservableCounter: + status = self._register_instrument( + name, _ObservableCounter, unit, description + ) + + if status.conflict: + # FIXME #2558 go through all views here and check if this + # instrument registration conflict can be fixed. If it can be, do + # not log the following warning. + self._log_instrument_registration_conflict( + name, + APIObservableCounter.__name__, + unit, + description, + status, + ) + if status.already_registered: + with self._instrument_id_instrument_lock: + return self._instrument_id_instrument[status.instrument_id] + + instrument = _ObservableCounter( + name, + self._instrumentation_scope, + self._measurement_consumer, + callbacks, + unit, + description, + ) + + self._measurement_consumer.register_asynchronous_instrument(instrument) + + with self._instrument_id_instrument_lock: + self._instrument_id_instrument[status.instrument_id] = instrument + return instrument + + def create_histogram( + self, + name: str, + unit: str = "", + description: str = "", + *, + explicit_bucket_boundaries_advisory: Optional[Sequence[float]] = None, + ) -> APIHistogram: + if explicit_bucket_boundaries_advisory is not None: + invalid_advisory = False + if isinstance(explicit_bucket_boundaries_advisory, Sequence): + try: + invalid_advisory = not ( + all( + isinstance(e, (float, int)) + for e in explicit_bucket_boundaries_advisory + ) + 
) + except (KeyError, TypeError): + invalid_advisory = True + else: + invalid_advisory = True + + if invalid_advisory: + explicit_bucket_boundaries_advisory = None + _logger.warning( + "explicit_bucket_boundaries_advisory must be a sequence of numbers" + ) + + status = self._register_instrument( + name, + _Histogram, + unit, + description, + explicit_bucket_boundaries_advisory, + ) + + if status.conflict: + # FIXME #2558 go through all views here and check if this + # instrument registration conflict can be fixed. If it can be, do + # not log the following warning. + self._log_instrument_registration_conflict( + name, + APIHistogram.__name__, + unit, + description, + status, + ) + if status.already_registered: + with self._instrument_id_instrument_lock: + return self._instrument_id_instrument[status.instrument_id] + + instrument = _Histogram( + name, + self._instrumentation_scope, + self._measurement_consumer, + unit, + description, + explicit_bucket_boundaries_advisory, + ) + with self._instrument_id_instrument_lock: + self._instrument_id_instrument[status.instrument_id] = instrument + return instrument + + def create_gauge(self, name, unit="", description="") -> APIGauge: + status = self._register_instrument(name, _Gauge, unit, description) + + if status.conflict: + # FIXME #2558 go through all views here and check if this + # instrument registration conflict can be fixed. If it can be, do + # not log the following warning. 
+ self._log_instrument_registration_conflict( + name, + APIGauge.__name__, + unit, + description, + status, + ) + if status.already_registered: + with self._instrument_id_instrument_lock: + return self._instrument_id_instrument[status.instrument_id] + + instrument = _Gauge( + name, + self._instrumentation_scope, + self._measurement_consumer, + unit, + description, + ) + + with self._instrument_id_instrument_lock: + self._instrument_id_instrument[status.instrument_id] = instrument + return instrument + + def create_observable_gauge( + self, name, callbacks=None, unit="", description="" + ) -> APIObservableGauge: + status = self._register_instrument( + name, _ObservableGauge, unit, description + ) + + if status.conflict: + # FIXME #2558 go through all views here and check if this + # instrument registration conflict can be fixed. If it can be, do + # not log the following warning. + self._log_instrument_registration_conflict( + name, + APIObservableGauge.__name__, + unit, + description, + status, + ) + if status.already_registered: + with self._instrument_id_instrument_lock: + return self._instrument_id_instrument[status.instrument_id] + + instrument = _ObservableGauge( + name, + self._instrumentation_scope, + self._measurement_consumer, + callbacks, + unit, + description, + ) + + self._measurement_consumer.register_asynchronous_instrument(instrument) + + with self._instrument_id_instrument_lock: + self._instrument_id_instrument[status.instrument_id] = instrument + return instrument + + def create_observable_up_down_counter( + self, name, callbacks=None, unit="", description="" + ) -> APIObservableUpDownCounter: + status = self._register_instrument( + name, _ObservableUpDownCounter, unit, description + ) + + if status.conflict: + # FIXME #2558 go through all views here and check if this + # instrument registration conflict can be fixed. If it can be, do + # not log the following warning. 
+ self._log_instrument_registration_conflict( + name, + APIObservableUpDownCounter.__name__, + unit, + description, + status, + ) + if status.already_registered: + with self._instrument_id_instrument_lock: + return self._instrument_id_instrument[status.instrument_id] + + instrument = _ObservableUpDownCounter( + name, + self._instrumentation_scope, + self._measurement_consumer, + callbacks, + unit, + description, + ) + + self._measurement_consumer.register_asynchronous_instrument(instrument) + + with self._instrument_id_instrument_lock: + self._instrument_id_instrument[status.instrument_id] = instrument + return instrument + + +def _get_exemplar_filter(exemplar_filter: str) -> ExemplarFilter: + if exemplar_filter == "trace_based": + return TraceBasedExemplarFilter() + if exemplar_filter == "always_on": + return AlwaysOnExemplarFilter() + if exemplar_filter == "always_off": + return AlwaysOffExemplarFilter() + msg = f"Unknown exemplar filter '{exemplar_filter}'." + raise ValueError(msg) + + +class MeterProvider(APIMeterProvider): + r"""See `opentelemetry.metrics.MeterProvider`. + + Args: + metric_readers: Register metric readers to collect metrics from the SDK + on demand. Each :class:`opentelemetry.sdk.metrics.export.MetricReader` is + completely independent and will collect separate streams of + metrics. TODO: reference ``PeriodicExportingMetricReader`` usage with push + exporters here. + resource: The resource representing what the metrics emitted from the SDK pertain to. + shutdown_on_exit: If true, registers an `atexit` handler to call + `MeterProvider.shutdown` + views: The views to configure the metric output the SDK + + By default, instruments which do not match any :class:`opentelemetry.sdk.metrics.view.View` (or if no :class:`opentelemetry.sdk.metrics.view.View`\ s + are provided) will report metrics with the default aggregation for the + instrument's kind. 
To disable instruments by default, configure a match-all + :class:`opentelemetry.sdk.metrics.view.View` with `DropAggregation` and then create :class:`opentelemetry.sdk.metrics.view.View`\ s to re-enable + individual instruments: + + .. code-block:: python + :caption: Disable default views + + MeterProvider( + views=[ + View(instrument_name="*", aggregation=DropAggregation()), + View(instrument_name="mycounter"), + ], + # ... + ) + """ + + _all_metric_readers_lock = Lock() + _all_metric_readers = weakref.WeakSet() + + def __init__( + self, + metric_readers: Sequence[ + "opentelemetry.sdk.metrics.export.MetricReader" + ] = (), + resource: Optional[Resource] = None, + exemplar_filter: Optional[ExemplarFilter] = None, + shutdown_on_exit: bool = True, + views: Sequence["opentelemetry.sdk.metrics.view.View"] = (), + ): + self._lock = Lock() + self._meter_lock = Lock() + self._atexit_handler = None + if resource is None: + resource = Resource.create({}) + self._sdk_config = SdkConfiguration( + exemplar_filter=( + exemplar_filter + or _get_exemplar_filter( + environ.get(OTEL_METRICS_EXEMPLAR_FILTER, "trace_based") + ) + ), + resource=resource, + metric_readers=metric_readers, + views=views, + ) + self._measurement_consumer = SynchronousMeasurementConsumer( + sdk_config=self._sdk_config + ) + disabled = environ.get(OTEL_SDK_DISABLED, "") + self._disabled = disabled.lower().strip() == "true" + + if shutdown_on_exit: + self._atexit_handler = register(self.shutdown) + + self._meters = {} + self._shutdown_once = Once() + self._shutdown = False + + for metric_reader in self._sdk_config.metric_readers: + with self._all_metric_readers_lock: + if metric_reader in self._all_metric_readers: + # pylint: disable=broad-exception-raised + raise Exception( + f"MetricReader {metric_reader} has been registered " + "already in other MeterProvider instance" + ) + + self._all_metric_readers.add(metric_reader) + + metric_reader._set_collect_callback( + self._measurement_consumer.collect + ) + 
+ def force_flush(self, timeout_millis: float = 10_000) -> bool: + deadline_ns = time_ns() + timeout_millis * 10**6 + + metric_reader_error = {} + + for metric_reader in self._sdk_config.metric_readers: + current_ts = time_ns() + try: + if current_ts >= deadline_ns: + raise MetricsTimeoutError( + "Timed out while flushing metric readers" + ) + metric_reader.force_flush( + timeout_millis=(deadline_ns - current_ts) / 10**6 + ) + + # pylint: disable=broad-exception-caught + except Exception as error: + metric_reader_error[metric_reader] = error + + if metric_reader_error: + metric_reader_error_string = "\n".join( + [ + f"{metric_reader.__class__.__name__}: {repr(error)}" + for metric_reader, error in metric_reader_error.items() + ] + ) + + # pylint: disable=broad-exception-raised + raise Exception( + "MeterProvider.force_flush failed because the following " + "metric readers failed during collect:\n" + f"{metric_reader_error_string}" + ) + return True + + def shutdown(self, timeout_millis: float = 30_000): + deadline_ns = time_ns() + timeout_millis * 10**6 + + def _shutdown(): + self._shutdown = True + + did_shutdown = self._shutdown_once.do_once(_shutdown) + + if not did_shutdown: + _logger.warning("shutdown can only be called once") + return + + metric_reader_error = {} + + for metric_reader in self._sdk_config.metric_readers: + current_ts = time_ns() + try: + if current_ts >= deadline_ns: + # pylint: disable=broad-exception-raised + raise Exception( + "Didn't get to execute, deadline already exceeded" + ) + metric_reader.shutdown( + timeout_millis=(deadline_ns - current_ts) / 10**6 + ) + + # pylint: disable=broad-exception-caught + except Exception as error: + metric_reader_error[metric_reader] = error + + if self._atexit_handler is not None: + unregister(self._atexit_handler) + self._atexit_handler = None + + if metric_reader_error: + metric_reader_error_string = "\n".join( + [ + f"{metric_reader.__class__.__name__}: {repr(error)}" + for metric_reader, error in 
metric_reader_error.items() + ] + ) + + # pylint: disable=broad-exception-raised + raise Exception( + ( + "MeterProvider.shutdown failed because the following " + "metric readers failed during shutdown:\n" + f"{metric_reader_error_string}" + ) + ) + + def get_meter( + self, + name: str, + version: Optional[str] = None, + schema_url: Optional[str] = None, + attributes: Optional[Attributes] = None, + ) -> Meter: + if self._disabled: + return NoOpMeter(name, version=version, schema_url=schema_url) + + if self._shutdown: + _logger.warning( + "A shutdown `MeterProvider` can not provide a `Meter`" + ) + return NoOpMeter(name, version=version, schema_url=schema_url) + + if not name: + _logger.warning("Meter name cannot be None or empty.") + return NoOpMeter(name, version=version, schema_url=schema_url) + + info = InstrumentationScope(name, version, schema_url, attributes) + with self._meter_lock: + if not self._meters.get(info): + # FIXME #2558 pass SDKConfig object to meter so that the meter + # has access to views. + self._meters[info] = Meter( + info, + self._measurement_consumer, + ) + return self._meters[info] diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py new file mode 100644 index 00000000..be81d70e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py @@ -0,0 +1,153 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from logging import getLogger +from threading import Lock +from time import time_ns +from typing import Dict, List, Optional, Sequence + +from opentelemetry.metrics import Instrument +from opentelemetry.sdk.metrics._internal.aggregation import ( + Aggregation, + DefaultAggregation, + _Aggregation, + _SumAggregation, +) +from opentelemetry.sdk.metrics._internal.export import AggregationTemporality +from opentelemetry.sdk.metrics._internal.measurement import Measurement +from opentelemetry.sdk.metrics._internal.point import DataPointT +from opentelemetry.sdk.metrics._internal.view import View + +_logger = getLogger(__name__) + + +class _ViewInstrumentMatch: + def __init__( + self, + view: View, + instrument: Instrument, + instrument_class_aggregation: Dict[type, Aggregation], + ): + self._view = view + self._instrument = instrument + self._attributes_aggregation: Dict[frozenset, _Aggregation] = {} + self._lock = Lock() + self._instrument_class_aggregation = instrument_class_aggregation + self._name = self._view._name or self._instrument.name + self._description = ( + self._view._description or self._instrument.description + ) + if not isinstance(self._view._aggregation, DefaultAggregation): + self._aggregation = self._view._aggregation._create_aggregation( + self._instrument, + None, + self._view._exemplar_reservoir_factory, + 0, + ) + else: + self._aggregation = self._instrument_class_aggregation[ + self._instrument.__class__ + ]._create_aggregation( + self._instrument, + None, + self._view._exemplar_reservoir_factory, + 0, + ) + + def conflicts(self, 
other: "_ViewInstrumentMatch") -> bool: + # pylint: disable=protected-access + + result = ( + self._name == other._name + and self._instrument.unit == other._instrument.unit + # The aggregation class is being used here instead of data point + # type since they are functionally equivalent. + and self._aggregation.__class__ == other._aggregation.__class__ + ) + if isinstance(self._aggregation, _SumAggregation): + result = ( + result + and self._aggregation._instrument_is_monotonic + == other._aggregation._instrument_is_monotonic + and self._aggregation._instrument_aggregation_temporality + == other._aggregation._instrument_aggregation_temporality + ) + + return result + + # pylint: disable=protected-access + def consume_measurement( + self, measurement: Measurement, should_sample_exemplar: bool = True + ) -> None: + if self._view._attribute_keys is not None: + attributes = {} + + for key, value in (measurement.attributes or {}).items(): + if key in self._view._attribute_keys: + attributes[key] = value + elif measurement.attributes is not None: + attributes = measurement.attributes + else: + attributes = {} + + aggr_key = frozenset(attributes.items()) + + if aggr_key not in self._attributes_aggregation: + with self._lock: + if aggr_key not in self._attributes_aggregation: + if not isinstance( + self._view._aggregation, DefaultAggregation + ): + aggregation = ( + self._view._aggregation._create_aggregation( + self._instrument, + attributes, + self._view._exemplar_reservoir_factory, + time_ns(), + ) + ) + else: + aggregation = self._instrument_class_aggregation[ + self._instrument.__class__ + ]._create_aggregation( + self._instrument, + attributes, + self._view._exemplar_reservoir_factory, + time_ns(), + ) + self._attributes_aggregation[aggr_key] = aggregation + + self._attributes_aggregation[aggr_key].aggregate( + measurement, should_sample_exemplar + ) + + def collect( + self, + collection_aggregation_temporality: AggregationTemporality, + collection_start_nanos: int, 
+ ) -> Optional[Sequence[DataPointT]]: + data_points: List[DataPointT] = [] + with self._lock: + for aggregation in self._attributes_aggregation.values(): + data_point = aggregation.collect( + collection_aggregation_temporality, collection_start_nanos + ) + if data_point is not None: + data_points.append(data_point) + + # Returning here None instead of an empty list because the caller + # does not consume a sequence and to be consistent with the rest of + # collect methods that also return None. + return data_points or None diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/aggregation.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/aggregation.py new file mode 100644 index 00000000..8443d951 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/aggregation.py @@ -0,0 +1,1475 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# pylint: disable=too-many-lines + +from abc import ABC, abstractmethod +from bisect import bisect_left +from enum import IntEnum +from functools import partial +from logging import getLogger +from math import inf +from threading import Lock +from typing import ( + Callable, + Generic, + List, + Optional, + Sequence, + Type, + TypeVar, +) + +from opentelemetry.metrics import ( + Asynchronous, + Counter, + Histogram, + Instrument, + ObservableCounter, + ObservableGauge, + ObservableUpDownCounter, + Synchronous, + UpDownCounter, + _Gauge, +) +from opentelemetry.sdk.metrics._internal.exemplar import ( + Exemplar, + ExemplarReservoirBuilder, +) +from opentelemetry.sdk.metrics._internal.exponential_histogram.buckets import ( + Buckets, +) +from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping import ( + Mapping, +) +from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping.exponent_mapping import ( + ExponentMapping, +) +from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping.logarithm_mapping import ( + LogarithmMapping, +) +from opentelemetry.sdk.metrics._internal.measurement import Measurement +from opentelemetry.sdk.metrics._internal.point import Buckets as BucketsPoint +from opentelemetry.sdk.metrics._internal.point import ( + ExponentialHistogramDataPoint, + HistogramDataPoint, + NumberDataPoint, + Sum, +) +from opentelemetry.sdk.metrics._internal.point import Gauge as GaugePoint +from opentelemetry.sdk.metrics._internal.point import ( + Histogram as HistogramPoint, +) +from opentelemetry.util.types import Attributes + +_DataPointVarT = TypeVar("_DataPointVarT", NumberDataPoint, HistogramDataPoint) + +_logger = getLogger(__name__) + + +class AggregationTemporality(IntEnum): + """ + The temporality to use when aggregating data. 
+ + Can be one of the following values: + """ + + UNSPECIFIED = 0 + DELTA = 1 + CUMULATIVE = 2 + + +class _Aggregation(ABC, Generic[_DataPointVarT]): + def __init__( + self, + attributes: Attributes, + reservoir_builder: ExemplarReservoirBuilder, + ): + self._lock = Lock() + self._attributes = attributes + self._reservoir = reservoir_builder() + self._previous_point = None + + @abstractmethod + def aggregate( + self, measurement: Measurement, should_sample_exemplar: bool = True + ) -> None: + """Aggregate a measurement. + + Args: + measurement: Measurement to aggregate + should_sample_exemplar: Whether the measurement should be sampled by the exemplars reservoir or not. + """ + + @abstractmethod + def collect( + self, + collection_aggregation_temporality: AggregationTemporality, + collection_start_nano: int, + ) -> Optional[_DataPointVarT]: + pass + + def _collect_exemplars(self) -> Sequence[Exemplar]: + """Returns the collected exemplars. + + Returns: + The exemplars collected by the reservoir + """ + return self._reservoir.collect(self._attributes) + + def _sample_exemplar( + self, measurement: Measurement, should_sample_exemplar: bool + ) -> None: + """Offer the measurement to the exemplar reservoir for sampling. + + It should be called within the each :ref:`aggregate` call. + + Args: + measurement: The new measurement + should_sample_exemplar: Whether the measurement should be sampled by the exemplars reservoir or not. 
+ """ + if should_sample_exemplar: + self._reservoir.offer( + measurement.value, + measurement.time_unix_nano, + measurement.attributes, + measurement.context, + ) + + +class _DropAggregation(_Aggregation): + def aggregate( + self, measurement: Measurement, should_sample_exemplar: bool = True + ) -> None: + pass + + def collect( + self, + collection_aggregation_temporality: AggregationTemporality, + collection_start_nano: int, + ) -> Optional[_DataPointVarT]: + pass + + +class _SumAggregation(_Aggregation[Sum]): + def __init__( + self, + attributes: Attributes, + instrument_is_monotonic: bool, + instrument_aggregation_temporality: AggregationTemporality, + start_time_unix_nano: int, + reservoir_builder: ExemplarReservoirBuilder, + ): + super().__init__(attributes, reservoir_builder) + + self._start_time_unix_nano = start_time_unix_nano + self._instrument_aggregation_temporality = ( + instrument_aggregation_temporality + ) + self._instrument_is_monotonic = instrument_is_monotonic + + self._value = None + + self._previous_collection_start_nano = self._start_time_unix_nano + self._previous_value = 0 + + def aggregate( + self, measurement: Measurement, should_sample_exemplar: bool = True + ) -> None: + with self._lock: + if self._value is None: + self._value = 0 + + self._value = self._value + measurement.value + + self._sample_exemplar(measurement, should_sample_exemplar) + + def collect( + self, + collection_aggregation_temporality: AggregationTemporality, + collection_start_nano: int, + ) -> Optional[NumberDataPoint]: + """ + Atomically return a point for the current value of the metric and + reset the aggregation value. + + Synchronous instruments have a method which is called directly with + increments for a given quantity: + + For example, an instrument that counts the amount of passengers in + every vehicle that crosses a certain point in a highway: + + synchronous_instrument.add(2) + collect(...) 
# 2 passengers are counted + synchronous_instrument.add(3) + collect(...) # 3 passengers are counted + synchronous_instrument.add(1) + collect(...) # 1 passenger is counted + + In this case the instrument aggregation temporality is DELTA because + every value represents an increment to the count, + + Asynchronous instruments have a callback which returns the total value + of a given quantity: + + For example, an instrument that measures the amount of bytes written to + a certain hard drive: + + callback() -> 1352 + collect(...) # 1352 bytes have been written so far + callback() -> 2324 + collect(...) # 2324 bytes have been written so far + callback() -> 4542 + collect(...) # 4542 bytes have been written so far + + In this case the instrument aggregation temporality is CUMULATIVE + because every value represents the total of the measurement. + + There is also the collection aggregation temporality, which is passed + to this method. The collection aggregation temporality defines the + nature of the returned value by this aggregation. + + When the collection aggregation temporality matches the + instrument aggregation temporality, then this method returns the + current value directly: + + synchronous_instrument.add(2) + collect(DELTA) -> 2 + synchronous_instrument.add(3) + collect(DELTA) -> 3 + synchronous_instrument.add(1) + collect(DELTA) -> 1 + + callback() -> 1352 + collect(CUMULATIVE) -> 1352 + callback() -> 2324 + collect(CUMULATIVE) -> 2324 + callback() -> 4542 + collect(CUMULATIVE) -> 4542 + + When the collection aggregation temporality does not match the + instrument aggregation temporality, then a conversion is made. For this + purpose, this aggregation keeps a private attribute, + self._previous_value. + + When the instrument is synchronous: + + self._previous_value is the sum of every previously + collected (delta) value. 
In this case, the returned (cumulative) value + will be: + + self._previous_value + value + + synchronous_instrument.add(2) + collect(CUMULATIVE) -> 2 + synchronous_instrument.add(3) + collect(CUMULATIVE) -> 5 + synchronous_instrument.add(1) + collect(CUMULATIVE) -> 6 + + Also, as a diagram: + + time -> + + self._previous_value + |-------------| + + value (delta) + |----| + + returned value (cumulative) + |------------------| + + When the instrument is asynchronous: + + self._previous_value is the value of the previously + collected (cumulative) value. In this case, the returned (delta) value + will be: + + value - self._previous_value + + callback() -> 1352 + collect(DELTA) -> 1352 + callback() -> 2324 + collect(DELTA) -> 972 + callback() -> 4542 + collect(DELTA) -> 2218 + + Also, as a diagram: + + time -> + + self._previous_value + |-------------| + + value (cumulative) + |------------------| + + returned value (delta) + |----| + """ + + with self._lock: + value = self._value + self._value = None + + if ( + self._instrument_aggregation_temporality + is AggregationTemporality.DELTA + ): + # This happens when the corresponding instrument for this + # aggregation is synchronous. 
+ if ( + collection_aggregation_temporality + is AggregationTemporality.DELTA + ): + previous_collection_start_nano = ( + self._previous_collection_start_nano + ) + self._previous_collection_start_nano = ( + collection_start_nano + ) + + if value is None: + return None + + return NumberDataPoint( + attributes=self._attributes, + exemplars=self._collect_exemplars(), + start_time_unix_nano=previous_collection_start_nano, + time_unix_nano=collection_start_nano, + value=value, + ) + + if value is None: + value = 0 + + self._previous_value = value + self._previous_value + + return NumberDataPoint( + attributes=self._attributes, + exemplars=self._collect_exemplars(), + start_time_unix_nano=self._start_time_unix_nano, + time_unix_nano=collection_start_nano, + value=self._previous_value, + ) + + # This happens when the corresponding instrument for this + # aggregation is asynchronous. + + if value is None: + # This happens when the corresponding instrument callback + # does not produce measurements. 
+ return None + + if ( + collection_aggregation_temporality + is AggregationTemporality.DELTA + ): + result_value = value - self._previous_value + + self._previous_value = value + + previous_collection_start_nano = ( + self._previous_collection_start_nano + ) + self._previous_collection_start_nano = collection_start_nano + + return NumberDataPoint( + attributes=self._attributes, + exemplars=self._collect_exemplars(), + start_time_unix_nano=previous_collection_start_nano, + time_unix_nano=collection_start_nano, + value=result_value, + ) + + return NumberDataPoint( + attributes=self._attributes, + exemplars=self._collect_exemplars(), + start_time_unix_nano=self._start_time_unix_nano, + time_unix_nano=collection_start_nano, + value=value, + ) + + +class _LastValueAggregation(_Aggregation[GaugePoint]): + def __init__( + self, + attributes: Attributes, + reservoir_builder: ExemplarReservoirBuilder, + ): + super().__init__(attributes, reservoir_builder) + self._value = None + + def aggregate( + self, measurement: Measurement, should_sample_exemplar: bool = True + ): + with self._lock: + self._value = measurement.value + + self._sample_exemplar(measurement, should_sample_exemplar) + + def collect( + self, + collection_aggregation_temporality: AggregationTemporality, + collection_start_nano: int, + ) -> Optional[_DataPointVarT]: + """ + Atomically return a point for the current value of the metric. 
+ """ + with self._lock: + if self._value is None: + return None + value = self._value + self._value = None + + exemplars = self._collect_exemplars() + + return NumberDataPoint( + attributes=self._attributes, + exemplars=exemplars, + start_time_unix_nano=None, + time_unix_nano=collection_start_nano, + value=value, + ) + + +_DEFAULT_EXPLICIT_BUCKET_HISTOGRAM_AGGREGATION_BOUNDARIES: Sequence[float] = ( + 0.0, + 5.0, + 10.0, + 25.0, + 50.0, + 75.0, + 100.0, + 250.0, + 500.0, + 750.0, + 1000.0, + 2500.0, + 5000.0, + 7500.0, + 10000.0, +) + + +class _ExplicitBucketHistogramAggregation(_Aggregation[HistogramPoint]): + def __init__( + self, + attributes: Attributes, + instrument_aggregation_temporality: AggregationTemporality, + start_time_unix_nano: int, + reservoir_builder: ExemplarReservoirBuilder, + boundaries: Optional[Sequence[float]] = None, + record_min_max: bool = True, + ): + if boundaries is None: + boundaries = ( + _DEFAULT_EXPLICIT_BUCKET_HISTOGRAM_AGGREGATION_BOUNDARIES + ) + super().__init__( + attributes, + reservoir_builder=partial( + reservoir_builder, boundaries=boundaries + ), + ) + + self._instrument_aggregation_temporality = ( + instrument_aggregation_temporality + ) + self._start_time_unix_nano = start_time_unix_nano + self._boundaries = tuple(boundaries) + self._record_min_max = record_min_max + + self._value = None + self._min = inf + self._max = -inf + self._sum = 0 + + self._previous_value = None + self._previous_min = inf + self._previous_max = -inf + self._previous_sum = 0 + + self._previous_collection_start_nano = self._start_time_unix_nano + + def _get_empty_bucket_counts(self) -> List[int]: + return [0] * (len(self._boundaries) + 1) + + def aggregate( + self, measurement: Measurement, should_sample_exemplar: bool = True + ) -> None: + with self._lock: + if self._value is None: + self._value = self._get_empty_bucket_counts() + + measurement_value = measurement.value + + self._sum += measurement_value + + if self._record_min_max: + self._min 
= min(self._min, measurement_value) + self._max = max(self._max, measurement_value) + + self._value[bisect_left(self._boundaries, measurement_value)] += 1 + + self._sample_exemplar(measurement, should_sample_exemplar) + + def collect( + self, + collection_aggregation_temporality: AggregationTemporality, + collection_start_nano: int, + ) -> Optional[_DataPointVarT]: + """ + Atomically return a point for the current value of the metric. + """ + + with self._lock: + value = self._value + sum_ = self._sum + min_ = self._min + max_ = self._max + + self._value = None + self._sum = 0 + self._min = inf + self._max = -inf + + if ( + self._instrument_aggregation_temporality + is AggregationTemporality.DELTA + ): + # This happens when the corresponding instrument for this + # aggregation is synchronous. + if ( + collection_aggregation_temporality + is AggregationTemporality.DELTA + ): + previous_collection_start_nano = ( + self._previous_collection_start_nano + ) + self._previous_collection_start_nano = ( + collection_start_nano + ) + + if value is None: + return None + + return HistogramDataPoint( + attributes=self._attributes, + exemplars=self._collect_exemplars(), + start_time_unix_nano=previous_collection_start_nano, + time_unix_nano=collection_start_nano, + count=sum(value), + sum=sum_, + bucket_counts=tuple(value), + explicit_bounds=self._boundaries, + min=min_, + max=max_, + ) + + if value is None: + value = self._get_empty_bucket_counts() + + if self._previous_value is None: + self._previous_value = self._get_empty_bucket_counts() + + self._previous_value = [ + value_element + previous_value_element + for ( + value_element, + previous_value_element, + ) in zip(value, self._previous_value) + ] + self._previous_min = min(min_, self._previous_min) + self._previous_max = max(max_, self._previous_max) + self._previous_sum = sum_ + self._previous_sum + + return HistogramDataPoint( + attributes=self._attributes, + exemplars=self._collect_exemplars(), + 
start_time_unix_nano=self._start_time_unix_nano, + time_unix_nano=collection_start_nano, + count=sum(self._previous_value), + sum=self._previous_sum, + bucket_counts=tuple(self._previous_value), + explicit_bounds=self._boundaries, + min=self._previous_min, + max=self._previous_max, + ) + + return None + + +# pylint: disable=protected-access +class _ExponentialBucketHistogramAggregation(_Aggregation[HistogramPoint]): + # _min_max_size and _max_max_size are the smallest and largest values + # the max_size parameter may have, respectively. + + # _min_max_size is is the smallest reasonable value which is small enough + # to contain the entire normal floating point range at the minimum scale. + _min_max_size = 2 + + # _max_max_size is an arbitrary limit meant to limit accidental creation of + # giant exponential bucket histograms. + _max_max_size = 16384 + + def __init__( + self, + attributes: Attributes, + reservoir_builder: ExemplarReservoirBuilder, + instrument_aggregation_temporality: AggregationTemporality, + start_time_unix_nano: int, + # This is the default maximum number of buckets per positive or + # negative number range. The value 160 is specified by OpenTelemetry. + # See the derivation here: + # https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exponential-bucket-histogram-aggregation) + max_size: int = 160, + max_scale: int = 20, + ): + # max_size is the maximum capacity of the positive and negative + # buckets. + # _sum is the sum of all the values aggregated by this aggregator. + # _count is the count of all calls to aggregate. + # _zero_count is the count of all the calls to aggregate when the value + # to be aggregated is exactly 0. + # _min is the smallest value aggregated by this aggregator. + # _max is the smallest value aggregated by this aggregator. + # _positive holds the positive values. + # _negative holds the negative values by their absolute value. 
+ if max_size < self._min_max_size: + raise ValueError( + f"Buckets max size {max_size} is smaller than " + "minimum max size {self._min_max_size}" + ) + + if max_size > self._max_max_size: + raise ValueError( + f"Buckets max size {max_size} is larger than " + "maximum max size {self._max_max_size}" + ) + if max_scale > 20: + _logger.warning( + "max_scale is set to %s which is " + "larger than the recommended value of 20", + max_scale, + ) + + # This aggregation is analogous to _ExplicitBucketHistogramAggregation, + # the only difference is that with every call to aggregate, the size + # and amount of buckets can change (in + # _ExplicitBucketHistogramAggregation both size and amount of buckets + # remain constant once it is instantiated). + + super().__init__( + attributes, + reservoir_builder=partial( + reservoir_builder, size=min(20, max_size) + ), + ) + + self._instrument_aggregation_temporality = ( + instrument_aggregation_temporality + ) + self._start_time_unix_nano = start_time_unix_nano + self._max_size = max_size + self._max_scale = max_scale + + self._value_positive = None + self._value_negative = None + self._min = inf + self._max = -inf + self._sum = 0 + self._count = 0 + self._zero_count = 0 + self._scale = None + + self._previous_value_positive = None + self._previous_value_negative = None + self._previous_min = inf + self._previous_max = -inf + self._previous_sum = 0 + self._previous_count = 0 + self._previous_zero_count = 0 + self._previous_scale = None + + self._previous_collection_start_nano = self._start_time_unix_nano + + self._mapping = self._new_mapping(self._max_scale) + + def aggregate( + self, measurement: Measurement, should_sample_exemplar: bool = True + ) -> None: + # pylint: disable=too-many-branches,too-many-statements, too-many-locals + + with self._lock: + if self._value_positive is None: + self._value_positive = Buckets() + if self._value_negative is None: + self._value_negative = Buckets() + + measurement_value = measurement.value 
+ + self._sum += measurement_value + + self._min = min(self._min, measurement_value) + self._max = max(self._max, measurement_value) + + self._count += 1 + + if measurement_value == 0: + self._zero_count += 1 + + if self._count == self._zero_count: + self._scale = 0 + + return + + if measurement_value > 0: + value = self._value_positive + + else: + measurement_value = -measurement_value + value = self._value_negative + + # The following code finds out if it is necessary to change the + # buckets to hold the incoming measurement_value, changes them if + # necessary. This process does not exist in + # _ExplicitBucketHistogram aggregation because the buckets there + # are constant in size and amount. + index = self._mapping.map_to_index(measurement_value) + + is_rescaling_needed = False + low, high = 0, 0 + + if len(value) == 0: + value.index_start = index + value.index_end = index + value.index_base = index + + elif ( + index < value.index_start + and (value.index_end - index) >= self._max_size + ): + is_rescaling_needed = True + low = index + high = value.index_end + + elif ( + index > value.index_end + and (index - value.index_start) >= self._max_size + ): + is_rescaling_needed = True + low = value.index_start + high = index + + if is_rescaling_needed: + scale_change = self._get_scale_change(low, high) + self._downscale( + scale_change, + self._value_positive, + self._value_negative, + ) + self._mapping = self._new_mapping( + self._mapping.scale - scale_change + ) + + index = self._mapping.map_to_index(measurement_value) + + self._scale = self._mapping.scale + + if index < value.index_start: + span = value.index_end - index + + if span >= len(value.counts): + value.grow(span + 1, self._max_size) + + value.index_start = index + + elif index > value.index_end: + span = index - value.index_start + + if span >= len(value.counts): + value.grow(span + 1, self._max_size) + + value.index_end = index + + bucket_index = index - value.index_base + + if bucket_index < 0: + 
bucket_index += len(value.counts) + + # Now the buckets have been changed if needed and bucket_index will + # be used to increment the counter of the bucket that needs to be + # incremented. + + # This is analogous to + # self._value[bisect_left(self._boundaries, measurement_value)] += 1 + # in _ExplicitBucketHistogramAggregation.aggregate + value.increment_bucket(bucket_index) + + self._sample_exemplar(measurement, should_sample_exemplar) + + def collect( + self, + collection_aggregation_temporality: AggregationTemporality, + collection_start_nano: int, + ) -> Optional[_DataPointVarT]: + """ + Atomically return a point for the current value of the metric. + """ + + # pylint: disable=too-many-statements, too-many-locals + with self._lock: + value_positive = self._value_positive + value_negative = self._value_negative + sum_ = self._sum + min_ = self._min + max_ = self._max + count = self._count + zero_count = self._zero_count + scale = self._scale + + self._value_positive = None + self._value_negative = None + self._sum = 0 + self._min = inf + self._max = -inf + self._count = 0 + self._zero_count = 0 + self._scale = None + + if ( + self._instrument_aggregation_temporality + is AggregationTemporality.DELTA + ): + # This happens when the corresponding instrument for this + # aggregation is synchronous. 
+ if ( + collection_aggregation_temporality + is AggregationTemporality.DELTA + ): + previous_collection_start_nano = ( + self._previous_collection_start_nano + ) + self._previous_collection_start_nano = ( + collection_start_nano + ) + + if value_positive is None and value_negative is None: + return None + + return ExponentialHistogramDataPoint( + attributes=self._attributes, + exemplars=self._collect_exemplars(), + start_time_unix_nano=previous_collection_start_nano, + time_unix_nano=collection_start_nano, + count=count, + sum=sum_, + scale=scale, + zero_count=zero_count, + positive=BucketsPoint( + offset=value_positive.offset, + bucket_counts=(value_positive.get_offset_counts()), + ), + negative=BucketsPoint( + offset=value_negative.offset, + bucket_counts=(value_negative.get_offset_counts()), + ), + # FIXME: Find the right value for flags + flags=0, + min=min_, + max=max_, + ) + + # Here collection_temporality is CUMULATIVE. + # instrument_temporality is always DELTA for the time being. + # Here we need to handle the case where: + # collect is called after at least one other call to collect + # (there is data in previous buckets, a call to merge is needed + # to handle possible differences in bucket sizes). + # collect is called without another call previous call to + # collect was made (there is no previous buckets, previous, + # empty buckets that are the same scale of the current buckets + # need to be made so that they can be cumulatively aggregated + # to the current buckets). + + if ( + value_positive is None + and self._previous_value_positive is None + ): + # This happens if collect is called for the first time + # and aggregate has not yet been called. 
+ value_positive = Buckets() + self._previous_value_positive = value_positive.copy_empty() + if ( + value_negative is None + and self._previous_value_negative is None + ): + value_negative = Buckets() + self._previous_value_negative = value_negative.copy_empty() + if scale is None and self._previous_scale is None: + scale = self._mapping.scale + self._previous_scale = scale + + if ( + value_positive is not None + and self._previous_value_positive is None + ): + # This happens when collect is called the very first time + # and aggregate has been called before. + + # We need previous buckets to add them to the current ones. + # When collect is called for the first time, there are no + # previous buckets, so we need to create empty buckets to + # add them to the current ones. The addition of empty + # buckets to the current ones will result in the current + # ones unchanged. + + # The way the previous buckets are generated here is + # different from the explicit bucket histogram where + # the size and amount of the buckets does not change once + # they are instantiated. Here, the size and amount of the + # buckets can change with every call to aggregate. In order + # to get empty buckets that can be added to the current + # ones resulting in the current ones unchanged we need to + # generate empty buckets that have the same size and amount + # as the current ones, this is what copy_empty does. 
+ self._previous_value_positive = value_positive.copy_empty() + if ( + value_negative is not None + and self._previous_value_negative is None + ): + self._previous_value_negative = value_negative.copy_empty() + if scale is not None and self._previous_scale is None: + self._previous_scale = scale + + if ( + value_positive is None + and self._previous_value_positive is not None + ): + value_positive = self._previous_value_positive.copy_empty() + if ( + value_negative is None + and self._previous_value_negative is not None + ): + value_negative = self._previous_value_negative.copy_empty() + if scale is None and self._previous_scale is not None: + scale = self._previous_scale + + min_scale = min(self._previous_scale, scale) + + low_positive, high_positive = ( + self._get_low_high_previous_current( + self._previous_value_positive, + value_positive, + scale, + min_scale, + ) + ) + low_negative, high_negative = ( + self._get_low_high_previous_current( + self._previous_value_negative, + value_negative, + scale, + min_scale, + ) + ) + + min_scale = min( + min_scale + - self._get_scale_change(low_positive, high_positive), + min_scale + - self._get_scale_change(low_negative, high_negative), + ) + + self._downscale( + self._previous_scale - min_scale, + self._previous_value_positive, + self._previous_value_negative, + ) + + # self._merge adds the values from value to + # self._previous_value, this is analogous to + # self._previous_value = [ + # value_element + previous_value_element + # for ( + # value_element, + # previous_value_element, + # ) in zip(value, self._previous_value) + # ] + # in _ExplicitBucketHistogramAggregation.collect. 
+ self._merge( + self._previous_value_positive, + value_positive, + scale, + min_scale, + collection_aggregation_temporality, + ) + self._merge( + self._previous_value_negative, + value_negative, + scale, + min_scale, + collection_aggregation_temporality, + ) + + self._previous_min = min(min_, self._previous_min) + self._previous_max = max(max_, self._previous_max) + self._previous_sum = sum_ + self._previous_sum + self._previous_count = count + self._previous_count + self._previous_zero_count = ( + zero_count + self._previous_zero_count + ) + self._previous_scale = min_scale + + return ExponentialHistogramDataPoint( + attributes=self._attributes, + exemplars=self._collect_exemplars(), + start_time_unix_nano=self._start_time_unix_nano, + time_unix_nano=collection_start_nano, + count=self._previous_count, + sum=self._previous_sum, + scale=self._previous_scale, + zero_count=self._previous_zero_count, + positive=BucketsPoint( + offset=self._previous_value_positive.offset, + bucket_counts=( + self._previous_value_positive.get_offset_counts() + ), + ), + negative=BucketsPoint( + offset=self._previous_value_negative.offset, + bucket_counts=( + self._previous_value_negative.get_offset_counts() + ), + ), + # FIXME: Find the right value for flags + flags=0, + min=self._previous_min, + max=self._previous_max, + ) + + return None + + def _get_low_high_previous_current( + self, + previous_point_buckets, + current_point_buckets, + current_scale, + min_scale, + ): + (previous_point_low, previous_point_high) = self._get_low_high( + previous_point_buckets, self._previous_scale, min_scale + ) + (current_point_low, current_point_high) = self._get_low_high( + current_point_buckets, current_scale, min_scale + ) + + if current_point_low > current_point_high: + low = previous_point_low + high = previous_point_high + + elif previous_point_low > previous_point_high: + low = current_point_low + high = current_point_high + + else: + low = min(previous_point_low, current_point_low) + high = 
max(previous_point_high, current_point_high) + + return low, high + + @staticmethod + def _get_low_high(buckets, scale, min_scale): + if buckets.counts == [0]: + return 0, -1 + + shift = scale - min_scale + + return buckets.index_start >> shift, buckets.index_end >> shift + + @staticmethod + def _new_mapping(scale: int) -> Mapping: + if scale <= 0: + return ExponentMapping(scale) + return LogarithmMapping(scale) + + def _get_scale_change(self, low, high): + change = 0 + + while high - low >= self._max_size: + high = high >> 1 + low = low >> 1 + + change += 1 + + return change + + @staticmethod + def _downscale(change: int, positive, negative): + if change == 0: + return + + if change < 0: + # pylint: disable=broad-exception-raised + raise Exception("Invalid change of scale") + + positive.downscale(change) + negative.downscale(change) + + def _merge( + self, + previous_buckets: Buckets, + current_buckets: Buckets, + current_scale, + min_scale, + aggregation_temporality, + ): + current_change = current_scale - min_scale + + for current_bucket_index, current_bucket in enumerate( + current_buckets.counts + ): + if current_bucket == 0: + continue + + # Not considering the case where len(previous_buckets) == 0. This + # would not happen because self._previous_point is only assigned to + # an ExponentialHistogramDataPoint object if self._count != 0. 
+ + current_index = current_buckets.index_base + current_bucket_index + if current_index > current_buckets.index_end: + current_index -= len(current_buckets.counts) + + index = current_index >> current_change + + if index < previous_buckets.index_start: + span = previous_buckets.index_end - index + + if span >= self._max_size: + # pylint: disable=broad-exception-raised + raise Exception("Incorrect merge scale") + + if span >= len(previous_buckets.counts): + previous_buckets.grow(span + 1, self._max_size) + + previous_buckets.index_start = index + + if index > previous_buckets.index_end: + span = index - previous_buckets.index_start + + if span >= self._max_size: + # pylint: disable=broad-exception-raised + raise Exception("Incorrect merge scale") + + if span >= len(previous_buckets.counts): + previous_buckets.grow(span + 1, self._max_size) + + previous_buckets.index_end = index + + bucket_index = index - previous_buckets.index_base + + if bucket_index < 0: + bucket_index += len(previous_buckets.counts) + + if aggregation_temporality is AggregationTemporality.DELTA: + current_bucket = -current_bucket + + previous_buckets.increment_bucket( + bucket_index, increment=current_bucket + ) + + +class Aggregation(ABC): + """ + Base class for all aggregation types. + """ + + @abstractmethod + def _create_aggregation( + self, + instrument: Instrument, + attributes: Attributes, + reservoir_factory: Callable[ + [Type[_Aggregation]], ExemplarReservoirBuilder + ], + start_time_unix_nano: int, + ) -> _Aggregation: + """Creates an aggregation""" + + +class DefaultAggregation(Aggregation): + """ + The default aggregation to be used in a `View`. 
+ + This aggregation will create an actual aggregation depending on the + instrument type, as specified next: + + ==================================================== ==================================== + Instrument Aggregation + ==================================================== ==================================== + `opentelemetry.sdk.metrics.Counter` `SumAggregation` + `opentelemetry.sdk.metrics.UpDownCounter` `SumAggregation` + `opentelemetry.sdk.metrics.ObservableCounter` `SumAggregation` + `opentelemetry.sdk.metrics.ObservableUpDownCounter` `SumAggregation` + `opentelemetry.sdk.metrics.Histogram` `ExplicitBucketHistogramAggregation` + `opentelemetry.sdk.metrics.ObservableGauge` `LastValueAggregation` + ==================================================== ==================================== + """ + + def _create_aggregation( + self, + instrument: Instrument, + attributes: Attributes, + reservoir_factory: Callable[ + [Type[_Aggregation]], ExemplarReservoirBuilder + ], + start_time_unix_nano: int, + ) -> _Aggregation: + # pylint: disable=too-many-return-statements + if isinstance(instrument, Counter): + return _SumAggregation( + attributes, + reservoir_builder=reservoir_factory(_SumAggregation), + instrument_is_monotonic=True, + instrument_aggregation_temporality=( + AggregationTemporality.DELTA + ), + start_time_unix_nano=start_time_unix_nano, + ) + if isinstance(instrument, UpDownCounter): + return _SumAggregation( + attributes, + reservoir_builder=reservoir_factory(_SumAggregation), + instrument_is_monotonic=False, + instrument_aggregation_temporality=( + AggregationTemporality.DELTA + ), + start_time_unix_nano=start_time_unix_nano, + ) + + if isinstance(instrument, ObservableCounter): + return _SumAggregation( + attributes, + reservoir_builder=reservoir_factory(_SumAggregation), + instrument_is_monotonic=True, + instrument_aggregation_temporality=( + AggregationTemporality.CUMULATIVE + ), + start_time_unix_nano=start_time_unix_nano, + ) + + if 
isinstance(instrument, ObservableUpDownCounter): + return _SumAggregation( + attributes, + reservoir_builder=reservoir_factory(_SumAggregation), + instrument_is_monotonic=False, + instrument_aggregation_temporality=( + AggregationTemporality.CUMULATIVE + ), + start_time_unix_nano=start_time_unix_nano, + ) + + if isinstance(instrument, Histogram): + boundaries = instrument._advisory.explicit_bucket_boundaries + return _ExplicitBucketHistogramAggregation( + attributes, + reservoir_builder=reservoir_factory( + _ExplicitBucketHistogramAggregation + ), + instrument_aggregation_temporality=( + AggregationTemporality.DELTA + ), + boundaries=boundaries, + start_time_unix_nano=start_time_unix_nano, + ) + + if isinstance(instrument, ObservableGauge): + return _LastValueAggregation( + attributes, + reservoir_builder=reservoir_factory(_LastValueAggregation), + ) + + if isinstance(instrument, _Gauge): + return _LastValueAggregation( + attributes, + reservoir_builder=reservoir_factory(_LastValueAggregation), + ) + + # pylint: disable=broad-exception-raised + raise Exception(f"Invalid instrument type {type(instrument)} found") + + +class ExponentialBucketHistogramAggregation(Aggregation): + def __init__( + self, + max_size: int = 160, + max_scale: int = 20, + ): + self._max_size = max_size + self._max_scale = max_scale + + def _create_aggregation( + self, + instrument: Instrument, + attributes: Attributes, + reservoir_factory: Callable[ + [Type[_Aggregation]], ExemplarReservoirBuilder + ], + start_time_unix_nano: int, + ) -> _Aggregation: + instrument_aggregation_temporality = AggregationTemporality.UNSPECIFIED + if isinstance(instrument, Synchronous): + instrument_aggregation_temporality = AggregationTemporality.DELTA + elif isinstance(instrument, Asynchronous): + instrument_aggregation_temporality = ( + AggregationTemporality.CUMULATIVE + ) + + return _ExponentialBucketHistogramAggregation( + attributes, + reservoir_factory(_ExponentialBucketHistogramAggregation), + 
instrument_aggregation_temporality, + start_time_unix_nano, + max_size=self._max_size, + max_scale=self._max_scale, + ) + + +class ExplicitBucketHistogramAggregation(Aggregation): + """This aggregation informs the SDK to collect: + + - Count of Measurement values falling within explicit bucket boundaries. + - Arithmetic sum of Measurement values in population. This SHOULD NOT be collected when used with instruments that record negative measurements, e.g. UpDownCounter or ObservableGauge. + - Min (optional) Measurement value in population. + - Max (optional) Measurement value in population. + + + Args: + boundaries: Array of increasing values representing explicit bucket boundary values. + record_min_max: Whether to record min and max. + """ + + def __init__( + self, + boundaries: Optional[Sequence[float]] = None, + record_min_max: bool = True, + ) -> None: + self._boundaries = boundaries + self._record_min_max = record_min_max + + def _create_aggregation( + self, + instrument: Instrument, + attributes: Attributes, + reservoir_factory: Callable[ + [Type[_Aggregation]], ExemplarReservoirBuilder + ], + start_time_unix_nano: int, + ) -> _Aggregation: + instrument_aggregation_temporality = AggregationTemporality.UNSPECIFIED + if isinstance(instrument, Synchronous): + instrument_aggregation_temporality = AggregationTemporality.DELTA + elif isinstance(instrument, Asynchronous): + instrument_aggregation_temporality = ( + AggregationTemporality.CUMULATIVE + ) + + if self._boundaries is None: + self._boundaries = ( + instrument._advisory.explicit_bucket_boundaries + or _DEFAULT_EXPLICIT_BUCKET_HISTOGRAM_AGGREGATION_BOUNDARIES + ) + + return _ExplicitBucketHistogramAggregation( + attributes, + instrument_aggregation_temporality, + start_time_unix_nano, + reservoir_factory(_ExplicitBucketHistogramAggregation), + self._boundaries, + self._record_min_max, + ) + + +class SumAggregation(Aggregation): + """This aggregation informs the SDK to collect: + + - The arithmetic sum of 
Measurement values. + """ + + def _create_aggregation( + self, + instrument: Instrument, + attributes: Attributes, + reservoir_factory: Callable[ + [Type[_Aggregation]], ExemplarReservoirBuilder + ], + start_time_unix_nano: int, + ) -> _Aggregation: + instrument_aggregation_temporality = AggregationTemporality.UNSPECIFIED + if isinstance(instrument, Synchronous): + instrument_aggregation_temporality = AggregationTemporality.DELTA + elif isinstance(instrument, Asynchronous): + instrument_aggregation_temporality = ( + AggregationTemporality.CUMULATIVE + ) + + return _SumAggregation( + attributes, + isinstance(instrument, (Counter, ObservableCounter)), + instrument_aggregation_temporality, + start_time_unix_nano, + reservoir_factory(_SumAggregation), + ) + + +class LastValueAggregation(Aggregation): + """ + This aggregation informs the SDK to collect: + + - The last Measurement. + - The timestamp of the last Measurement. + """ + + def _create_aggregation( + self, + instrument: Instrument, + attributes: Attributes, + reservoir_factory: Callable[ + [Type[_Aggregation]], ExemplarReservoirBuilder + ], + start_time_unix_nano: int, + ) -> _Aggregation: + return _LastValueAggregation( + attributes, + reservoir_builder=reservoir_factory(_LastValueAggregation), + ) + + +class DropAggregation(Aggregation): + """Using this aggregation will make all measurements be ignored.""" + + def _create_aggregation( + self, + instrument: Instrument, + attributes: Attributes, + reservoir_factory: Callable[ + [Type[_Aggregation]], ExemplarReservoirBuilder + ], + start_time_unix_nano: int, + ) -> _Aggregation: + return _DropAggregation( + attributes, reservoir_factory(_DropAggregation) + ) diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exceptions.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exceptions.py new file mode 100644 index 00000000..0f8c3a75 --- /dev/null +++ 
b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exceptions.py @@ -0,0 +1,17 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +class MetricsTimeoutError(Exception): + """Raised when a metrics function times out""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exemplar/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exemplar/__init__.py new file mode 100644 index 00000000..ee93dd18 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exemplar/__init__.py @@ -0,0 +1,39 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from .exemplar import Exemplar +from .exemplar_filter import ( + AlwaysOffExemplarFilter, + AlwaysOnExemplarFilter, + ExemplarFilter, + TraceBasedExemplarFilter, +) +from .exemplar_reservoir import ( + AlignedHistogramBucketExemplarReservoir, + ExemplarReservoir, + ExemplarReservoirBuilder, + SimpleFixedSizeExemplarReservoir, +) + +__all__ = [ + "Exemplar", + "ExemplarFilter", + "AlwaysOffExemplarFilter", + "AlwaysOnExemplarFilter", + "TraceBasedExemplarFilter", + "AlignedHistogramBucketExemplarReservoir", + "ExemplarReservoir", + "ExemplarReservoirBuilder", + "SimpleFixedSizeExemplarReservoir", +] diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exemplar/exemplar.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exemplar/exemplar.py new file mode 100644 index 00000000..95582e16 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exemplar/exemplar.py @@ -0,0 +1,50 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import dataclasses +from typing import Optional, Union + +from opentelemetry.util.types import Attributes + + +@dataclasses.dataclass(frozen=True) +class Exemplar: + """A representation of an exemplar, which is a sample input measurement. + + Exemplars also hold information about the environment when the measurement + was recorded, for example the span and trace ID of the active span when the + exemplar was recorded. 
+ + Attributes + trace_id: (optional) The trace associated with a recording + span_id: (optional) The span associated with a recording + time_unix_nano: The time of the observation + value: The recorded value + filtered_attributes: A set of filtered attributes which provide additional insight into the Context when the observation was made. + + References: + https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#exemplars + https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exemplar + """ + + # TODO Fix doc - if using valid Google `Attributes:` key, the attributes are duplicated + # one will come from napoleon extension and the other from autodoc extension. This + # will raise an sphinx error of duplicated object description + # See https://github.com/sphinx-doc/sphinx/issues/8664 + + filtered_attributes: Attributes + value: Union[int, float] + time_unix_nano: int + span_id: Optional[int] = None + trace_id: Optional[int] = None diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_filter.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_filter.py new file mode 100644 index 00000000..8961d101 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_filter.py @@ -0,0 +1,134 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from abc import ABC, abstractmethod
+from typing import Union
+
+from opentelemetry import trace
+from opentelemetry.context import Context
+from opentelemetry.trace.span import INVALID_SPAN
+from opentelemetry.util.types import Attributes
+
+
+class ExemplarFilter(ABC):
+    """``ExemplarFilter`` determines which measurements are eligible for becoming an
+    ``Exemplar``.
+
+    Exemplar filters are used to filter measurements before attempting to store them
+    in a reservoir.
+
+    Reference:
+        https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exemplarfilter
+    """
+
+    @abstractmethod
+    def should_sample(
+        self,
+        value: Union[int, float],
+        time_unix_nano: int,
+        attributes: Attributes,
+        context: Context,
+    ) -> bool:
+        """Returns whether or not a reservoir should attempt to filter a measurement.
+
+        Args:
+            value: The value of the measurement
+            time_unix_nano: A timestamp that best represents when the measurement was taken
+            attributes: The complete set of measurement attributes
+            context: The Context of the measurement
+        """
+        raise NotImplementedError(
+            "ExemplarFilter.should_sample is not implemented"
+        )
+
+
+class AlwaysOnExemplarFilter(ExemplarFilter):
+    """An ExemplarFilter which makes all measurements eligible for being an Exemplar.
+
+    Reference:
+        https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#alwayson
+    """
+
+    def should_sample(
+        self,
+        value: Union[int, float],
+        time_unix_nano: int,
+        attributes: Attributes,
+        context: Context,
+    ) -> bool:
+        """Returns whether or not a reservoir should attempt to filter a measurement.
+
+        Args:
+            value: The value of the measurement
+            time_unix_nano: A timestamp that best represents when the measurement was taken
+            attributes: The complete set of measurement attributes
+            context: The Context of the measurement
+        """
+        return True
+
+
+class AlwaysOffExemplarFilter(ExemplarFilter):
+    """An ExemplarFilter which makes no measurements eligible for being an Exemplar.
+
+    Using this ExemplarFilter is as good as disabling Exemplar feature.
+
+    Reference:
+        https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#alwaysoff
+    """
+
+    def should_sample(
+        self,
+        value: Union[int, float],
+        time_unix_nano: int,
+        attributes: Attributes,
+        context: Context,
+    ) -> bool:
+        """Returns whether or not a reservoir should attempt to filter a measurement.
+
+        Args:
+            value: The value of the measurement
+            time_unix_nano: A timestamp that best represents when the measurement was taken
+            attributes: The complete set of measurement attributes
+            context: The Context of the measurement
+        """
+        return False
+
+
+class TraceBasedExemplarFilter(ExemplarFilter):
+    """An ExemplarFilter which makes those measurements eligible for being an Exemplar,
+    which are recorded in the context of a sampled parent span.
+
+    Reference:
+        https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#tracebased
+    """
+
+    def should_sample(
+        self,
+        value: Union[int, float],
+        time_unix_nano: int,
+        attributes: Attributes,
+        context: Context,
+    ) -> bool:
+        """Returns whether or not a reservoir should attempt to filter a measurement.
+
+        Args:
+            value: The value of the measurement
+            time_unix_nano: A timestamp that best represents when the measurement was taken
+            attributes: The complete set of measurement attributes
+            context: The Context of the measurement
+        """
+        span = trace.get_current_span(context)
+        if span == INVALID_SPAN:
+            return False
+        return span.get_span_context().trace_flags.sampled
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py
new file mode 100644
index 00000000..22d1ee9f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/exemplar/exemplar_reservoir.py
@@ -0,0 +1,332 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from abc import ABC, abstractmethod
+from collections import defaultdict
+from random import randrange
+from typing import (
+    Any,
+    Callable,
+    Dict,
+    List,
+    Mapping,
+    Optional,
+    Sequence,
+    Union,
+)
+
+from opentelemetry import trace
+from opentelemetry.context import Context
+from opentelemetry.trace.span import INVALID_SPAN
+from opentelemetry.util.types import Attributes
+
+from .exemplar import Exemplar
+
+
+class ExemplarReservoir(ABC):
+    """ExemplarReservoir provides a method to offer measurements to the reservoir
+    and another to collect accumulated Exemplars.
+ + Note: + The constructor MUST accept ``**kwargs`` that may be set from aggregation + parameters. + + Reference: + https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exemplarreservoir + """ + + @abstractmethod + def offer( + self, + value: Union[int, float], + time_unix_nano: int, + attributes: Attributes, + context: Context, + ) -> None: + """Offers a measurement to be sampled. + + Args: + value: Measured value + time_unix_nano: Measurement instant + attributes: Measurement attributes + context: Measurement context + """ + raise NotImplementedError("ExemplarReservoir.offer is not implemented") + + @abstractmethod + def collect(self, point_attributes: Attributes) -> List[Exemplar]: + """Returns accumulated Exemplars and also resets the reservoir for the next + sampling period + + Args: + point_attributes: The attributes associated with metric point. + + Returns: + a list of ``opentelemetry.sdk.metrics._internal.exemplar.exemplar.Exemplar`` s. Returned + exemplars contain the attributes that were filtered out by the aggregator, + but recorded alongside the original measurement. + """ + raise NotImplementedError( + "ExemplarReservoir.collect is not implemented" + ) + + +class ExemplarBucket: + def __init__(self) -> None: + self.__value: Union[int, float] = 0 + self.__attributes: Attributes = None + self.__time_unix_nano: int = 0 + self.__span_id: Optional[int] = None + self.__trace_id: Optional[int] = None + self.__offered: bool = False + + def offer( + self, + value: Union[int, float], + time_unix_nano: int, + attributes: Attributes, + context: Context, + ) -> None: + """Offers a measurement to be sampled. 
        Args:
            value: Measured value
            time_unix_nano: Measurement instant
            attributes: Measurement attributes
            context: Measurement context
        """
        self.__value = value
        self.__time_unix_nano = time_unix_nano
        self.__attributes = attributes
        # Capture the active span context (if any) so the exemplar can be
        # correlated with the trace that produced the measurement.
        span = trace.get_current_span(context)
        if span != INVALID_SPAN:
            span_context = span.get_span_context()
            self.__span_id = span_context.span_id
            self.__trace_id = span_context.trace_id

        self.__offered = True

    def collect(self, point_attributes: Attributes) -> Optional[Exemplar]:
        """May return an Exemplar and resets the bucket for the next sampling period."""
        if not self.__offered:
            return None

        # filters out attributes from the measurement that are already included in the metric data point
        # See the specification for more details:
        # https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exemplar
        filtered_attributes = (
            {
                k: v
                for k, v in self.__attributes.items()
                if k not in point_attributes
            }
            if self.__attributes
            else None
        )

        exemplar = Exemplar(
            filtered_attributes,
            self.__value,
            self.__time_unix_nano,
            self.__span_id,
            self.__trace_id,
        )
        self.__reset()
        return exemplar

    def __reset(self) -> None:
        """Reset the bucket state after a collection cycle."""
        self.__value = 0
        self.__attributes = {}
        self.__time_unix_nano = 0
        self.__span_id = None
        self.__trace_id = None
        self.__offered = False


class BucketIndexError(ValueError):
    """An exception raised when the bucket index cannot be found."""


class FixedSizeExemplarReservoirABC(ExemplarReservoir):
    """Abstract class for a reservoir with fixed size."""

    def __init__(self, size: int, **kwargs) -> None:
        super().__init__(**kwargs)
        # Maximum number of exemplars this reservoir may hold.
        self._size: int = size
        # Buckets are created lazily on first offer to a given index.
        self._reservoir_storage: Mapping[int, ExemplarBucket] = defaultdict(
            ExemplarBucket
        )

    def collect(self, point_attributes: Attributes) -> List[Exemplar]:
        """Returns accumulated Exemplars and also resets the reservoir for the next
        sampling period

        Args:
            point_attributes: The attributes associated with metric point.

        Returns:
            a list of ``opentelemetry.sdk.metrics._internal.exemplar.exemplar.Exemplar`` s. Returned
            exemplars contain the attributes that were filtered out by the aggregator,
            but recorded alongside the original measurement.
        """
        # sorted() makes the output order deterministic regardless of the
        # order in which buckets were created in the defaultdict.
        exemplars = [
            e
            for e in (
                bucket.collect(point_attributes)
                for _, bucket in sorted(self._reservoir_storage.items())
            )
            if e is not None
        ]
        self._reset()
        return exemplars

    def offer(
        self,
        value: Union[int, float],
        time_unix_nano: int,
        attributes: Attributes,
        context: Context,
    ) -> None:
        """Offers a measurement to be sampled.

        Args:
            value: Measured value
            time_unix_nano: Measurement instant
            attributes: Measurement attributes
            context: Measurement context
        """
        try:
            index = self._find_bucket_index(
                value, time_unix_nano, attributes, context
            )

            self._reservoir_storage[index].offer(
                value, time_unix_nano, attributes, context
            )
        except BucketIndexError:
            # Ignore invalid bucket index
            pass

    @abstractmethod
    def _find_bucket_index(
        self,
        value: Union[int, float],
        time_unix_nano: int,
        attributes: Attributes,
        context: Context,
    ) -> int:
        """Determines the bucket index for the given measurement.

        It should be implemented by subclasses based on specific strategies.

        Args:
            value: Measured value
            time_unix_nano: Measurement instant
            attributes: Measurement attributes
            context: Measurement context

        Returns:
            The bucket index

        Raises:
            BucketIndexError: If no bucket index can be found.
        """

    def _reset(self) -> None:
        """Reset the reservoir by resetting any stateful logic after a collection cycle."""


class SimpleFixedSizeExemplarReservoir(FixedSizeExemplarReservoirABC):
    """This reservoir uses an uniformly-weighted sampling algorithm based on the number
    of samples the reservoir has seen so far to determine if the offered measurements
    should be sampled.

    Reference:
    https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#simplefixedsizeexemplarreservoir
    """

    def __init__(self, size: int = 1, **kwargs) -> None:
        super().__init__(size, **kwargs)
        self._measurements_seen: int = 0

    def _reset(self) -> None:
        super()._reset()
        self._measurements_seen = 0

    def _find_bucket_index(
        self,
        value: Union[int, float],
        time_unix_nano: int,
        attributes: Attributes,
        context: Context,
    ) -> int:
        # Classic reservoir sampling: the first measurements fill the buckets
        # in order; afterwards, each new measurement replaces an existing one
        # with probability size / measurements_seen.
        self._measurements_seen += 1
        if self._measurements_seen < self._size:
            return self._measurements_seen - 1

        index = randrange(0, self._measurements_seen)
        if index < self._size:
            return index

        raise BucketIndexError("Unable to find the bucket index.")


class AlignedHistogramBucketExemplarReservoir(FixedSizeExemplarReservoirABC):
    """This Exemplar reservoir takes a configuration parameter that is the
    configuration of a Histogram. This implementation keeps the last seen measurement
    that falls within a histogram bucket.

    Reference:
    https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#alignedhistogrambucketexemplarreservoir
    """

    def __init__(self, boundaries: Sequence[float], **kwargs) -> None:
        # One bucket per histogram bucket, plus one for values above the
        # last boundary.
        super().__init__(len(boundaries) + 1, **kwargs)
        self._boundaries: Sequence[float] = boundaries

    def offer(
        self,
        value: Union[int, float],
        time_unix_nano: int,
        attributes: Attributes,
        context: Context,
    ) -> None:
        """Offers a measurement to be sampled."""
        # Unlike the base class, no try/except is needed here:
        # _find_bucket_index below always returns a valid index.
        index = self._find_bucket_index(
            value, time_unix_nano, attributes, context
        )
        self._reservoir_storage[index].offer(
            value, time_unix_nano, attributes, context
        )

    def _find_bucket_index(
        self,
        value: Union[int, float],
        time_unix_nano: int,
        attributes: Attributes,
        context: Context,
    ) -> int:
        for index, boundary in enumerate(self._boundaries):
            if value <= boundary:
                return index
        return len(self._boundaries)


ExemplarReservoirBuilder = Callable[[Dict[str, Any]], ExemplarReservoir]
ExemplarReservoirBuilder.__doc__ = """ExemplarReservoir builder.

It may receive the Aggregation parameters it is bounded to; e.g.
the _ExplicitBucketHistogramAggregation will provide the boundaries.
"""

# ---- file: opentelemetry/sdk/metrics/_internal/exponential_histogram/buckets.py ----
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from math import ceil, log2


class Buckets:
    # No method of this class is protected by locks because instances of this
    # class are only used in methods that are protected by locks themselves.

    def __init__(self):
        self._counts = [0]

        # The term index refers to the number of the exponential histogram
        # bucket used to determine its boundaries. The lower boundary of a
        # bucket is determined by base ** index and the upper boundary of a
        # bucket is determined by base ** (index + 1). index values are
        # signed to account for values less than or equal to 1.

        # self._index_* will all have values equal to a certain index that is
        # determined by the corresponding mapping _map_to_index function and
        # the value of the index depends on the value passed to _map_to_index.

        # Index of the 0th position in self._counts: self._counts[0] is the
        # count in the bucket with index self.__index_base.
        self.__index_base = 0

        # self.__index_start is the smallest index value represented in
        # self._counts.
        self.__index_start = 0

        # self.__index_end is the largest index value represented in
        # self._counts.
        self.__index_end = 0

    @property
    def index_start(self) -> int:
        return self.__index_start

    @index_start.setter
    def index_start(self, value: int) -> None:
        self.__index_start = value

    @property
    def index_end(self) -> int:
        return self.__index_end

    @index_end.setter
    def index_end(self, value: int) -> None:
        self.__index_end = value

    @property
    def index_base(self) -> int:
        return self.__index_base

    @index_base.setter
    def index_base(self, value: int) -> None:
        self.__index_base = value

    @property
    def counts(self):
        return self._counts

    def get_offset_counts(self):
        # Undo the circular rotation of the backing array: the result starts
        # at the bucket for index_start. When bias == 0 the slices reduce to
        # the list unchanged.
        bias = self.__index_base - self.__index_start
        return self._counts[-bias:] + self._counts[:-bias]

    def grow(self, needed: int, max_size: int) -> None:
        """Grow the backing array so it can hold at least ``needed`` buckets,
        capped at ``max_size``, preserving the circular layout."""
        size = len(self._counts)
        bias = self.__index_base - self.__index_start
        old_positive_limit = size - bias

        # 2 ** ceil(log2(needed)) finds the smallest power of two that is larger
        # or equal than needed:
        # 2 ** ceil(log2(1)) == 1
        # 2 ** ceil(log2(2)) == 2
        # 2 ** ceil(log2(3)) == 4
        # 2 ** ceil(log2(4)) == 4
        # 2 ** ceil(log2(5)) == 8
        # 2 ** ceil(log2(6)) == 8
        # 2 ** ceil(log2(7)) == 8
        # 2 ** ceil(log2(8)) == 8
        new_size = min(2 ** ceil(log2(needed)), max_size)

        new_positive_limit = new_size - bias

        tmp = [0] * new_size
        tmp[new_positive_limit:] = self._counts[old_positive_limit:]
        tmp[0:old_positive_limit] = self._counts[0:old_positive_limit]
        self._counts = tmp

    @property
    def offset(self) -> int:
        return self.__index_start

    def __len__(self) -> int:
        if len(self._counts) == 0:
            return 0

        if self.__index_end == self.__index_start and self[0] == 0:
            return 0

        return self.__index_end - self.__index_start + 1

    def __getitem__(self, key: int) -> int:
        # key is a logical offset from index_start; translate it into a
        # position in the circularly-rotated backing array.
        bias = self.__index_base - self.__index_start

        if key < bias:
            key += len(self._counts)

        key -= bias

        return self._counts[key]

    def downscale(self, amount: int) -> None:
        """
        Rotates, then collapses 2 ** amount to 1 buckets.
        """

        bias = self.__index_base - self.__index_start

        if bias != 0:
            self.__index_base = self.__index_start

            # [0, 1, 2, 3, 4] Original backing array

            self._counts = self._counts[::-1]
            # [4, 3, 2, 1, 0]

            self._counts = (
                self._counts[:bias][::-1] + self._counts[bias:][::-1]
            )
            # [3, 4, 0, 1, 2] This is a rotation of the backing array.

        size = 1 + self.__index_end - self.__index_start
        each = 1 << amount
        inpos = 0
        outpos = 0

        pos = self.__index_start

        while pos <= self.__index_end:
            mod = pos % each
            if mod < 0:
                mod += each

            index = mod

            while index < each and inpos < size:
                if outpos != inpos:
                    self._counts[outpos] += self._counts[inpos]
                    self._counts[inpos] = 0

                inpos += 1
                pos += 1
                index += 1

            outpos += 1

        self.__index_start >>= amount
        self.__index_end >>= amount
        self.__index_base = self.__index_start

    def increment_bucket(self, bucket_index: int, increment: int = 1) -> None:
        self._counts[bucket_index] += increment

    def copy_empty(self) -> "Buckets":
        """Return a new Buckets with the same layout but all counts zeroed."""
        copy = Buckets()

        # pylint: disable=no-member
        # pylint: disable=protected-access
        # pylint: disable=attribute-defined-outside-init
        # pylint: disable=invalid-name
        copy._Buckets__index_base = self._Buckets__index_base
        copy._Buckets__index_start = self._Buckets__index_start
        copy._Buckets__index_end = self._Buckets__index_end
        copy._counts = [0 for _ in self._counts]

        return copy

# ---- file: .../exponential_histogram/mapping/__init__.py ----
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from abc import ABC, abstractmethod


class Mapping(ABC):
    """
    Parent class for `LogarithmMapping` and `ExponentMapping`.
    """

    # pylint: disable=no-member
    def __new__(cls, scale: int):
        # Mappings are immutable and shared: one instance per (subclass,
        # scale), cached in the subclass's _mappings dictionary.
        with cls._mappings_lock:
            # cls._mappings and cls._mappings_lock are implemented in each of
            # the child classes as a dictionary and a lock, respectively. They
            # are not instantiated here because that would lead to both child
            # classes having the same instance of cls._mappings and
            # cls._mappings_lock.
            if scale not in cls._mappings:
                cls._mappings[scale] = super().__new__(cls)
                cls._mappings[scale]._init(scale)

        return cls._mappings[scale]

    @abstractmethod
    def _init(self, scale: int) -> None:
        # pylint: disable=attribute-defined-outside-init

        if scale > self._get_max_scale():
            # pylint: disable=broad-exception-raised
            raise Exception(f"scale is larger than {self._max_scale}")

        if scale < self._get_min_scale():
            # pylint: disable=broad-exception-raised
            raise Exception(f"scale is smaller than {self._min_scale}")

        # The size of the exponential histogram buckets is determined by a
        # parameter known as scale, larger values of scale will produce
        # smaller buckets. Bucket boundaries of the exponential histogram are
        # located at integer powers of the base, where:
        #
        # base = 2 ** (2 ** (-scale))
        # https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#all-scales-use-the-logarithm-function
        self._scale = scale

    @abstractmethod
    def _get_min_scale(self) -> int:
        """
        Return the smallest possible value for the mapping scale
        """

    @abstractmethod
    def _get_max_scale(self) -> int:
        """
        Return the largest possible value for the mapping scale
        """

    @abstractmethod
    def map_to_index(self, value: float) -> int:
        """
        Maps positive floating point values to indexes corresponding to
        `Mapping.scale`. Implementations are not expected to handle zeros,
        +inf, NaN, or negative values.
        """

    @abstractmethod
    def get_lower_boundary(self, index: int) -> float:
        """
        Returns the lower boundary of a given bucket index. The index is
        expected to map onto a range that is at least partially inside the
        range of normal floating point values. If the corresponding
        bucket's upper boundary is less than or equal to 2 ** -1022,
        :class:`~opentelemetry.sdk.metrics.MappingUnderflowError`
        will be raised. If the corresponding bucket's lower boundary is greater
        than ``sys.float_info.max``,
        :class:`~opentelemetry.sdk.metrics.MappingOverflowError`
        will be raised.
        """

    @property
    def scale(self) -> int:
        """
        Returns the parameter that controls the resolution of this mapping.
        See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/datamodel.md#exponential-scale
        """
        return self._scale

# ---- file: .../exponential_histogram/mapping/errors.py ----
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


class MappingUnderflowError(Exception):
    """
    Raised when computing the lower boundary of an index that maps into a
    denormal floating point value.
    """


class MappingOverflowError(Exception):
    """
    Raised when computing the lower boundary of an index that maps into +inf.
    """

# ---- file: .../exponential_histogram/mapping/exponent_mapping.py ----
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from math import ldexp
from threading import Lock

from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping import (
    Mapping,
)
from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping.errors import (
    MappingOverflowError,
    MappingUnderflowError,
)
from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping.ieee_754 import (
    MANTISSA_WIDTH,
    MAX_NORMAL_EXPONENT,
    MIN_NORMAL_EXPONENT,
    MIN_NORMAL_VALUE,
    get_ieee_754_exponent,
    get_ieee_754_mantissa,
)


class ExponentMapping(Mapping):
    """Mapping for non-positive scales, computed directly from the IEEE 754
    exponent bits (no logarithm is needed for scales <= 0)."""

    # Reference implementation here:
    # https://github.com/open-telemetry/opentelemetry-go/blob/0e6f9c29c10d6078e8131418e1d1d166c7195d61/sdk/metric/aggregator/exponential/mapping/exponent/exponent.go

    # Per-scale instance cache used by Mapping.__new__.
    _mappings = {}
    _mappings_lock = Lock()

    _min_scale = -10
    _max_scale = 0

    def _get_min_scale(self):
        # _min_scale defines the point at which the exponential mapping
        # function becomes useless for 64-bit floats. With scale -10, ignoring
        # subnormal values, bucket indices range from -1 to 1.
        return -10

    def _get_max_scale(self):
        # _max_scale is the largest scale supported by exponential mapping. Use
        # a logarithm mapping for larger scales.
        return 0

    def _init(self, scale: int):
        # pylint: disable=attribute-defined-outside-init

        super()._init(scale)

        # self._min_normal_lower_boundary_index is the largest index such that
        # base ** index < MIN_NORMAL_VALUE and
        # base ** (index + 1) >= MIN_NORMAL_VALUE. An exponential histogram
        # bucket with this index covers the range
        # (base ** index, base ** (index + 1)], including MIN_NORMAL_VALUE.
        # This is the smallest valid index that contains at least one normal
        # value.
        index = MIN_NORMAL_EXPONENT >> -self._scale

        if -self._scale < 2:
            # For scales -1 and 0, the value 2 ** -1022 is a
            # power-of-two multiple, meaning base ** index == MIN_NORMAL_VALUE.
            # Subtracting 1 so that base ** (index + 1) == MIN_NORMAL_VALUE.
            index -= 1

        self._min_normal_lower_boundary_index = index

        # self._max_normal_lower_boundary_index is the index such that
        # base**index equals the greatest representable lower boundary. An
        # exponential histogram bucket with this index covers the range
        # ((2 ** 1024) / base, 2 ** 1024], which includes opentelemetry.sdk.
        # metrics._internal.exponential_histogram.ieee_754.MAX_NORMAL_VALUE.
        # This bucket is incomplete, since the upper boundary cannot be
        # represented. One greater than this index corresponds with the bucket
        # containing values > 2 ** 1024.
        self._max_normal_lower_boundary_index = (
            MAX_NORMAL_EXPONENT >> -self._scale
        )

    def map_to_index(self, value: float) -> int:
        """Return the bucket index for ``value`` at this mapping's scale.

        Subnormal values are clamped into the smallest normal bucket.
        """
        if value < MIN_NORMAL_VALUE:
            return self._min_normal_lower_boundary_index

        exponent = get_ieee_754_exponent(value)

        # Positive integers are represented in binary as having an infinite
        # amount of leading zeroes, for example 2 is represented as ...00010.

        # A negative integer -x is represented in binary as the complement of
        # (x - 1). For example, -4 is represented as the complement of 4 - 1
        # == 3. 3 is represented as ...00011. Its complement is ...11100, the
        # binary representation of -4.

        # get_ieee_754_mantissa(value) gets the positive integer made up
        # from the rightmost MANTISSA_WIDTH bits (the mantissa) of the IEEE
        # 754 representation of value. If value is an exact power of 2, all
        # these MANTISSA_WIDTH bits would be all zeroes, and when 1 is
        # subtracted the resulting value is -1. The binary representation of
        # -1 is ...111, so when these bits are right shifted MANTISSA_WIDTH
        # places, the resulting value for correction is -1. If value is not an
        # exact power of 2, at least one of the rightmost MANTISSA_WIDTH
        # bits would be 1 (even for values whose decimal part is 0, like 5.0
        # since the IEEE 754 of such number is also the product of a power of 2
        # (defined in the exponent part of the IEEE 754 representation) and the
        # value defined in the mantissa). Having at least one of the rightmost
        # MANTISSA_WIDTH bit being 1 means that get_ieee_754_mantissa(value)
        # will always be greater or equal to 1, and when 1 is subtracted, the
        # result will be greater or equal to 0, whose representation in binary
        # will be of at most MANTISSA_WIDTH ones that have an infinite
        # amount of leading zeroes. When those MANTISSA_WIDTH bits are
        # shifted to the right MANTISSA_WIDTH places, the resulting value
        # will be 0.

        # In summary, correction will be -1 if value is a power of 2, 0 if not.

        # FIXME Document why we can assume value will not be 0, inf, or NaN.
        correction = (get_ieee_754_mantissa(value) - 1) >> MANTISSA_WIDTH

        return (exponent + correction) >> -self._scale

    def get_lower_boundary(self, index: int) -> float:
        """Return base ** index, raising MappingUnderflowError /
        MappingOverflowError when the result is not a normal float."""
        if index < self._min_normal_lower_boundary_index:
            raise MappingUnderflowError()

        if index > self._max_normal_lower_boundary_index:
            raise MappingOverflowError()

        # base ** index == 2 ** (index * (2 ** -scale)) == ldexp(1, ...)
        return ldexp(1, index << -self._scale)

    @property
    def scale(self) -> int:
        return self._scale

# ---- file: .../exponential_histogram/mapping/ieee_754.md (begins below, continues past this block) ----

# IEEE 754 Explained

IEEE 754 is a standard that defines a way to represent certain mathematical
objects using binary numbers.
+ +## Binary Number Fields + +The binary numbers used in IEEE 754 can have different lengths, the length that +is interesting for the purposes of this project is 64 bits. These binary +numbers are made up of 3 contiguous fields of bits, from left to right: + +1. 1 sign bit +2. 11 exponent bits +3. 52 mantissa bits + +Depending on the values these fields have, the represented mathematical object +can be one of: + +* Floating point number +* Zero +* NaN +* Infinite + +## Floating Point Numbers + +IEEE 754 represents a floating point number $f$ using an exponential +notation with 4 components: $sign$, $mantissa$, $base$ and $exponent$: + +$$f = sign \times mantissa \times base ^ {exponent}$$ + +There are two possible representations of floating point numbers: +_normal_ and _denormal_, which have different valid values for +their $mantissa$ and $exponent$ fields. + +### Binary Representation + +$sign$, $mantissa$, and $exponent$ are represented in binary, the +representation of each component has certain details explained next. + +$base$ is always $2$ and it is not represented in binary. + +#### Sign + +$sign$ can have 2 values: + +1. $1$ if the `sign` bit is `0` +2. $-1$ if the `sign` bit is `1`. + +#### Mantissa + +##### Normal Floating Point Numbers + +$mantissa$ is a positive fractional number whose integer part is $1$, for example +$1.2345 \dots$. The `mantissa` bits represent only the fractional part and the +$mantissa$ value can be calculated as: + +$$mantissa = 1 + \sum_{i=1}^{52} b_{i} \times 2^{-i} = 1 + \frac{b_{1}}{2^{1}} + \frac{b_{2}}{2^{2}} + \dots + \frac{b_{51}}{2^{51}} + \frac{b_{52}}{2^{52}}$$ + +Where $b_{i}$ is: + +1. $0$ if the bit at the position `i - 1` is `0`. +2. $1$ if the bit at the position `i - 1` is `1`. + +##### Denormal Floating Point Numbers + +$mantissa$ is a positive fractional number whose integer part is $0$, for example +$0.12345 \dots$. 
The `mantissa` bits represent only the fractional part and the +$mantissa$ value can be calculated as: + +$$mantissa = \sum_{i=1}^{52} b_{i} \times 2^{-i} = \frac{b_{1}}{2^{1}} + \frac{b_{2}}{2^{2}} + \dots + \frac{b_{51}}{2^{51}} + \frac{b_{52}}{2^{52}}$$ + +Where $b_{i}$ is: + +1. $0$ if the bit at the position `i - 1` is `0`. +2. $1$ if the bit at the position `i - 1` is `1`. + +#### Exponent + +##### Normal Floating Point Numbers + +Only the following bit sequences are allowed: `00000000001` to `11111111110`. +That is, there must be at least one `0` and one `1` in the exponent bits. + +The actual value of the $exponent$ can be calculated as: + +$$exponent = v - bias$$ + +where $v$ is the value of the binary number in the exponent bits and $bias$ is $1023$. +Considering the restrictions above, the respective minimum and maximum values for the +exponent are: + +1. `00000000001` = $1$, $1 - 1023 = -1022$ +2. `11111111110` = $2046$, $2046 - 1023 = 1023$ + +So, $exponent$ is an integer in the range $\left[-1022, 1023\right]$. + + +##### Denormal Floating Point Numbers + +$exponent$ is always $-1022$. Nevertheless, it is always represented as `00000000000`. + +### Normal and Denormal Floating Point Numbers + +The smallest absolute value a normal floating point number can have is calculated +like this: + +$$1 \times 1.0\dots0 \times 2^{-1022} = 2.2250738585072014 \times 10^{-308}$$ + +Since normal floating point numbers always have a $1$ as the integer part of the +$mantissa$, then smaller values can be achieved by using the smallest possible exponent +( $-1022$ ) and a $0$ in the integer part of the $mantissa$, but significant digits are lost. 

The smallest absolute value a denormal floating point number can have is calculated
like this:

$$1 \times 2^{-52} \times 2^{-1022} = 5 \times 10^{-324}$$

## Zero

Zero is represented like this:

* Sign bit: `X`
* Exponent bits: `00000000000`
* Mantissa bits: `0000000000000000000000000000000000000000000000000000`

where `X` means `0` or `1`.

## NaN

There are 2 kinds of NaNs that are represented:

1. QNaNs (Quiet NaNs): represent the result of indeterminate operations.
2. SNaNs (Signalling NaNs): represent the result of invalid operations.

### QNaNs

QNaNs are represented like this:

* Sign bit: `X`
* Exponent bits: `11111111111`
* Mantissa bits: `1XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX`

where `X` means `0` or `1`.

### SNaNs

SNaNs are represented like this:

* Sign bit: `X`
* Exponent bits: `11111111111`
* Mantissa bits: `0XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX1`

where `X` means `0` or `1`.

## Infinite

### Positive Infinite

Positive infinite is represented like this:

* Sign bit: `0`
* Exponent bits: `11111111111`
* Mantissa bits: `0000000000000000000000000000000000000000000000000000`

### Negative Infinite

Negative infinite is represented like this:

* Sign bit: `1`
* Exponent bits: `11111111111`
* Mantissa bits: `0000000000000000000000000000000000000000000000000000`
# ---- file: .../exponential_histogram/mapping/ieee_754.py ----
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ctypes import c_double, c_uint64
from sys import float_info

# Field widths of an IEEE 754 binary64 value: 1 sign bit, 11 exponent bits
# and 52 mantissa bits.
MANTISSA_WIDTH = 52
EXPONENT_WIDTH = 11

# 52 consecutive "1" bits (0xfffffffffffff): selects the mantissa field.
MANTISSA_MASK = (1 << MANTISSA_WIDTH) - 1

# The stored exponent is biased: the raw 11-bit field holds values 1..2046
# (0 and 2047 are reserved for zero/subnormals and inf/NaN respectively),
# and the bias 1023 is subtracted to obtain the actual exponent, giving the
# range [-1022, 1023].
EXPONENT_BIAS = (2 ** (EXPONENT_WIDTH - 1)) - 1

# 11 consecutive "1" bits shifted past the mantissa: selects the exponent
# field.
EXPONENT_MASK = ((1 << EXPONENT_WIDTH) - 1) << MANTISSA_WIDTH

# The single topmost bit: selects the sign field.
SIGN_MASK = 1 << (EXPONENT_WIDTH + MANTISSA_WIDTH)

# Unbiased exponent range of normal (non-subnormal) floats.
MIN_NORMAL_EXPONENT = -EXPONENT_BIAS + 1
MAX_NORMAL_EXPONENT = EXPONENT_BIAS

# Smallest positive normal value: 1.0 * 2 ** -1022 == 2.2250738585072014e-308.
MIN_NORMAL_VALUE = float_info.min

# Largest finite value: ~1.9999999999999998 * 2 ** 1023 ==
# 1.7976931348623157e+308 (all 52 mantissa bits set, maximum exponent).
MAX_NORMAL_VALUE = float_info.max


def _raw_double_bits(value: float) -> int:
    # Reinterpret the 8 bytes of a binary64 float as an unsigned 64-bit
    # integer, exposing the sign/exponent/mantissa fields for masking.
    return c_uint64.from_buffer(c_double(value)).value


def get_ieee_754_exponent(value: float) -> int:
    """
    Gets the exponent of the IEEE 754 representation of a float.

    The raw 11-bit exponent field is isolated with EXPONENT_MASK, shifted
    down past the mantissa, and un-biased. For example, for
    -MAX_NORMAL_VALUE the raw field is 0b11111111110 == 2046, and
    2046 - 1023 == 1023 == MAX_NORMAL_EXPONENT.
    """
    raw_field = (_raw_double_bits(value) & EXPONENT_MASK) >> MANTISSA_WIDTH
    return raw_field - EXPONENT_BIAS


def get_ieee_754_mantissa(value: float) -> int:
    """
    Gets the mantissa (the rightmost 52 bits) of the IEEE 754
    representation of a float, as a non-negative integer. No shift is
    needed because the mantissa occupies the least-significant bits.
    """
    return _raw_double_bits(value) & MANTISSA_MASK

# ---- file: .../exponential_histogram/mapping/logarithm_mapping.py ----
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class LogarithmMapping(Mapping):
    """`Mapping` for positive scales, computing exponential-histogram bucket
    indexes with the natural logarithm.

    For scale > 0 the histogram base is ``2 ** (2 ** -scale)``, so
    ``index(value) == floor(log(value) / log(base))``, computed here as
    ``floor(log(value) * self._scale_factor)``.  Zero and negative scales are
    served by ExponentMapping instead (see ``_get_min_scale``).
    """

    # Reference implementation here:
    # https://github.com/open-telemetry/opentelemetry-go/blob/0e6f9c29c10d6078e8131418e1d1d166c7195d61/sdk/metric/aggregator/exponential/mapping/logarithm/logarithm.go

    # Per-scale singleton registry shared through the Mapping base class,
    # guarded by _mappings_lock.
    _mappings = {}
    _mappings_lock = Lock()

    _min_scale = 1
    _max_scale = 20

    def _get_min_scale(self):
        # _min_scale ensures that ExponentMapping is used for zero and negative
        # scale values.
        return self._min_scale

    def _get_max_scale(self):
        # FIXME The Go implementation uses a value of 20 here, find out the
        # right value for this implementation, more information here:
        # https://github.com/lightstep/otel-launcher-go/blob/c9ca8483be067a39ab306b09060446e7fda65f35/lightstep/sdk/metric/aggregator/histogram/structure/README.md#mapping-function
        # https://github.com/open-telemetry/opentelemetry-go/blob/0e6f9c29c10d6078e8131418e1d1d166c7195d61/sdk/metric/aggregator/exponential/mapping/logarithm/logarithm.go#L32-L45
        return self._max_scale

    def _init(self, scale: int):
        """Precompute the scale factor and the index bounds for *scale*."""
        # pylint: disable=attribute-defined-outside-init

        super()._init(scale)

        # self._scale_factor is defined as a multiplier because multiplication
        # is faster than division. self._scale_factor is defined as:
        # index = log(value) * self._scale_factor
        # Where:
        # index = log(value) / log(base)
        # index = log(value) / log(2 ** (2 ** -scale))
        # index = log(value) / ((2 ** -scale) * log(2))
        # index = log(value) * ((1 / log(2)) * (2 ** scale))
        # self._scale_factor = ((1 / log(2)) * (2 ** scale))
        # self._scale_factor = (1 /log(2)) * (2 ** scale)
        # self._scale_factor = ldexp(1 / log(2), scale)
        # This implementation was copied from a Java prototype. See:
        # https://github.com/newrelic-experimental/newrelic-sketch-java/blob/1ce245713603d61ba3a4510f6df930a5479cd3f6/src/main/java/com/newrelic/nrsketch/indexer/LogIndexer.java
        # for the equations used here.
        self._scale_factor = ldexp(1 / log(2), scale)

        # self._min_normal_lower_boundary_index is the index such that
        # base ** index == MIN_NORMAL_VALUE. An exponential histogram bucket
        # with this index covers the range
        # (MIN_NORMAL_VALUE, MIN_NORMAL_VALUE * base]. One less than this index
        # corresponds with the bucket containing values <= MIN_NORMAL_VALUE.
        self._min_normal_lower_boundary_index = (
            MIN_NORMAL_EXPONENT << self._scale
        )

        # self._max_normal_lower_boundary_index is the index such that
        # base ** index equals the greatest representable lower boundary. An
        # exponential histogram bucket with this index covers the range
        # ((2 ** 1024) / base, 2 ** 1024], which includes opentelemetry.sdk.
        # metrics._internal.exponential_histogram.ieee_754.MAX_NORMAL_VALUE.
        # This bucket is incomplete, since the upper boundary cannot be
        # represented. One greater than this index corresponds with the bucket
        # containing values > 2 ** 1024.
        self._max_normal_lower_boundary_index = (
            (MAX_NORMAL_EXPONENT + 1) << self._scale
        ) - 1

    def map_to_index(self, value: float) -> int:
        """
        Maps positive floating point values to indexes corresponding to scale.
        """

        # value is subnormal
        if value <= MIN_NORMAL_VALUE:
            return self._min_normal_lower_boundary_index - 1

        # value is an exact power of two.
        if get_ieee_754_mantissa(value) == 0:
            exponent = get_ieee_754_exponent(value)
            return (exponent << self._scale) - 1

        # General case: logarithm-based index, clamped so the top
        # (incomplete) bucket absorbs values near 2 ** 1024.
        return min(
            floor(log(value) * self._scale_factor),
            self._max_normal_lower_boundary_index,
        )

    def get_lower_boundary(self, index: int) -> float:
        """Return the lower boundary of the bucket at *index*.

        Raises:
            MappingOverflowError: *index* is above the greatest representable
                bucket index.
            MappingUnderflowError: *index* is below the least representable
                bucket index.
        """
        if index >= self._max_normal_lower_boundary_index:
            if index == self._max_normal_lower_boundary_index:
                # Algebraically identical to exp(index / scale_factor); the
                # shifted form avoids overflowing exp() at the top bucket,
                # since (2 ** scale) / scale_factor == log(2).
                return 2 * exp(
                    (index - (1 << self._scale)) / self._scale_factor
                )
            raise MappingOverflowError()

        if index <= self._min_normal_lower_boundary_index:
            if index == self._min_normal_lower_boundary_index:
                return MIN_NORMAL_VALUE
            if index == self._min_normal_lower_boundary_index - 1:
                # Mirror of the overflow-avoiding rescaling above, for the
                # bucket just below the smallest normal value.
                return (
                    exp((index + (1 << self._scale)) / self._scale_factor) / 2
                )
            raise MappingUnderflowError()

        return exp(index / self._scale_factor)

    @property
    def scale(self) -> int:
        # The scale this mapping instance was initialized with.
        return self._scale
class MetricExporter(ABC):
    """Base class for services that ship metrics out of the SDK in their own
    format.

    Args:
        preferred_temporality: Mapping of instrument class to the aggregation
            temporality this exporter prefers; consumed by
            `opentelemetry.sdk.metrics.export.PeriodicExportingMetricReader`.
            See `opentelemetry.sdk.metrics.export.MetricReader` for more
            details on what preferred temporality is.
        preferred_aggregation: Mapping of instrument class to the aggregation
            this exporter prefers; consumed by
            `opentelemetry.sdk.metrics.export.PeriodicExportingMetricReader`.
            See `opentelemetry.sdk.metrics.export.MetricReader` for more
            details on what preferred aggregation is.
    """

    def __init__(
        self,
        preferred_temporality: dict[type, AggregationTemporality]
        | None = None,
        preferred_aggregation: dict[
            type, "opentelemetry.sdk.metrics.view.Aggregation"
        ]
        | None = None,
    ) -> None:
        # Stored verbatim; readers read these private attributes directly.
        self._preferred_temporality = preferred_temporality
        self._preferred_aggregation = preferred_aggregation

    @abstractmethod
    def export(
        self,
        metrics_data: MetricsData,
        timeout_millis: float = 10_000,
        **kwargs,
    ) -> MetricExportResult:
        """Export a batch of telemetry data and return the result.

        Args:
            metrics_data: The `opentelemetry.sdk.metrics.export.MetricsData`
                batch to be exported.

        Returns:
            The result of the export.
        """

    @abstractmethod
    def force_flush(self, timeout_millis: float = 10_000) -> bool:
        """Complete the export of any metrics this exporter has already
        received, as soon as possible."""

    @abstractmethod
    def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None:
        """Release exporter resources; called once when the SDK shuts down."""
+ """ + + def __init__( + self, + out: IO = stdout, + formatter: Callable[ + ["opentelemetry.sdk.metrics.export.MetricsData"], str + ] = lambda metrics_data: metrics_data.to_json() + linesep, + preferred_temporality: dict[type, AggregationTemporality] + | None = None, + preferred_aggregation: dict[ + type, "opentelemetry.sdk.metrics.view.Aggregation" + ] + | None = None, + ): + super().__init__( + preferred_temporality=preferred_temporality, + preferred_aggregation=preferred_aggregation, + ) + self.out = out + self.formatter = formatter + + def export( + self, + metrics_data: MetricsData, + timeout_millis: float = 10_000, + **kwargs, + ) -> MetricExportResult: + self.out.write(self.formatter(metrics_data)) + self.out.flush() + return MetricExportResult.SUCCESS + + def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None: + pass + + def force_flush(self, timeout_millis: float = 10_000) -> bool: + return True + + +class MetricReader(ABC): + # pylint: disable=too-many-branches,broad-exception-raised + """ + Base class for all metric readers + + Args: + preferred_temporality: A mapping between instrument classes and + aggregation temporality. By default uses CUMULATIVE for all instrument + classes. This mapping will be used to define the default aggregation + temporality of every instrument class. If the user wants to make a + change in the default aggregation temporality of an instrument class, + it is enough to pass here a dictionary whose keys are the instrument + classes and the values are the corresponding desired aggregation + temporalities of the classes that the user wants to change, not all of + them. The classes not included in the passed dictionary will retain + their association to their default aggregation temporalities. + preferred_aggregation: A mapping between instrument classes and + aggregation instances. By default maps all instrument classes to an + instance of `DefaultAggregation`. 
This mapping will be used to + define the default aggregation of every instrument class. If the + user wants to make a change in the default aggregation of an + instrument class, it is enough to pass here a dictionary whose keys + are the instrument classes and the values are the corresponding + desired aggregation for the instrument classes that the user wants + to change, not necessarily all of them. The classes not included in + the passed dictionary will retain their association to their + default aggregations. The aggregation defined here will be + overridden by an aggregation defined by a view that is not + `DefaultAggregation`. + + .. document protected _receive_metrics which is a intended to be overridden by subclass + .. automethod:: _receive_metrics + """ + + def __init__( + self, + preferred_temporality: dict[type, AggregationTemporality] + | None = None, + preferred_aggregation: dict[ + type, "opentelemetry.sdk.metrics.view.Aggregation" + ] + | None = None, + ) -> None: + self._collect: Callable[ + [ + "opentelemetry.sdk.metrics.export.MetricReader", + AggregationTemporality, + ], + Iterable["opentelemetry.sdk.metrics.export.Metric"], + ] = None + + self._instrument_class_temporality = { + _Counter: AggregationTemporality.CUMULATIVE, + _UpDownCounter: AggregationTemporality.CUMULATIVE, + _Histogram: AggregationTemporality.CUMULATIVE, + _Gauge: AggregationTemporality.CUMULATIVE, + _ObservableCounter: AggregationTemporality.CUMULATIVE, + _ObservableUpDownCounter: AggregationTemporality.CUMULATIVE, + _ObservableGauge: AggregationTemporality.CUMULATIVE, + } + + if preferred_temporality is not None: + for temporality in preferred_temporality.values(): + if temporality not in ( + AggregationTemporality.CUMULATIVE, + AggregationTemporality.DELTA, + ): + raise Exception( + f"Invalid temporality value found {temporality}" + ) + + if preferred_temporality is not None: + for typ, temporality in preferred_temporality.items(): + if typ is Counter: + 
self._instrument_class_temporality[_Counter] = temporality + elif typ is UpDownCounter: + self._instrument_class_temporality[_UpDownCounter] = ( + temporality + ) + elif typ is Histogram: + self._instrument_class_temporality[_Histogram] = ( + temporality + ) + elif typ is Gauge: + self._instrument_class_temporality[_Gauge] = temporality + elif typ is ObservableCounter: + self._instrument_class_temporality[_ObservableCounter] = ( + temporality + ) + elif typ is ObservableUpDownCounter: + self._instrument_class_temporality[ + _ObservableUpDownCounter + ] = temporality + elif typ is ObservableGauge: + self._instrument_class_temporality[_ObservableGauge] = ( + temporality + ) + else: + raise Exception(f"Invalid instrument class found {typ}") + + self._preferred_temporality = preferred_temporality + self._instrument_class_aggregation = { + _Counter: DefaultAggregation(), + _UpDownCounter: DefaultAggregation(), + _Histogram: DefaultAggregation(), + _Gauge: DefaultAggregation(), + _ObservableCounter: DefaultAggregation(), + _ObservableUpDownCounter: DefaultAggregation(), + _ObservableGauge: DefaultAggregation(), + } + + if preferred_aggregation is not None: + for typ, aggregation in preferred_aggregation.items(): + if typ is Counter: + self._instrument_class_aggregation[_Counter] = aggregation + elif typ is UpDownCounter: + self._instrument_class_aggregation[_UpDownCounter] = ( + aggregation + ) + elif typ is Histogram: + self._instrument_class_aggregation[_Histogram] = ( + aggregation + ) + elif typ is Gauge: + self._instrument_class_aggregation[_Gauge] = aggregation + elif typ is ObservableCounter: + self._instrument_class_aggregation[_ObservableCounter] = ( + aggregation + ) + elif typ is ObservableUpDownCounter: + self._instrument_class_aggregation[ + _ObservableUpDownCounter + ] = aggregation + elif typ is ObservableGauge: + self._instrument_class_aggregation[_ObservableGauge] = ( + aggregation + ) + else: + raise Exception(f"Invalid instrument class found {typ}") + 
+ @final + def collect(self, timeout_millis: float = 10_000) -> None: + """Collects the metrics from the internal SDK state and + invokes the `_receive_metrics` with the collection. + + Args: + timeout_millis: Amount of time in milliseconds before this function + raises a timeout error. + + If any of the underlying ``collect`` methods called by this method + fails by any reason (including timeout) an exception will be raised + detailing the individual errors that caused this function to fail. + """ + if self._collect is None: + _logger.warning( + "Cannot call collect on a MetricReader until it is registered on a MeterProvider" + ) + return + + metrics = self._collect(self, timeout_millis=timeout_millis) + + if metrics is not None: + self._receive_metrics( + metrics, + timeout_millis=timeout_millis, + ) + + @final + def _set_collect_callback( + self, + func: Callable[ + [ + "opentelemetry.sdk.metrics.export.MetricReader", + AggregationTemporality, + ], + Iterable["opentelemetry.sdk.metrics.export.Metric"], + ], + ) -> None: + """This function is internal to the SDK. It should not be called or overridden by users""" + self._collect = func + + @abstractmethod + def _receive_metrics( + self, + metrics_data: "opentelemetry.sdk.metrics.export.MetricsData", + timeout_millis: float = 10_000, + **kwargs, + ) -> None: + """Called by `MetricReader.collect` when it receives a batch of metrics""" + + def force_flush(self, timeout_millis: float = 10_000) -> bool: + self.collect(timeout_millis=timeout_millis) + return True + + @abstractmethod + def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None: + """Shuts down the MetricReader. This method provides a way + for the MetricReader to do any cleanup required. A metric reader can + only be shutdown once, any subsequent calls are ignored and return + failure status. 
class PeriodicExportingMetricReader(MetricReader):
    """`PeriodicExportingMetricReader` is an implementation of `MetricReader`
    that collects metrics based on a user-configurable time interval, and passes the
    metrics to the configured exporter. If the time interval is set to `math.inf`, the
    reader will not invoke periodic collection.

    The configured exporter's :py:meth:`~MetricExporter.export` method will not be called
    concurrently.
    """

    def __init__(
        self,
        exporter: MetricExporter,
        export_interval_millis: Optional[float] = None,
        export_timeout_millis: Optional[float] = None,
    ) -> None:
        # PeriodicExportingMetricReader defers to exporter for configuration
        super().__init__(
            preferred_temporality=exporter._preferred_temporality,
            preferred_aggregation=exporter._preferred_aggregation,
        )

        # This lock is held whenever calling self._exporter.export() to prevent concurrent
        # execution of MetricExporter.export()
        # https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exportbatch
        self._export_lock = Lock()

        self._exporter = exporter
        # Interval and timeout fall back to the OTEL_METRIC_EXPORT_* env vars
        # and then to 60 s / 30 s defaults; malformed env values log a
        # warning and use the default instead of raising.
        if export_interval_millis is None:
            try:
                export_interval_millis = float(
                    environ.get(OTEL_METRIC_EXPORT_INTERVAL, 60000)
                )
            except ValueError:
                _logger.warning(
                    "Found invalid value for export interval, using default"
                )
                export_interval_millis = 60000
        if export_timeout_millis is None:
            try:
                export_timeout_millis = float(
                    environ.get(OTEL_METRIC_EXPORT_TIMEOUT, 30000)
                )
            except ValueError:
                _logger.warning(
                    "Found invalid value for export timeout, using default"
                )
                export_timeout_millis = 30000
        self._export_interval_millis = export_interval_millis
        self._export_timeout_millis = export_timeout_millis
        self._shutdown = False
        self._shutdown_event = Event()
        self._shutdown_once = Once()
        self._daemon_thread = None
        # A finite positive interval starts the background ticker thread;
        # math.inf disables periodic collection entirely.
        if (
            self._export_interval_millis > 0
            and self._export_interval_millis < math.inf
        ):
            self._daemon_thread = Thread(
                name="OtelPeriodicExportingMetricReader",
                target=self._ticker,
                daemon=True,
            )
            self._daemon_thread.start()
            if hasattr(os, "register_at_fork"):
                # WeakMethod keeps the fork hook from holding this reader
                # alive after it is otherwise garbage collected.
                weak_at_fork = weakref.WeakMethod(self._at_fork_reinit)

                os.register_at_fork(
                    after_in_child=lambda: weak_at_fork()()  # pylint: disable=unnecessary-lambda, protected-access
                )
        elif self._export_interval_millis <= 0:
            raise ValueError(
                f"interval value {self._export_interval_millis} is invalid \
                and needs to be larger than zero."
            )

    def _at_fork_reinit(self):
        # Threads do not survive fork(); restart the ticker in the child.
        self._daemon_thread = Thread(
            name="OtelPeriodicExportingMetricReader",
            target=self._ticker,
            daemon=True,
        )
        self._daemon_thread.start()

    def _ticker(self) -> None:
        interval_secs = self._export_interval_millis / 1e3
        # wait() returns True once shutdown() sets the event, ending the loop.
        while not self._shutdown_event.wait(interval_secs):
            try:
                self.collect(timeout_millis=self._export_timeout_millis)
            except MetricsTimeoutError:
                _logger.warning(
                    "Metric collection timed out. Will try again after %s seconds",
                    interval_secs,
                    exc_info=True,
                )
        # one last collection below before shutting down completely
        try:
            # NOTE(review): this final collect passes the *interval*
            # (_export_interval_millis) as the timeout, unlike the in-loop
            # collect above which uses _export_timeout_millis — confirm
            # whether this is intentional.
            self.collect(timeout_millis=self._export_interval_millis)
        except MetricsTimeoutError:
            _logger.warning(
                "Metric collection timed out.",
                exc_info=True,
            )

    def _receive_metrics(
        self,
        metrics_data: MetricsData,
        timeout_millis: float = 10_000,
        **kwargs,
    ) -> None:
        # Suppress instrumentation while exporting so the exporter's own
        # I/O does not feed back into the SDK recursively.
        token = attach(set_value(_SUPPRESS_INSTRUMENTATION_KEY, True))
        # pylint: disable=broad-exception-caught,invalid-name
        try:
            with self._export_lock:
                self._exporter.export(
                    metrics_data, timeout_millis=timeout_millis
                )
        except Exception:
            _logger.exception("Exception while exporting metrics")
        detach(token)

    def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None:
        # Overall deadline shared by the thread join and the exporter
        # shutdown below.
        deadline_ns = time_ns() + timeout_millis * 10**6

        def _shutdown():
            self._shutdown = True

        # Once.do_once returns False when shutdown has already run.
        did_set = self._shutdown_once.do_once(_shutdown)
        if not did_set:
            _logger.warning("Can't shutdown multiple times")
            return

        self._shutdown_event.set()
        if self._daemon_thread:
            self._daemon_thread.join(timeout=(deadline_ns - time_ns()) / 10**9)
        # NOTE(review): the exporter is called with keyword `timeout`, but
        # MetricExporter.shutdown declares `timeout_millis`, so this value
        # lands in **kwargs and the exporter's timeout_millis keeps its
        # default — confirm against the exporter implementations in use.
        self._exporter.shutdown(timeout=(deadline_ns - time_ns()) / 10**6)

    def force_flush(self, timeout_millis: float = 10_000) -> bool:
        # One collection pass via the base class, then flush the exporter.
        super().force_flush(timeout_millis=timeout_millis)
        self._exporter.force_flush(timeout_millis=timeout_millis)
        return True
a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/instrument.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/instrument.py new file mode 100644 index 00000000..b01578f4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/_internal/instrument.py @@ -0,0 +1,334 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# pylint: disable=too-many-ancestors, unused-import +from __future__ import annotations + +from logging import getLogger +from time import time_ns +from typing import Generator, Iterable, List, Sequence, Union + +# This kind of import is needed to avoid Sphinx errors. 
class _Asynchronous:
    """Mixin shared by the SDK's asynchronous (observable) instruments.

    Validates the instrument name and unit, normalizes user-supplied
    callbacks — plain callables and generators alike — into a uniform
    call-with-options form, and yields SDK `Measurement`s from them at
    collection time.
    """

    def __init__(
        self,
        name: str,
        instrumentation_scope: InstrumentationScope,
        measurement_consumer: "opentelemetry.sdk.metrics.MeasurementConsumer",
        callbacks: Iterable[CallbackT] | None = None,
        unit: str = "",
        description: str = "",
    ):
        # pylint: disable=no-member
        # _check_name_unit_description is expected from a sibling base class
        # in the cooperative MRO (hence the no-member disable); a None entry
        # in its result marks the corresponding field as invalid.
        result = self._check_name_unit_description(name, unit, description)

        if result["name"] is None:
            # pylint: disable=broad-exception-raised
            raise Exception(_ERROR_MESSAGE.format(name))

        if result["unit"] is None:
            # pylint: disable=broad-exception-raised
            raise Exception(_ERROR_MESSAGE.format(unit))

        name = result["name"]
        unit = result["unit"]
        description = result["description"]

        self.name = name.lower()
        self.unit = unit
        self.description = description
        self.instrumentation_scope = instrumentation_scope
        self._measurement_consumer = measurement_consumer
        super().__init__(name, callbacks, unit=unit, description=description)

        self._callbacks: List[CallbackT] = []

        if callbacks is not None:
            for callback in callbacks:
                if isinstance(callback, Generator):
                    # advance generator to its first yield
                    next(callback)

                    def inner(
                        options: CallbackOptions,
                        callback=callback,
                    ) -> Iterable[Measurement]:
                        # Adapter that gives generator callbacks the same
                        # call-with-options interface as plain callables.
                        # `callback=callback` binds the current loop value at
                        # definition time (avoids the late-binding closure
                        # pitfall).
                        try:
                            return callback.send(options)
                        except StopIteration:
                            # An exhausted generator reports no measurements.
                            return []

                    self._callbacks.append(inner)
                else:
                    self._callbacks.append(callback)

    def callback(
        self, callback_options: CallbackOptions
    ) -> Iterable[Measurement]:
        """Run every registered callback and convert the API measurements it
        produces into SDK `Measurement`s; a failing callback is logged and
        skipped rather than aborting the whole collection."""
        for callback in self._callbacks:
            try:
                for api_measurement in callback(callback_options):
                    yield Measurement(
                        api_measurement.value,
                        time_unix_nano=time_ns(),
                        instrument=self,
                        context=api_measurement.context or get_current(),
                        attributes=api_measurement.attributes,
                    )
            except Exception:  # pylint: disable=broad-exception-caught
                _logger.exception(
                    "Callback failed for instrument %s.", self.name
                )
instantiated via a meter.") + return super().__new__(cls) + + def add( + self, + amount: Union[int, float], + attributes: dict[str, str] | None = None, + context: Context | None = None, + ): + if amount < 0: + _logger.warning( + "Add amount must be non-negative on Counter %s.", self.name + ) + return + time_unix_nano = time_ns() + self._measurement_consumer.consume_measurement( + Measurement( + amount, + time_unix_nano, + self, + context or get_current(), + attributes, + ) + ) + + +class UpDownCounter(_Synchronous, APIUpDownCounter): + def __new__(cls, *args, **kwargs): + if cls is UpDownCounter: + raise TypeError("UpDownCounter must be instantiated via a meter.") + return super().__new__(cls) + + def add( + self, + amount: Union[int, float], + attributes: dict[str, str] | None = None, + context: Context | None = None, + ): + time_unix_nano = time_ns() + self._measurement_consumer.consume_measurement( + Measurement( + amount, + time_unix_nano, + self, + context or get_current(), + attributes, + ) + ) + + +class ObservableCounter(_Asynchronous, APIObservableCounter): + def __new__(cls, *args, **kwargs): + if cls is ObservableCounter: + raise TypeError( + "ObservableCounter must be instantiated via a meter." + ) + return super().__new__(cls) + + +class ObservableUpDownCounter(_Asynchronous, APIObservableUpDownCounter): + def __new__(cls, *args, **kwargs): + if cls is ObservableUpDownCounter: + raise TypeError( + "ObservableUpDownCounter must be instantiated via a meter." 
+ ) + return super().__new__(cls) + + +class Histogram(_Synchronous, APIHistogram): + def __init__( + self, + name: str, + instrumentation_scope: InstrumentationScope, + measurement_consumer: "opentelemetry.sdk.metrics.MeasurementConsumer", + unit: str = "", + description: str = "", + explicit_bucket_boundaries_advisory: Sequence[float] | None = None, + ): + super().__init__( + name, + unit=unit, + description=description, + instrumentation_scope=instrumentation_scope, + measurement_consumer=measurement_consumer, + ) + self._advisory = _MetricsHistogramAdvisory( + explicit_bucket_boundaries=explicit_bucket_boundaries_advisory + ) + + def __new__(cls, *args, **kwargs): + if cls is Histogram: + raise TypeError("Histogram must be instantiated via a meter.") + return super().__new__(cls) + + def record( + self, + amount: Union[int, float], + attributes: dict[str, str] | None = None, + context: Context | None = None, + ): + if amount < 0: + _logger.warning( + "Record amount must be non-negative on Histogram %s.", + self.name, + ) + return + time_unix_nano = time_ns() + self._measurement_consumer.consume_measurement( + Measurement( + amount, + time_unix_nano, + self, + context or get_current(), + attributes, + ) + ) + + +class Gauge(_Synchronous, APIGauge): + def __new__(cls, *args, **kwargs): + if cls is Gauge: + raise TypeError("Gauge must be instantiated via a meter.") + return super().__new__(cls) + + def set( + self, + amount: Union[int, float], + attributes: dict[str, str] | None = None, + context: Context | None = None, + ): + time_unix_nano = time_ns() + self._measurement_consumer.consume_measurement( + Measurement( + amount, + time_unix_nano, + self, + context or get_current(), + attributes, + ) + ) + + +class ObservableGauge(_Asynchronous, APIObservableGauge): + def __new__(cls, *args, **kwargs): + if cls is ObservableGauge: + raise TypeError( + "ObservableGauge must be instantiated via a meter." 
@dataclass(frozen=True)
class Measurement:
    """
    Represents a data point reported via the metrics API to the SDK.

    Attributes
        value: Measured value
        time_unix_nano: The time the API call was made to record the Measurement
        instrument: The instrument that produced this `Measurement`.
        context: The active Context of the Measurement at API call time.
        attributes: Measurement attributes
    """

    # TODO Fix doc - if using valid Google `Attributes:` key, the attributes are duplicated
    # one will come from napoleon extension and the other from autodoc extension. This
    # will raise an sphinx error of duplicated object description
    # See https://github.com/sphinx-doc/sphinx/issues/8664

    # frozen=True makes instances immutable, so a Measurement can be handed
    # between SDK components without defensive copying.
    value: Union[int, float]
    time_unix_nano: int
    instrument: Instrument
    context: Context
    # attributes defaults to None (no attributes recorded), not an empty dict.
    attributes: Attributes = None
class MeasurementConsumer(ABC):
    """Interface between SDK instruments and metric readers.

    Instruments push `Measurement`s into the consumer, which fans them out
    to per-reader storages; readers pull aggregated metrics back out via
    :meth:`collect`.
    """

    @abstractmethod
    def consume_measurement(self, measurement: Measurement) -> None:
        """Forward a single measurement to every reader storage."""

    @abstractmethod
    def register_asynchronous_instrument(
        self,
        instrument: (
            # BUG FIX: the annotation string was missing the dot before
            # _Asynchronous ("...instrument_Asynchronous"), which does not
            # name a real attribute path.
            "opentelemetry.sdk.metrics._internal.instrument._Asynchronous"
        ),
    ):
        """Track an asynchronous instrument whose callbacks run at collect time."""

    @abstractmethod
    def collect(
        self,
        metric_reader: "opentelemetry.sdk.metrics.MetricReader",
        timeout_millis: float = 10_000,
    ) -> Optional[Iterable[Metric]]:
        """Run async callbacks and collect metrics for *metric_reader*."""


class SynchronousMeasurementConsumer(MeasurementConsumer):
    """MeasurementConsumer that processes measurements inline on the
    calling thread."""

    def __init__(
        self,
        sdk_config: "opentelemetry.sdk.metrics._internal.SdkConfiguration",
    ) -> None:
        self._lock = Lock()
        self._sdk_config = sdk_config
        # One storage per configured reader; the mapping is never mutated
        # after construction, so reads do not need the lock.
        self._reader_storages: Mapping[
            "opentelemetry.sdk.metrics.MetricReader", MetricReaderStorage
        ] = {
            reader: MetricReaderStorage(
                sdk_config,
                reader._instrument_class_temporality,
                reader._instrument_class_aggregation,
            )
            for reader in sdk_config.metric_readers
        }
        self._async_instruments: List[
            "opentelemetry.sdk.metrics._internal.instrument._Asynchronous"
        ] = []

    def consume_measurement(self, measurement: Measurement) -> None:
        """Fan the measurement out to all reader storages.

        Exemplar sampling is decided once per measurement and the verdict
        is shared by every storage.
        """
        should_sample_exemplar = (
            self._sdk_config.exemplar_filter.should_sample(
                measurement.value,
                measurement.time_unix_nano,
                measurement.attributes,
                measurement.context,
            )
        )
        for reader_storage in self._reader_storages.values():
            reader_storage.consume_measurement(
                measurement, should_sample_exemplar
            )

    def register_asynchronous_instrument(
        self,
        instrument: (
            "opentelemetry.sdk.metrics._internal.instrument._Asynchronous"
        ),
    ) -> None:
        """Record an async instrument so its callback runs during collect."""
        with self._lock:
            self._async_instruments.append(instrument)

    def collect(
        self,
        metric_reader: "opentelemetry.sdk.metrics.MetricReader",
        timeout_millis: float = 10_000,
    ) -> Optional[Iterable[Metric]]:
        """Execute async-instrument callbacks within the deadline, feed
        their measurements into the reader's storage, then collect.

        Raises:
            MetricsTimeoutError: if the deadline expires while callbacks
                are still being executed.
        """
        with self._lock:
            metric_reader_storage = self._reader_storages[metric_reader]
            # for now, just use the defaults
            callback_options = CallbackOptions()
            deadline_ns = time_ns() + (timeout_millis * 1e6)

            default_timeout_ns = 10000 * 1e6

            for async_instrument in self._async_instruments:
                remaining_time = deadline_ns - time_ns()

                # Shrink each callback's budget to whatever is left of the
                # overall deadline.
                if remaining_time < default_timeout_ns:
                    callback_options = CallbackOptions(
                        timeout_millis=remaining_time / 1e6
                    )

                measurements = async_instrument.callback(callback_options)
                if time_ns() >= deadline_ns:
                    raise MetricsTimeoutError(
                        "Timed out while executing callback"
                    )

                for measurement in measurements:
                    should_sample_exemplar = (
                        self._sdk_config.exemplar_filter.should_sample(
                            measurement.value,
                            measurement.time_unix_nano,
                            measurement.attributes,
                            measurement.context,
                        )
                    )
                    metric_reader_storage.consume_measurement(
                        measurement, should_sample_exemplar
                    )

            result = self._reader_storages[metric_reader].collect()

        return result
_logger = getLogger(__name__)

# Fallback view applied when no user-configured view matches an instrument.
_DEFAULT_VIEW = View(instrument_name="")


class MetricReaderStorage:
    """The SDK's storage for a given reader.

    Maps each instrument to the `_ViewInstrumentMatch` objects that
    aggregate its measurements, and assembles the aggregated data into a
    `MetricsData` tree on collection.
    """

    def __init__(
        self,
        sdk_config: SdkConfiguration,
        instrument_class_temporality: Dict[type, AggregationTemporality],
        instrument_class_aggregation: Dict[type, Aggregation],
    ) -> None:
        # RLock: collect() and lazy view-match initialization may nest on
        # the same thread.
        self._lock = RLock()
        self._sdk_config = sdk_config
        self._instrument_view_instrument_matches: Dict[
            Instrument, List[_ViewInstrumentMatch]
        ] = {}
        self._instrument_class_temporality = instrument_class_temporality
        self._instrument_class_aggregation = instrument_class_aggregation

    def _get_or_init_view_instrument_match(
        self, instrument: Instrument
    ) -> List[_ViewInstrumentMatch]:
        # Optimistically get the relevant views for the given instrument. Once set for a given
        # instrument, the mapping will never change

        if instrument in self._instrument_view_instrument_matches:
            return self._instrument_view_instrument_matches[instrument]

        with self._lock:
            # double check if it was set before we held the lock
            if instrument in self._instrument_view_instrument_matches:
                return self._instrument_view_instrument_matches[instrument]

            # not present, hold the lock and add a new mapping
            view_instrument_matches = []

            self._handle_view_instrument_match(
                instrument, view_instrument_matches
            )

            # if no view targeted the instrument, use the default
            if not view_instrument_matches:
                view_instrument_matches.append(
                    _ViewInstrumentMatch(
                        view=_DEFAULT_VIEW,
                        instrument=instrument,
                        instrument_class_aggregation=(
                            self._instrument_class_aggregation
                        ),
                    )
                )
            self._instrument_view_instrument_matches[instrument] = (
                view_instrument_matches
            )

        return view_instrument_matches

    def consume_measurement(
        self, measurement: Measurement, should_sample_exemplar: bool = True
    ) -> None:
        """Route the measurement into every view match for its instrument."""
        for view_instrument_match in self._get_or_init_view_instrument_match(
            measurement.instrument
        ):
            view_instrument_match.consume_measurement(
                measurement, should_sample_exemplar
            )

    def collect(self) -> Optional[MetricsData]:
        """Collect all aggregated data into a MetricsData tree.

        Returns None when no instrument produced any data points.
        """
        # Use a list instead of yielding to prevent a slow reader from holding
        # SDK locks

        # While holding the lock, new _ViewInstrumentMatch can't be added from
        # another thread (so we are sure we collect all existing view).
        # However, instruments can still send measurements that will make it
        # into the individual aggregations; collection will acquire those locks
        # iteratively to keep locking as fine-grained as possible. One side
        # effect is that end times can be slightly skewed among the metric
        # streams produced by the SDK, but we still align the output timestamps
        # for a single instrument.

        collection_start_nanos = time_ns()

        with self._lock:
            instrumentation_scope_scope_metrics: Dict[
                InstrumentationScope, ScopeMetrics
            ] = {}

            for (
                instrument,
                view_instrument_matches,
            ) in self._instrument_view_instrument_matches.items():
                aggregation_temporality = self._instrument_class_temporality[
                    instrument.__class__
                ]

                metrics: List[Metric] = []

                for view_instrument_match in view_instrument_matches:
                    data_points = view_instrument_match.collect(
                        aggregation_temporality, collection_start_nanos
                    )

                    if data_points is None:
                        continue

                    # pylint: disable=protected-access
                    aggregation = view_instrument_match._aggregation
                    if isinstance(aggregation, _SumAggregation):
                        data = Sum(
                            aggregation_temporality=aggregation_temporality,
                            data_points=data_points,
                            is_monotonic=isinstance(
                                instrument, (Counter, ObservableCounter)
                            ),
                        )
                    elif isinstance(aggregation, _LastValueAggregation):
                        data = Gauge(data_points=data_points)
                    elif isinstance(
                        aggregation, _ExplicitBucketHistogramAggregation
                    ):
                        data = Histogram(
                            data_points=data_points,
                            aggregation_temporality=aggregation_temporality,
                        )
                    elif isinstance(aggregation, _DropAggregation):
                        continue
                    elif isinstance(
                        aggregation, _ExponentialBucketHistogramAggregation
                    ):
                        data = ExponentialHistogram(
                            data_points=data_points,
                            aggregation_temporality=aggregation_temporality,
                        )
                    else:
                        # BUG FIX: the original chain had no else branch, so
                        # an unrecognized aggregation type would build a
                        # Metric from an unbound (or stale, left over from a
                        # previous iteration) `data` value. Warn and skip
                        # instead of emitting garbage.
                        _logger.warning(
                            "Unknown aggregation type %s, skipping",
                            type(aggregation).__name__,
                        )
                        continue

                    metrics.append(
                        Metric(
                            # pylint: disable=protected-access
                            name=view_instrument_match._name,
                            description=view_instrument_match._description,
                            unit=view_instrument_match._instrument.unit,
                            data=data,
                        )
                    )

                if metrics:
                    scope = instrument.instrumentation_scope
                    if scope not in instrumentation_scope_scope_metrics:
                        instrumentation_scope_scope_metrics[scope] = (
                            ScopeMetrics(
                                scope=scope,
                                metrics=metrics,
                                schema_url=scope.schema_url,
                            )
                        )
                    else:
                        instrumentation_scope_scope_metrics[
                            scope
                        ].metrics.extend(metrics)

            if instrumentation_scope_scope_metrics:
                return MetricsData(
                    resource_metrics=[
                        ResourceMetrics(
                            resource=self._sdk_config.resource,
                            scope_metrics=list(
                                instrumentation_scope_scope_metrics.values()
                            ),
                            schema_url=self._sdk_config.resource.schema_url,
                        )
                    ]
                )

        return None

    def _handle_view_instrument_match(
        self,
        instrument: Instrument,
        view_instrument_matches: List["_ViewInstrumentMatch"],
    ) -> None:
        """Append a `_ViewInstrumentMatch` for every configured view that
        matches *instrument*, warning about conflicting identities."""
        for view in self._sdk_config.views:
            # pylint: disable=protected-access
            if not view._match(instrument):
                continue

            if not self._check_view_instrument_compatibility(
                view, instrument
            ):
                continue

            new_view_instrument_match = _ViewInstrumentMatch(
                view=view,
                instrument=instrument,
                instrument_class_aggregation=(
                    self._instrument_class_aggregation
                ),
            )

            for (
                existing_view_instrument_matches
            ) in self._instrument_view_instrument_matches.values():
                for (
                    existing_view_instrument_match
                ) in existing_view_instrument_matches:
                    if existing_view_instrument_match.conflicts(
                        new_view_instrument_match
                    ):
                        _logger.warning(
                            "Views %s and %s will cause conflicting "
                            "metrics identities",
                            existing_view_instrument_match._view,
                            new_view_instrument_match._view,
                        )

            view_instrument_matches.append(new_view_instrument_match)

    @staticmethod
    def _check_view_instrument_compatibility(
        view: View, instrument: Instrument
    ) -> bool:
        """
        Checks if a view and an instrument are compatible.

        Returns `true` if they are compatible and a `_ViewInstrumentMatch`
        object should be created, `false` otherwise.
        """

        result = True

        # Explicit-bucket histogram aggregation is meaningless for
        # asynchronous instruments, so such a view is never applied.
        # pylint: disable=protected-access
        if isinstance(instrument, Asynchronous) and isinstance(
            view._aggregation, ExplicitBucketHistogramAggregation
        ):
            _logger.warning(
                "View %s and instrument %s will produce "
                "semantic errors when matched, the view "
                "has not been applied.",
                view,
                instrument,
            )
            result = False

        return result
def _jsonify_all(items, indent):
    """Round-trip each item through its own ``to_json`` so nested
    dataclasses serialize consistently."""
    return [loads(item.to_json(indent=indent)) for item in items]


@dataclass(frozen=True)
class NumberDataPoint:
    """One scalar sample of a timeseries: the time-varying value of a
    metric at a single instant."""

    attributes: Attributes
    start_time_unix_nano: int
    time_unix_nano: int
    value: Union[int, float]
    exemplars: Sequence[Exemplar] = field(default_factory=list)

    def to_json(self, indent: Optional[int] = 4) -> str:
        return dumps(asdict(self), indent=indent)


@dataclass(frozen=True)
class HistogramDataPoint:
    """One histogram sample of a timeseries: bucketed counts of a metric's
    values over an interval."""

    attributes: Attributes
    start_time_unix_nano: int
    time_unix_nano: int
    count: int
    sum: Union[int, float]
    bucket_counts: Sequence[int]
    explicit_bounds: Sequence[float]
    min: float
    max: float
    exemplars: Sequence[Exemplar] = field(default_factory=list)

    def to_json(self, indent: Optional[int] = 4) -> str:
        return dumps(asdict(self), indent=indent)


@dataclass(frozen=True)
class Buckets:
    """Bucket counts of an exponential histogram, offset from index zero."""

    offset: int
    bucket_counts: Sequence[int]


@dataclass(frozen=True)
class ExponentialHistogramDataPoint:
    """One sample of a timeseries whose bucket boundaries follow an
    exponential function of ``scale``."""

    attributes: Attributes
    start_time_unix_nano: int
    time_unix_nano: int
    count: int
    sum: Union[int, float]
    scale: int
    zero_count: int
    positive: Buckets
    negative: Buckets
    flags: int
    min: float
    max: float
    exemplars: Sequence[Exemplar] = field(default_factory=list)

    def to_json(self, indent: Optional[int] = 4) -> str:
        return dumps(asdict(self), indent=indent)


@dataclass(frozen=True)
class ExponentialHistogram:
    """Metric whose measurements are aggregated into an exponential
    histogram over a time interval."""

    data_points: Sequence[ExponentialHistogramDataPoint]
    aggregation_temporality: (
        "opentelemetry.sdk.metrics.export.AggregationTemporality"
    )

    def to_json(self, indent: Optional[int] = 4) -> str:
        return dumps(
            {
                "data_points": _jsonify_all(self.data_points, indent),
                "aggregation_temporality": self.aggregation_temporality,
            },
            indent=indent,
        )


@dataclass(frozen=True)
class Sum:
    """Scalar metric computed as the sum of all reported measurements
    over a time interval."""

    data_points: Sequence[NumberDataPoint]
    aggregation_temporality: (
        "opentelemetry.sdk.metrics.export.AggregationTemporality"
    )
    is_monotonic: bool

    def to_json(self, indent: Optional[int] = 4) -> str:
        return dumps(
            {
                "data_points": _jsonify_all(self.data_points, indent),
                "aggregation_temporality": self.aggregation_temporality,
                "is_monotonic": self.is_monotonic,
            },
            indent=indent,
        )


@dataclass(frozen=True)
class Gauge:
    """Scalar metric that always exports the latest value of every data
    point. Also used for an unknown aggregation."""

    data_points: Sequence[NumberDataPoint]

    def to_json(self, indent: Optional[int] = 4) -> str:
        return dumps(
            {
                "data_points": _jsonify_all(self.data_points, indent),
            },
            indent=indent,
        )


@dataclass(frozen=True)
class Histogram:
    """Metric whose measurements are aggregated into an explicit-bucket
    histogram over a time interval."""

    data_points: Sequence[HistogramDataPoint]
    aggregation_temporality: (
        "opentelemetry.sdk.metrics.export.AggregationTemporality"
    )

    def to_json(self, indent: Optional[int] = 4) -> str:
        return dumps(
            {
                "data_points": _jsonify_all(self.data_points, indent),
                "aggregation_temporality": self.aggregation_temporality,
            },
            indent=indent,
        )


# pylint: disable=invalid-name
DataT = Union[Sum, Gauge, Histogram, ExponentialHistogram]
DataPointT = Union[
    NumberDataPoint, HistogramDataPoint, ExponentialHistogramDataPoint
]


@dataclass(frozen=True)
class Metric:
    """A metric point in the OpenTelemetry data model, ready for export."""

    name: str
    description: Optional[str]
    unit: Optional[str]
    data: DataT

    def to_json(self, indent: Optional[int] = 4) -> str:
        return dumps(
            {
                "name": self.name,
                # None collapses to "" so the JSON shape is stable.
                "description": self.description or "",
                "unit": self.unit or "",
                "data": loads(self.data.to_json(indent=indent)),
            },
            indent=indent,
        )


@dataclass(frozen=True)
class ScopeMetrics:
    """All Metrics produced by one instrumentation scope."""

    scope: InstrumentationScope
    metrics: Sequence[Metric]
    schema_url: str

    def to_json(self, indent: Optional[int] = 4) -> str:
        return dumps(
            {
                "scope": loads(self.scope.to_json(indent=indent)),
                "metrics": _jsonify_all(self.metrics, indent),
                "schema_url": self.schema_url,
            },
            indent=indent,
        )


@dataclass(frozen=True)
class ResourceMetrics:
    """All ScopeMetrics produced by one Resource."""

    resource: Resource
    scope_metrics: Sequence[ScopeMetrics]
    schema_url: str

    def to_json(self, indent: Optional[int] = 4) -> str:
        return dumps(
            {
                "resource": loads(self.resource.to_json(indent=indent)),
                "scope_metrics": _jsonify_all(self.scope_metrics, indent),
                "schema_url": self.schema_url,
            },
            indent=indent,
        )


@dataclass(frozen=True)
class MetricsData:
    """Root of the export tree: an array of ResourceMetrics."""

    resource_metrics: Sequence[ResourceMetrics]

    def to_json(self, indent: Optional[int] = 4) -> str:
        return dumps(
            {
                "resource_metrics": _jsonify_all(self.resource_metrics, indent)
            },
            indent=indent,
        )
@dataclass
class SdkConfiguration:
    """Bundle of user-supplied SDK settings shared by the metrics pipeline.

    Holds the exemplar filter, the telemetry Resource, the configured
    metric readers, and the configured views.
    """

    exemplar_filter: "opentelemetry.sdk.metrics.ExemplarFilter"
    resource: "opentelemetry.sdk.resources.Resource"
    metric_readers: Sequence["opentelemetry.sdk.metrics.MetricReader"]
    views: Sequence["opentelemetry.sdk.metrics.View"]
_logger = getLogger(__name__)


def _default_reservoir_factory(
    aggregation_type: Type[_Aggregation],
) -> ExemplarReservoirBuilder:
    """Default reservoir factory per aggregation."""
    # Explicit-bucket histograms keep one exemplar per bucket; everything
    # else (including exponential histograms) uses a fixed-size reservoir.
    if issubclass(aggregation_type, _ExplicitBucketHistogramAggregation):
        return AlignedHistogramBucketExemplarReservoir
    if issubclass(aggregation_type, _ExponentialBucketHistogramAggregation):
        return SimpleFixedSizeExemplarReservoir
    return SimpleFixedSizeExemplarReservoir


class View:
    """
    A `View` configuration parameters can be used for the following
    purposes:

    1. Match instruments: When an instrument matches a view, measurements
    received by that instrument will be processed.
    2. Customize metric streams: A metric stream is identified by a match
    between a view and an instrument and a set of attributes. The metric
    stream can be customized by certain attributes of the corresponding view.

    The attributes documented next serve one of the previous two purposes.

    Args:
        instrument_type: This is an instrument matching attribute: the class the
            instrument must be to match the view.

        instrument_name: This is an instrument matching attribute: the name the
            instrument must have to match the view. Wild card characters are
            supported. Wild card characters should not be used with this
            attribute if the view has also a ``name`` defined.

        meter_name: This is an instrument matching attribute: the name the
            instrument meter must have to match the view.

        meter_version: This is an instrument matching attribute: the version
            the instrument meter must have to match the view.

        meter_schema_url: This is an instrument matching attribute: the schema
            URL the instrument meter must have to match the view.

        name: This is a metric stream customizing attribute: the name of the
            metric stream. If `None`, the name of the instrument will be used.

        description: This is a metric stream customizing attribute: the
            description of the metric stream. If `None`, the description of
            the instrument will be used.

        attribute_keys: This is a metric stream customizing attribute: this is
            a set of attribute keys. If not `None` then only the measurement
            attributes that are in ``attribute_keys`` will be used to identify
            the metric stream.

        aggregation: This is a metric stream customizing attribute: the
            aggregation instance to use when data is aggregated for the
            corresponding metrics stream. If `None` an instance of
            `DefaultAggregation` will be used.

        exemplar_reservoir_factory: This is a metric stream customizing
            attribute: the exemplar reservoir factory.

        instrument_unit: This is an instrument matching attribute: the unit the
            instrument must have to match the view.

    This class is not intended to be subclassed by the user.
    """

    _default_aggregation = DefaultAggregation()

    def __init__(
        self,
        instrument_type: Optional[Type[Instrument]] = None,
        instrument_name: Optional[str] = None,
        meter_name: Optional[str] = None,
        meter_version: Optional[str] = None,
        meter_schema_url: Optional[str] = None,
        name: Optional[str] = None,
        description: Optional[str] = None,
        attribute_keys: Optional[Set[str]] = None,
        aggregation: Optional[Aggregation] = None,
        exemplar_reservoir_factory: Optional[
            Callable[[Type[_Aggregation]], ExemplarReservoirBuilder]
        ] = None,
        instrument_unit: Optional[str] = None,
    ):
        if (
            instrument_type
            is instrument_name
            is instrument_unit
            is meter_name
            is meter_version
            is meter_schema_url
            is None
        ):
            # BUG FIX: raise ValueError instead of bare Exception (pylint
            # W0719). ValueError is an Exception subclass, so callers
            # catching Exception keep working.
            raise ValueError(
                "Some instrument selection "
                f"criteria must be provided for View {name}"
            )

        if (
            name is not None
            and instrument_name is not None
            and ("*" in instrument_name or "?" in instrument_name)
        ):
            # BUG FIX: same Exception -> ValueError change as above.
            raise ValueError(
                f"View {name} declared with wildcard "
                "characters in instrument_name"
            )

        # _name, _description, _aggregation, _exemplar_reservoir_factory and
        # _attribute_keys will be accessed when instantiating a
        # _ViewInstrumentMatch.
        self._name = name
        self._instrument_type = instrument_type
        self._instrument_name = instrument_name
        self._instrument_unit = instrument_unit
        self._meter_name = meter_name
        self._meter_version = meter_version
        self._meter_schema_url = meter_schema_url

        self._description = description
        self._attribute_keys = attribute_keys
        self._aggregation = aggregation or self._default_aggregation
        self._exemplar_reservoir_factory = (
            exemplar_reservoir_factory or _default_reservoir_factory
        )

    # pylint: disable=too-many-return-statements
    # pylint: disable=too-many-branches
    def _match(self, instrument: Instrument) -> bool:
        """Return True when *instrument* satisfies every selection
        criterion this view declares (unset criteria always match).

        Name and unit support fnmatch-style wildcards; meter name,
        version and schema URL are compared exactly.
        """
        if self._instrument_type is not None:
            if not isinstance(instrument, self._instrument_type):
                return False

        if self._instrument_name is not None:
            if not fnmatch(instrument.name, self._instrument_name):
                return False

        if self._instrument_unit is not None:
            if not fnmatch(instrument.unit, self._instrument_unit):
                return False

        if self._meter_name is not None:
            if instrument.instrumentation_scope.name != self._meter_name:
                return False

        if self._meter_version is not None:
            if (
                instrument.instrumentation_scope.version
                != self._meter_version
            ):
                return False

        if self._meter_schema_url is not None:
            if (
                instrument.instrumentation_scope.schema_url
                != self._meter_schema_url
            ):
                return False

        return True
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from opentelemetry.sdk.metrics._internal.export import ( + AggregationTemporality, + ConsoleMetricExporter, + InMemoryMetricReader, + MetricExporter, + MetricExportResult, + MetricReader, + PeriodicExportingMetricReader, +) + +# The point module is not in the export directory to avoid a circular import. +from opentelemetry.sdk.metrics._internal.point import ( # noqa: F401 + Buckets, + DataPointT, + DataT, + ExponentialHistogram, + ExponentialHistogramDataPoint, + Gauge, + Histogram, + HistogramDataPoint, + Metric, + MetricsData, + NumberDataPoint, + ResourceMetrics, + ScopeMetrics, + Sum, +) + +__all__ = [ + "AggregationTemporality", + "Buckets", + "ConsoleMetricExporter", + "InMemoryMetricReader", + "MetricExporter", + "MetricExportResult", + "MetricReader", + "PeriodicExportingMetricReader", + "DataPointT", + "DataT", + "ExponentialHistogram", + "ExponentialHistogramDataPoint", + "Gauge", + "Histogram", + "HistogramDataPoint", + "Metric", + "MetricsData", + "NumberDataPoint", + "ResourceMetrics", + "ScopeMetrics", + "Sum", +] diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/view/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/view/__init__.py new file mode 100644 index 00000000..c07adf6c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/metrics/view/__init__.py @@ -0,0 +1,35 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from opentelemetry.sdk.metrics._internal.aggregation import ( + Aggregation, + DefaultAggregation, + DropAggregation, + ExplicitBucketHistogramAggregation, + ExponentialBucketHistogramAggregation, + LastValueAggregation, + SumAggregation, +) +from opentelemetry.sdk.metrics._internal.view import View + +__all__ = [ + "Aggregation", + "DefaultAggregation", + "DropAggregation", + "ExplicitBucketHistogramAggregation", + "ExponentialBucketHistogramAggregation", + "LastValueAggregation", + "SumAggregation", + "View", +] diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/py.typed b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/py.typed new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/py.typed diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/resources/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/resources/__init__.py new file mode 100644 index 00000000..752b9067 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/resources/__init__.py @@ -0,0 +1,541 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This package implements `OpenTelemetry Resources +<https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/sdk.md#resource-sdk>`_: + + *A Resource is an immutable representation of the entity producing + telemetry. For example, a process producing telemetry that is running in + a container on Kubernetes has a Pod name, it is in a namespace and + possibly is part of a Deployment which also has a name. All three of + these attributes can be included in the Resource.* + +Resource objects are created with `Resource.create`, which accepts attributes +(key-values). Resources should NOT be created via constructor except by `ResourceDetector` +instances which can't use `Resource.create` to avoid infinite loops. Working with +`Resource` objects should only be done via the Resource API methods. Resource +attributes can also be passed at process invocation in the +:envvar:`OTEL_RESOURCE_ATTRIBUTES` environment variable. You should register +your resource with the `opentelemetry.sdk.trace.TracerProvider` by passing +them into their constructors. The `Resource` passed to a provider is available +to the exporter, which can send on this information as it sees fit. + +.. 
code-block:: python + + trace.set_tracer_provider( + TracerProvider( + resource=Resource.create({ + "service.name": "shoppingcart", + "service.instance.id": "instance-12", + }), + ), + ) + print(trace.get_tracer_provider().resource.attributes) + + {'telemetry.sdk.language': 'python', + 'telemetry.sdk.name': 'opentelemetry', + 'telemetry.sdk.version': '0.13.dev0', + 'service.name': 'shoppingcart', + 'service.instance.id': 'instance-12'} + +Note that the OpenTelemetry project documents certain `"standard attributes" +<https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/semantic_conventions/README.md>`_ +that have prescribed semantic meanings, for example ``service.name`` in the +above example. +""" + +import abc +import concurrent.futures +import logging +import os +import platform +import socket +import sys +import typing +from json import dumps +from os import environ +from types import ModuleType +from typing import List, MutableMapping, Optional, cast +from urllib import parse + +from opentelemetry.attributes import BoundedAttributes +from opentelemetry.sdk.environment_variables import ( + OTEL_EXPERIMENTAL_RESOURCE_DETECTORS, + OTEL_RESOURCE_ATTRIBUTES, + OTEL_SERVICE_NAME, +) +from opentelemetry.semconv.resource import ResourceAttributes +from opentelemetry.util._importlib_metadata import entry_points, version +from opentelemetry.util.types import AttributeValue + +psutil: Optional[ModuleType] = None + +try: + import psutil as psutil_module + + psutil = psutil_module +except ImportError: + pass + +LabelValue = AttributeValue +Attributes = typing.Mapping[str, LabelValue] +logger = logging.getLogger(__name__) + +CLOUD_PROVIDER = ResourceAttributes.CLOUD_PROVIDER +CLOUD_ACCOUNT_ID = ResourceAttributes.CLOUD_ACCOUNT_ID +CLOUD_REGION = ResourceAttributes.CLOUD_REGION +CLOUD_AVAILABILITY_ZONE = ResourceAttributes.CLOUD_AVAILABILITY_ZONE +CONTAINER_NAME = ResourceAttributes.CONTAINER_NAME +CONTAINER_ID = 
ResourceAttributes.CONTAINER_ID +CONTAINER_IMAGE_NAME = ResourceAttributes.CONTAINER_IMAGE_NAME +CONTAINER_IMAGE_TAG = ResourceAttributes.CONTAINER_IMAGE_TAG +DEPLOYMENT_ENVIRONMENT = ResourceAttributes.DEPLOYMENT_ENVIRONMENT +FAAS_NAME = ResourceAttributes.FAAS_NAME +FAAS_ID = ResourceAttributes.FAAS_ID +FAAS_VERSION = ResourceAttributes.FAAS_VERSION +FAAS_INSTANCE = ResourceAttributes.FAAS_INSTANCE +HOST_NAME = ResourceAttributes.HOST_NAME +HOST_ARCH = ResourceAttributes.HOST_ARCH +HOST_TYPE = ResourceAttributes.HOST_TYPE +HOST_IMAGE_NAME = ResourceAttributes.HOST_IMAGE_NAME +HOST_IMAGE_ID = ResourceAttributes.HOST_IMAGE_ID +HOST_IMAGE_VERSION = ResourceAttributes.HOST_IMAGE_VERSION +KUBERNETES_CLUSTER_NAME = ResourceAttributes.K8S_CLUSTER_NAME +KUBERNETES_NAMESPACE_NAME = ResourceAttributes.K8S_NAMESPACE_NAME +KUBERNETES_POD_UID = ResourceAttributes.K8S_POD_UID +KUBERNETES_POD_NAME = ResourceAttributes.K8S_POD_NAME +KUBERNETES_CONTAINER_NAME = ResourceAttributes.K8S_CONTAINER_NAME +KUBERNETES_REPLICA_SET_UID = ResourceAttributes.K8S_REPLICASET_UID +KUBERNETES_REPLICA_SET_NAME = ResourceAttributes.K8S_REPLICASET_NAME +KUBERNETES_DEPLOYMENT_UID = ResourceAttributes.K8S_DEPLOYMENT_UID +KUBERNETES_DEPLOYMENT_NAME = ResourceAttributes.K8S_DEPLOYMENT_NAME +KUBERNETES_STATEFUL_SET_UID = ResourceAttributes.K8S_STATEFULSET_UID +KUBERNETES_STATEFUL_SET_NAME = ResourceAttributes.K8S_STATEFULSET_NAME +KUBERNETES_DAEMON_SET_UID = ResourceAttributes.K8S_DAEMONSET_UID +KUBERNETES_DAEMON_SET_NAME = ResourceAttributes.K8S_DAEMONSET_NAME +KUBERNETES_JOB_UID = ResourceAttributes.K8S_JOB_UID +KUBERNETES_JOB_NAME = ResourceAttributes.K8S_JOB_NAME +KUBERNETES_CRON_JOB_UID = ResourceAttributes.K8S_CRONJOB_UID +KUBERNETES_CRON_JOB_NAME = ResourceAttributes.K8S_CRONJOB_NAME +OS_DESCRIPTION = ResourceAttributes.OS_DESCRIPTION +OS_TYPE = ResourceAttributes.OS_TYPE +OS_VERSION = ResourceAttributes.OS_VERSION +PROCESS_PID = ResourceAttributes.PROCESS_PID +PROCESS_PARENT_PID = 
ResourceAttributes.PROCESS_PARENT_PID +PROCESS_EXECUTABLE_NAME = ResourceAttributes.PROCESS_EXECUTABLE_NAME +PROCESS_EXECUTABLE_PATH = ResourceAttributes.PROCESS_EXECUTABLE_PATH +PROCESS_COMMAND = ResourceAttributes.PROCESS_COMMAND +PROCESS_COMMAND_LINE = ResourceAttributes.PROCESS_COMMAND_LINE +PROCESS_COMMAND_ARGS = ResourceAttributes.PROCESS_COMMAND_ARGS +PROCESS_OWNER = ResourceAttributes.PROCESS_OWNER +PROCESS_RUNTIME_NAME = ResourceAttributes.PROCESS_RUNTIME_NAME +PROCESS_RUNTIME_VERSION = ResourceAttributes.PROCESS_RUNTIME_VERSION +PROCESS_RUNTIME_DESCRIPTION = ResourceAttributes.PROCESS_RUNTIME_DESCRIPTION +SERVICE_NAME = ResourceAttributes.SERVICE_NAME +SERVICE_NAMESPACE = ResourceAttributes.SERVICE_NAMESPACE +SERVICE_INSTANCE_ID = ResourceAttributes.SERVICE_INSTANCE_ID +SERVICE_VERSION = ResourceAttributes.SERVICE_VERSION +TELEMETRY_SDK_NAME = ResourceAttributes.TELEMETRY_SDK_NAME +TELEMETRY_SDK_VERSION = ResourceAttributes.TELEMETRY_SDK_VERSION +TELEMETRY_AUTO_VERSION = ResourceAttributes.TELEMETRY_AUTO_VERSION +TELEMETRY_SDK_LANGUAGE = ResourceAttributes.TELEMETRY_SDK_LANGUAGE + +_OPENTELEMETRY_SDK_VERSION: str = version("opentelemetry-sdk") + + +class Resource: + """A Resource is an immutable representation of the entity producing telemetry as Attributes.""" + + _attributes: BoundedAttributes + _schema_url: str + + def __init__( + self, attributes: Attributes, schema_url: typing.Optional[str] = None + ): + self._attributes = BoundedAttributes(attributes=attributes) + if schema_url is None: + schema_url = "" + self._schema_url = schema_url + + @staticmethod + def create( + attributes: typing.Optional[Attributes] = None, + schema_url: typing.Optional[str] = None, + ) -> "Resource": + """Creates a new `Resource` from attributes. + + `ResourceDetector` instances should not call this method. + + Args: + attributes: Optional zero or more key-value pairs. + schema_url: Optional URL pointing to the schema + + Returns: + The newly-created Resource. 
+ """ + + if not attributes: + attributes = {} + + otel_experimental_resource_detectors = {"otel"}.union( + { + otel_experimental_resource_detector.strip() + for otel_experimental_resource_detector in environ.get( + OTEL_EXPERIMENTAL_RESOURCE_DETECTORS, "" + ).split(",") + if otel_experimental_resource_detector + } + ) + + resource_detectors: List[ResourceDetector] = [] + + resource_detector: str + for resource_detector in otel_experimental_resource_detectors: + try: + resource_detectors.append( + next( + iter( + entry_points( + group="opentelemetry_resource_detector", + name=resource_detector.strip(), + ) # type: ignore + ) + ).load()() + ) + except Exception: # pylint: disable=broad-exception-caught + logger.exception( + "Failed to load resource detector '%s', skipping", + resource_detector, + ) + continue + resource = get_aggregated_resources( + resource_detectors, _DEFAULT_RESOURCE + ).merge(Resource(attributes, schema_url)) + + if not resource.attributes.get(SERVICE_NAME, None): + default_service_name = "unknown_service" + process_executable_name = cast( + Optional[str], + resource.attributes.get(PROCESS_EXECUTABLE_NAME, None), + ) + if process_executable_name: + default_service_name += ":" + process_executable_name + resource = resource.merge( + Resource({SERVICE_NAME: default_service_name}, schema_url) + ) + return resource + + @staticmethod + def get_empty() -> "Resource": + return _EMPTY_RESOURCE + + @property + def attributes(self) -> Attributes: + return self._attributes + + @property + def schema_url(self) -> str: + return self._schema_url + + def merge(self, other: "Resource") -> "Resource": + """Merges this resource and an updating resource into a new `Resource`. + + If a key exists on both the old and updating resource, the value of the + updating resource will override the old resource value. + + The updating resource's `schema_url` will be used only if the old + `schema_url` is empty. 
Attempting to merge two resources with + different, non-empty values for `schema_url` will result in an error + and return the old resource. + + Args: + other: The other resource to be merged. + + Returns: + The newly-created Resource. + """ + merged_attributes = self.attributes.copy() # type: ignore + merged_attributes.update(other.attributes) # type: ignore + + if self.schema_url == "": + schema_url = other.schema_url + elif other.schema_url == "": + schema_url = self.schema_url + elif self.schema_url == other.schema_url: + schema_url = other.schema_url + else: + logger.error( + "Failed to merge resources: The two schemas %s and %s are incompatible", + self.schema_url, + other.schema_url, + ) + return self + return Resource(merged_attributes, schema_url) # type: ignore + + def __eq__(self, other: object) -> bool: + if not isinstance(other, Resource): + return False + return ( + self._attributes == other._attributes + and self._schema_url == other._schema_url + ) + + def __hash__(self) -> int: + return hash( + f"{dumps(self._attributes.copy(), sort_keys=True)}|{self._schema_url}" # type: ignore + ) + + def to_json(self, indent: Optional[int] = 4) -> str: + attributes: MutableMapping[str, AttributeValue] = dict( + self._attributes + ) + return dumps( + { + "attributes": attributes, # type: ignore + "schema_url": self._schema_url, + }, + indent=indent, + ) + + +_EMPTY_RESOURCE = Resource({}) +_DEFAULT_RESOURCE = Resource( + { + TELEMETRY_SDK_LANGUAGE: "python", + TELEMETRY_SDK_NAME: "opentelemetry", + TELEMETRY_SDK_VERSION: _OPENTELEMETRY_SDK_VERSION, + } +) + + +class ResourceDetector(abc.ABC): + def __init__(self, raise_on_error: bool = False) -> None: + self.raise_on_error = raise_on_error + + @abc.abstractmethod + def detect(self) -> "Resource": + """Don't call `Resource.create` here to avoid an infinite loop, instead instantiate `Resource` directly""" + raise NotImplementedError() + + +class OTELResourceDetector(ResourceDetector): + # pylint: 
# pylint: disable=no-self-use
def detect(self) -> "Resource":
    """Build a Resource describing the current Python process.

    Reports runtime name/version/description, pid (and ppid where the
    platform provides it), the interpreter executable, the command line,
    and — when the optional ``psutil`` dependency is importable — the
    process owner.

    Returns:
        A `Resource` populated with ``process.*`` attributes.
    """
    # For final releases drop releaselevel/serial (e.g. "3.12.1" rather
    # than "3.12.1.final.0"); pre-releases keep the full 5-tuple.
    if (
        sys.version_info.releaselevel == "final"
        and not sys.version_info.serial
    ):
        version_fields = sys.version_info[:3]
    else:
        version_fields = sys.version_info
    _runtime_version = ".".join(map(str, version_fields))

    resource_info = {
        PROCESS_RUNTIME_DESCRIPTION: sys.version,
        PROCESS_RUNTIME_NAME: sys.implementation.name,
        PROCESS_RUNTIME_VERSION: _runtime_version,
        PROCESS_PID: os.getpid(),
    }

    # sys.executable can be None or "" (embedded interpreters, or when
    # Python cannot determine its own path); the original code crashed
    # with TypeError on os.path.dirname(None). Only report when usable.
    executable = sys.executable
    if executable:
        resource_info[PROCESS_EXECUTABLE_NAME] = executable
        resource_info[PROCESS_EXECUTABLE_PATH] = os.path.dirname(executable)

    # sys.argv may be empty in embedded interpreters; guard the [0] access.
    if sys.argv:
        resource_info[PROCESS_COMMAND] = sys.argv[0]
        resource_info[PROCESS_COMMAND_LINE] = " ".join(sys.argv)
        resource_info[PROCESS_COMMAND_ARGS] = sys.argv

    if hasattr(os, "getppid"):
        # pypy3 does not have getppid()
        resource_info[PROCESS_PARENT_PID] = os.getppid()

    if psutil is not None:
        # Owner lookup requires the optional psutil dependency.
        resource_info[PROCESS_OWNER] = psutil.Process().username()

    return Resource(resource_info)  # type: ignore
def detect(self) -> "Resource":
    """Return a resource with ``os.type`` and ``os.version``.

    ``platform.release()`` supplies the version on most systems, but
    Windows reports a more useful value from ``platform.version()``
    (``platform.release()`` gives e.g. "2022Server"). SunOS likewise
    uses ``platform.version()`` and is renamed to the conventional
    "solaris" per the OS resource semantic conventions.
    """
    system = platform.system().lower()

    if system == "windows":
        return Resource(
            {
                OS_TYPE: system,
                OS_VERSION: platform.version(),
            }
        )

    if system == "sunos":
        return Resource(
            {
                OS_TYPE: "solaris",
                OS_VERSION: platform.version(),
            }
        )

    return Resource(
        {
            OS_TYPE: system,
            OS_VERSION: platform.release(),
        }
    )
def get_aggregated_resources(
    detectors: typing.List["ResourceDetector"],
    initial_resource: typing.Optional[Resource] = None,
    timeout: int = 5,
) -> "Resource":
    """Run every detector and merge the results into a single Resource.

    Detectors execute concurrently on a thread pool, but their results are
    merged in the order the detectors were passed. A detector that times
    out or raises is skipped (merging an empty resource) unless its
    ``raise_on_error`` flag is set, in which case the error propagates.

    :param detectors: List of resources in order of priority
    :param initial_resource: Static resource. This has highest priority
    :param timeout: Number of seconds to wait for each detector to return
    :return: The aggregated resource
    """
    aggregated = initial_resource or Resource.create()

    with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
        pending = [executor.submit(d.detect) for d in detectors]
        for detector, future in zip(detectors, pending):
            outcome: Resource = _EMPTY_RESOURCE
            try:
                outcome = future.result(timeout=timeout)
            except concurrent.futures.TimeoutError:
                if detector.raise_on_error:
                    raise
                logger.warning(
                    "Detector %s took longer than %s seconds, skipping",
                    detector,
                    timeout,
                )
            # pylint: disable=broad-exception-caught
            except Exception as error:
                if detector.raise_on_error:
                    raise
                logger.warning(
                    "Exception %s in detector %s, ignoring", error, detector
                )
            finally:
                # Merge whatever we got (empty on failure) so ordering of
                # later detectors is unaffected by earlier failures.
                aggregated = aggregated.merge(outcome)

    return aggregated
+ +# pylint: disable=too-many-lines +import abc +import atexit +import concurrent.futures +import json +import logging +import threading +import traceback +import typing +from os import environ +from time import time_ns +from types import MappingProxyType, TracebackType +from typing import ( + Any, + Callable, + Dict, + Iterator, + List, + Mapping, + MutableMapping, + Optional, + Sequence, + Tuple, + Type, + Union, +) +from warnings import filterwarnings + +from deprecated import deprecated + +from opentelemetry import context as context_api +from opentelemetry import trace as trace_api +from opentelemetry.attributes import BoundedAttributes +from opentelemetry.sdk import util +from opentelemetry.sdk.environment_variables import ( + OTEL_ATTRIBUTE_COUNT_LIMIT, + OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, + OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT, + OTEL_LINK_ATTRIBUTE_COUNT_LIMIT, + OTEL_SDK_DISABLED, + OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT, + OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT, + OTEL_SPAN_EVENT_COUNT_LIMIT, + OTEL_SPAN_LINK_COUNT_LIMIT, +) +from opentelemetry.sdk.resources import Resource +from opentelemetry.sdk.trace import sampling +from opentelemetry.sdk.trace.id_generator import IdGenerator, RandomIdGenerator +from opentelemetry.sdk.util import BoundedList +from opentelemetry.sdk.util.instrumentation import ( + InstrumentationInfo, + InstrumentationScope, +) +from opentelemetry.semconv.attributes.exception_attributes import ( + EXCEPTION_ESCAPED, + EXCEPTION_MESSAGE, + EXCEPTION_STACKTRACE, + EXCEPTION_TYPE, +) +from opentelemetry.trace import NoOpTracer, SpanContext +from opentelemetry.trace.status import Status, StatusCode +from opentelemetry.util import types +from opentelemetry.util._decorator import _agnosticcontextmanager + +logger = logging.getLogger(__name__) + +_DEFAULT_OTEL_ATTRIBUTE_COUNT_LIMIT = 128 +_DEFAULT_OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT = 128 +_DEFAULT_OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT = 128 +_DEFAULT_OTEL_LINK_ATTRIBUTE_COUNT_LIMIT = 128 
class SpanProcessor:
    """Hook interface invoked by the SDK when `Span`s start and end.

    Processors are registered with
    :func:`TracerProvider.add_span_processor` and invoked in registration
    order.
    """

    def on_start(
        self,
        span: "Span",
        parent_context: Optional[context_api.Context] = None,
    ) -> None:
        """Called when a :class:`opentelemetry.trace.Span` starts.

        Runs synchronously on the thread that starts the span, so it must
        not block or raise.

        Args:
            span: The :class:`opentelemetry.trace.Span` that just started.
            parent_context: The parent context of the span that just started.
        """

    def on_end(self, span: "ReadableSpan") -> None:
        """Called when a :class:`opentelemetry.trace.Span` ends.

        Runs synchronously on the thread that ends the span, so it must
        not block or raise.

        Args:
            span: The :class:`opentelemetry.trace.Span` that just ended.
        """

    def shutdown(self) -> None:
        """Called when the owning :class:`opentelemetry.sdk.trace.TracerProvider` shuts down."""

    def force_flush(self, timeout_millis: int = 30000) -> bool:
        """Export all ended-but-unexported spans to the configured exporter.

        Args:
            timeout_millis: Maximum time to wait for spans to be exported.

        Returns:
            False if the timeout is exceeded, True otherwise.
        """
+ """ + + _span_processors: Tuple[SpanProcessor, ...] + + def __init__(self): + # use a tuple to avoid race conditions when adding a new span and + # iterating through it on "on_start" and "on_end". + self._span_processors = () + self._lock = threading.Lock() + + def add_span_processor(self, span_processor: SpanProcessor) -> None: + """Adds a SpanProcessor to the list handled by this instance.""" + with self._lock: + self._span_processors += (span_processor,) + + def on_start( + self, + span: "Span", + parent_context: Optional[context_api.Context] = None, + ) -> None: + for sp in self._span_processors: + sp.on_start(span, parent_context=parent_context) + + def on_end(self, span: "ReadableSpan") -> None: + for sp in self._span_processors: + sp.on_end(span) + + def shutdown(self) -> None: + """Sequentially shuts down all underlying span processors.""" + for sp in self._span_processors: + sp.shutdown() + + def force_flush(self, timeout_millis: int = 30000) -> bool: + """Sequentially calls force_flush on all underlying + :class:`SpanProcessor` + + Args: + timeout_millis: The maximum amount of time over all span processors + to wait for spans to be exported. In case the first n span + processors exceeded the timeout followup span processors will be + skipped. + + Returns: + True if all span processors flushed their spans within the + given timeout, False otherwise. + """ + deadline_ns = time_ns() + timeout_millis * 1000000 + for sp in self._span_processors: + current_time_ns = time_ns() + if current_time_ns >= deadline_ns: + return False + + if not sp.force_flush((deadline_ns - current_time_ns) // 1000000): + return False + + return True + + +class ConcurrentMultiSpanProcessor(SpanProcessor): + """Implementation of :class:`SpanProcessor` that forwards all received + events to a list of span processors in parallel. 
class ConcurrentMultiSpanProcessor(SpanProcessor):
    """Fans each received callback out to a list of span processors in
    parallel.

    Calls are submitted to a thread pool executor and awaited until every
    span processor has finished its work.

    Args:
        num_threads: Size of the thread pool, i.e. how many span
            processors may run concurrently.
    """

    def __init__(self, num_threads: int = 2):
        # An immutable tuple is swapped atomically under the lock, so
        # concurrent iteration in on_start/on_end never observes a
        # half-updated list.
        self._span_processors = ()  # type: Tuple[SpanProcessor, ...]
        self._lock = threading.Lock()
        self._executor = concurrent.futures.ThreadPoolExecutor(
            max_workers=num_threads
        )

    def add_span_processor(self, span_processor: SpanProcessor) -> None:
        """Append *span_processor* to the chain handled by this instance."""
        with self._lock:
            self._span_processors = self._span_processors + (span_processor,)

    def _submit_and_await(
        self,
        func: Callable[[SpanProcessor], Callable[..., None]],
        *args: Any,
        **kwargs: Any,
    ):
        # Submit one task per processor, then block until all complete.
        pending = [
            self._executor.submit(func(processor), *args, **kwargs)
            for processor in self._span_processors
        ]
        for task in pending:
            task.result()

    def on_start(
        self,
        span: "Span",
        parent_context: Optional[context_api.Context] = None,
    ) -> None:
        self._submit_and_await(
            lambda processor: processor.on_start,
            span,
            parent_context=parent_context,
        )

    def on_end(self, span: "ReadableSpan") -> None:
        self._submit_and_await(lambda processor: processor.on_end, span)

    def shutdown(self) -> None:
        """Shut down all underlying span processors in parallel."""
        self._submit_and_await(lambda processor: processor.shutdown)

    def force_flush(self, timeout_millis: int = 30000) -> bool:
        """Flush all underlying span processors in parallel.

        Args:
            timeout_millis: Maximum time to wait for spans to be exported.

        Returns:
            True if every span processor flushed within the timeout,
            False otherwise.
        """
        pending = [
            self._executor.submit(processor.force_flush, timeout_millis)
            for processor in self._span_processors
        ]
        done, not_done = concurrent.futures.wait(
            pending, timeout_millis / 1e3
        )
        if not_done:
            return False
        return all(task.result() for task in done)
+ """ + futures = [] + for sp in self._span_processors: # type: SpanProcessor + future = self._executor.submit(sp.force_flush, timeout_millis) + futures.append(future) + + timeout_sec = timeout_millis / 1e3 + done_futures, not_done_futures = concurrent.futures.wait( + futures, timeout_sec + ) + if not_done_futures: + return False + + for future in done_futures: + if not future.result(): + return False + + return True + + +class EventBase(abc.ABC): + def __init__(self, name: str, timestamp: Optional[int] = None) -> None: + self._name = name + if timestamp is None: + self._timestamp = time_ns() + else: + self._timestamp = timestamp + + @property + def name(self) -> str: + return self._name + + @property + def timestamp(self) -> int: + return self._timestamp + + @property + @abc.abstractmethod + def attributes(self) -> types.Attributes: + pass + + +class Event(EventBase): + """A text annotation with a set of attributes. The attributes of an event + are immutable. + + Args: + name: Name of the event. + attributes: Attributes of the event. + timestamp: Timestamp of the event. If `None` it will filled + automatically. 
+ """ + + def __init__( + self, + name: str, + attributes: types.Attributes = None, + timestamp: Optional[int] = None, + limit: Optional[int] = _DEFAULT_OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT, + ) -> None: + super().__init__(name, timestamp) + self._attributes = attributes + + @property + def attributes(self) -> types.Attributes: + return self._attributes + + @property + def dropped_attributes(self) -> int: + if isinstance(self._attributes, BoundedAttributes): + return self._attributes.dropped + return 0 + + +def _check_span_ended(func): + def wrapper(self, *args, **kwargs): + already_ended = False + with self._lock: # pylint: disable=protected-access + if self._end_time is None: # pylint: disable=protected-access + func(self, *args, **kwargs) + else: + already_ended = True + + if already_ended: + logger.warning("Tried calling %s on an ended span.", func.__name__) + + return wrapper + + +def _is_valid_link(context: SpanContext, attributes: types.Attributes) -> bool: + return bool( + context and (context.is_valid or (attributes or context.trace_state)) + ) + + +class ReadableSpan: + """Provides read-only access to span attributes. + + Users should NOT be creating these objects directly. `ReadableSpan`s are created as + a direct result from using the tracing pipeline via the `Tracer`. 
+ + """ + + def __init__( + self, + name: str, + context: Optional[trace_api.SpanContext] = None, + parent: Optional[trace_api.SpanContext] = None, + resource: Optional[Resource] = None, + attributes: types.Attributes = None, + events: Sequence[Event] = (), + links: Sequence[trace_api.Link] = (), + kind: trace_api.SpanKind = trace_api.SpanKind.INTERNAL, + instrumentation_info: Optional[InstrumentationInfo] = None, + status: Status = Status(StatusCode.UNSET), + start_time: Optional[int] = None, + end_time: Optional[int] = None, + instrumentation_scope: Optional[InstrumentationScope] = None, + ) -> None: + self._name = name + self._context = context + self._kind = kind + self._instrumentation_info = instrumentation_info + self._instrumentation_scope = instrumentation_scope + self._parent = parent + self._start_time = start_time + self._end_time = end_time + self._attributes = attributes + self._events = events + self._links = links + if resource is None: + self._resource = Resource.create({}) + else: + self._resource = resource + self._status = status + + @property + def dropped_attributes(self) -> int: + if isinstance(self._attributes, BoundedAttributes): + return self._attributes.dropped + return 0 + + @property + def dropped_events(self) -> int: + if isinstance(self._events, BoundedList): + return self._events.dropped + return 0 + + @property + def dropped_links(self) -> int: + if isinstance(self._links, BoundedList): + return self._links.dropped + return 0 + + @property + def name(self) -> str: + return self._name + + def get_span_context(self): + return self._context + + @property + def context(self): + return self._context + + @property + def kind(self) -> trace_api.SpanKind: + return self._kind + + @property + def parent(self) -> Optional[trace_api.SpanContext]: + return self._parent + + @property + def start_time(self) -> Optional[int]: + return self._start_time + + @property + def end_time(self) -> Optional[int]: + return self._end_time + + @property + def 
status(self) -> trace_api.Status: + return self._status + + @property + def attributes(self) -> types.Attributes: + return MappingProxyType(self._attributes or {}) + + @property + def events(self) -> Sequence[Event]: + return tuple(event for event in self._events) + + @property + def links(self) -> Sequence[trace_api.Link]: + return tuple(link for link in self._links) + + @property + def resource(self) -> Resource: + return self._resource + + @property + @deprecated( + version="1.11.1", reason="You should use instrumentation_scope" + ) + def instrumentation_info(self) -> Optional[InstrumentationInfo]: + return self._instrumentation_info + + @property + def instrumentation_scope(self) -> Optional[InstrumentationScope]: + return self._instrumentation_scope + + def to_json(self, indent: Optional[int] = 4): + parent_id = None + if self.parent is not None: + parent_id = f"0x{trace_api.format_span_id(self.parent.span_id)}" + + start_time = None + if self._start_time: + start_time = util.ns_to_iso_str(self._start_time) + + end_time = None + if self._end_time: + end_time = util.ns_to_iso_str(self._end_time) + + status = { + "status_code": str(self._status.status_code.name), + } + if self._status.description: + status["description"] = self._status.description + + f_span = { + "name": self._name, + "context": ( + self._format_context(self._context) if self._context else None + ), + "kind": str(self.kind), + "parent_id": parent_id, + "start_time": start_time, + "end_time": end_time, + "status": status, + "attributes": self._format_attributes(self._attributes), + "events": self._format_events(self._events), + "links": self._format_links(self._links), + "resource": json.loads(self.resource.to_json()), + } + + return json.dumps(f_span, indent=indent) + + @staticmethod + def _format_context(context: SpanContext) -> Dict[str, str]: + return { + "trace_id": f"0x{trace_api.format_trace_id(context.trace_id)}", + "span_id": f"0x{trace_api.format_span_id(context.span_id)}", + 
"trace_state": repr(context.trace_state), + } + + @staticmethod + def _format_attributes( + attributes: types.Attributes, + ) -> Optional[Dict[str, Any]]: + if attributes is not None and not isinstance(attributes, dict): + return dict(attributes) + return attributes + + @staticmethod + def _format_events(events: Sequence[Event]) -> List[Dict[str, Any]]: + return [ + { + "name": event.name, + "timestamp": util.ns_to_iso_str(event.timestamp), + "attributes": Span._format_attributes( # pylint: disable=protected-access + event.attributes + ), + } + for event in events + ] + + @staticmethod + def _format_links(links: Sequence[trace_api.Link]) -> List[Dict[str, Any]]: + return [ + { + "context": Span._format_context( # pylint: disable=protected-access + link.context + ), + "attributes": Span._format_attributes( # pylint: disable=protected-access + link.attributes + ), + } + for link in links + ] + + +class SpanLimits: + """The limits that should be enforce on recorded data such as events, links, attributes etc. + + This class does not enforce any limits itself. It only provides an a way read limits from env, + default values and from user provided arguments. + + All limit arguments must be either a non-negative integer, ``None`` or ``SpanLimits.UNSET``. + + - All limit arguments are optional. + - If a limit argument is not set, the class will try to read its value from the corresponding + environment variable. + - If the environment variable is not set, the default value, if any, will be used. + + Limit precedence: + + - If a model specific limit is set, it will be used. + - Else if the corresponding global limit is set, it will be used. + - Else if the model specific limit has a default value, the default value will be used. + - Else if the global limit has a default value, the default value will be used. + + Args: + max_attributes: Maximum number of attributes that can be added to a span, event, and link. 
+ Environment variable: OTEL_ATTRIBUTE_COUNT_LIMIT + Default: {_DEFAULT_ATTRIBUTE_COUNT_LIMIT} + max_events: Maximum number of events that can be added to a Span. + Environment variable: OTEL_SPAN_EVENT_COUNT_LIMIT + Default: {_DEFAULT_SPAN_EVENT_COUNT_LIMIT} + max_links: Maximum number of links that can be added to a Span. + Environment variable: OTEL_SPAN_LINK_COUNT_LIMIT + Default: {_DEFAULT_SPAN_LINK_COUNT_LIMIT} + max_span_attributes: Maximum number of attributes that can be added to a Span. + Environment variable: OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT + Default: {_DEFAULT_OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT} + max_event_attributes: Maximum number of attributes that can be added to an Event. + Default: {_DEFAULT_OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT} + max_link_attributes: Maximum number of attributes that can be added to a Link. + Default: {_DEFAULT_OTEL_LINK_ATTRIBUTE_COUNT_LIMIT} + max_attribute_length: Maximum length an attribute value can have. Values longer than + the specified length will be truncated. + max_span_attribute_length: Maximum length a span attribute value can have. Values longer than + the specified length will be truncated. 
+ """ + + UNSET = -1 + + def __init__( + self, + max_attributes: Optional[int] = None, + max_events: Optional[int] = None, + max_links: Optional[int] = None, + max_span_attributes: Optional[int] = None, + max_event_attributes: Optional[int] = None, + max_link_attributes: Optional[int] = None, + max_attribute_length: Optional[int] = None, + max_span_attribute_length: Optional[int] = None, + ): + # span events and links count + self.max_events = self._from_env_if_absent( + max_events, + OTEL_SPAN_EVENT_COUNT_LIMIT, + _DEFAULT_OTEL_SPAN_EVENT_COUNT_LIMIT, + ) + self.max_links = self._from_env_if_absent( + max_links, + OTEL_SPAN_LINK_COUNT_LIMIT, + _DEFAULT_OTEL_SPAN_LINK_COUNT_LIMIT, + ) + + # attribute count + global_max_attributes = self._from_env_if_absent( + max_attributes, OTEL_ATTRIBUTE_COUNT_LIMIT + ) + self.max_attributes = ( + global_max_attributes + if global_max_attributes is not None + else _DEFAULT_OTEL_ATTRIBUTE_COUNT_LIMIT + ) + + self.max_span_attributes = self._from_env_if_absent( + max_span_attributes, + OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT, + ( + global_max_attributes + if global_max_attributes is not None + else _DEFAULT_OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT + ), + ) + self.max_event_attributes = self._from_env_if_absent( + max_event_attributes, + OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT, + ( + global_max_attributes + if global_max_attributes is not None + else _DEFAULT_OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT + ), + ) + self.max_link_attributes = self._from_env_if_absent( + max_link_attributes, + OTEL_LINK_ATTRIBUTE_COUNT_LIMIT, + ( + global_max_attributes + if global_max_attributes is not None + else _DEFAULT_OTEL_LINK_ATTRIBUTE_COUNT_LIMIT + ), + ) + + # attribute length + self.max_attribute_length = self._from_env_if_absent( + max_attribute_length, + OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, + ) + self.max_span_attribute_length = self._from_env_if_absent( + max_span_attribute_length, + OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT, + # use global attribute length limit as default + 
self.max_attribute_length, + ) + + def __repr__(self): + return f"{type(self).__name__}(max_span_attributes={self.max_span_attributes}, max_events_attributes={self.max_event_attributes}, max_link_attributes={self.max_link_attributes}, max_attributes={self.max_attributes}, max_events={self.max_events}, max_links={self.max_links}, max_attribute_length={self.max_attribute_length})" + + @classmethod + def _from_env_if_absent( + cls, value: Optional[int], env_var: str, default: Optional[int] = None + ) -> Optional[int]: + if value == cls.UNSET: + return None + + err_msg = "{} must be a non-negative integer but got {}" + + # if no value is provided for the limit, try to load it from env + if value is None: + # return default value if env var is not set + if env_var not in environ: + return default + + str_value = environ.get(env_var, "").strip().lower() + if str_value == _ENV_VALUE_UNSET: + return None + + try: + value = int(str_value) + except ValueError: + raise ValueError(err_msg.format(env_var, str_value)) + + if value < 0: + raise ValueError(err_msg.format(env_var, value)) + return value + + +_UnsetLimits = SpanLimits( + max_attributes=SpanLimits.UNSET, + max_events=SpanLimits.UNSET, + max_links=SpanLimits.UNSET, + max_span_attributes=SpanLimits.UNSET, + max_event_attributes=SpanLimits.UNSET, + max_link_attributes=SpanLimits.UNSET, + max_attribute_length=SpanLimits.UNSET, + max_span_attribute_length=SpanLimits.UNSET, +) + +# not removed for backward compat. please use SpanLimits instead. +SPAN_ATTRIBUTE_COUNT_LIMIT = SpanLimits._from_env_if_absent( # pylint: disable=protected-access + None, + OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT, + _DEFAULT_OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT, +) + + +class Span(trace_api.Span, ReadableSpan): + """See `opentelemetry.trace.Span`. + + Users should create `Span` objects via the `Tracer` instead of this + constructor. 
+ + Args: + name: The name of the operation this span represents + context: The immutable span context + parent: This span's parent's `opentelemetry.trace.SpanContext`, or + None if this is a root span + sampler: The sampler used to create this span + trace_config: TODO + resource: Entity producing telemetry + attributes: The span's attributes to be exported + events: Timestamped events to be exported + links: Links to other spans to be exported + span_processor: `SpanProcessor` to invoke when starting and ending + this `Span`. + limits: `SpanLimits` instance that was passed to the `TracerProvider` + """ + + def __new__(cls, *args, **kwargs): + if cls is Span: + raise TypeError("Span must be instantiated via a tracer.") + return super().__new__(cls) + + # pylint: disable=too-many-locals + def __init__( + self, + name: str, + context: trace_api.SpanContext, + parent: Optional[trace_api.SpanContext] = None, + sampler: Optional[sampling.Sampler] = None, + trace_config: None = None, # TODO + resource: Optional[Resource] = None, + attributes: types.Attributes = None, + events: Optional[Sequence[Event]] = None, + links: Sequence[trace_api.Link] = (), + kind: trace_api.SpanKind = trace_api.SpanKind.INTERNAL, + span_processor: SpanProcessor = SpanProcessor(), + instrumentation_info: Optional[InstrumentationInfo] = None, + record_exception: bool = True, + set_status_on_exception: bool = True, + limits=_UnsetLimits, + instrumentation_scope: Optional[InstrumentationScope] = None, + ) -> None: + if resource is None: + resource = Resource.create({}) + super().__init__( + name=name, + context=context, + parent=parent, + kind=kind, + resource=resource, + instrumentation_info=instrumentation_info, + instrumentation_scope=instrumentation_scope, + ) + self._sampler = sampler + self._trace_config = trace_config + self._record_exception = record_exception + self._set_status_on_exception = set_status_on_exception + self._span_processor = span_processor + self._limits = limits + 
self._lock = threading.Lock() + self._attributes = BoundedAttributes( + self._limits.max_span_attributes, + attributes, + immutable=False, + max_value_len=self._limits.max_span_attribute_length, + ) + self._events = self._new_events() + if events: + for event in events: + event._attributes = BoundedAttributes( + self._limits.max_event_attributes, + event.attributes, + max_value_len=self._limits.max_attribute_length, + ) + self._events.append(event) + + self._links = self._new_links(links) + + def __repr__(self): + return f'{type(self).__name__}(name="{self._name}", context={self._context})' + + def _new_events(self): + return BoundedList(self._limits.max_events) + + def _new_links(self, links: Sequence[trace_api.Link]): + if not links: + return BoundedList(self._limits.max_links) + + valid_links = [] + for link in links: + if link and _is_valid_link(link.context, link.attributes): + # pylint: disable=protected-access + link._attributes = BoundedAttributes( + self._limits.max_link_attributes, + link.attributes, + max_value_len=self._limits.max_attribute_length, + ) + valid_links.append(link) + + return BoundedList.from_seq(self._limits.max_links, valid_links) + + def get_span_context(self): + return self._context + + def set_attributes( + self, attributes: Mapping[str, types.AttributeValue] + ) -> None: + with self._lock: + if self._end_time is not None: + logger.warning("Setting attribute on ended span.") + return + + for key, value in attributes.items(): + self._attributes[key] = value + + def set_attribute(self, key: str, value: types.AttributeValue) -> None: + return self.set_attributes({key: value}) + + @_check_span_ended + def _add_event(self, event: EventBase) -> None: + self._events.append(event) + + def add_event( + self, + name: str, + attributes: types.Attributes = None, + timestamp: Optional[int] = None, + ) -> None: + attributes = BoundedAttributes( + self._limits.max_event_attributes, + attributes, + max_value_len=self._limits.max_attribute_length, + ) 
+ self._add_event( + Event( + name=name, + attributes=attributes, + timestamp=timestamp, + ) + ) + + @_check_span_ended + def _add_link(self, link: trace_api.Link) -> None: + self._links.append(link) + + def add_link( + self, + context: SpanContext, + attributes: types.Attributes = None, + ) -> None: + if not _is_valid_link(context, attributes): + return + + attributes = BoundedAttributes( + self._limits.max_link_attributes, + attributes, + max_value_len=self._limits.max_attribute_length, + ) + self._add_link( + trace_api.Link( + context=context, + attributes=attributes, + ) + ) + + def _readable_span(self) -> ReadableSpan: + return ReadableSpan( + name=self._name, + context=self._context, + parent=self._parent, + resource=self._resource, + attributes=self._attributes, + events=self._events, + links=self._links, + kind=self.kind, + status=self._status, + start_time=self._start_time, + end_time=self._end_time, + instrumentation_info=self._instrumentation_info, + instrumentation_scope=self._instrumentation_scope, + ) + + def start( + self, + start_time: Optional[int] = None, + parent_context: Optional[context_api.Context] = None, + ) -> None: + with self._lock: + if self._start_time is not None: + logger.warning("Calling start() on a started span.") + return + self._start_time = ( + start_time if start_time is not None else time_ns() + ) + + self._span_processor.on_start(self, parent_context=parent_context) + + def end(self, end_time: Optional[int] = None) -> None: + with self._lock: + if self._start_time is None: + raise RuntimeError("Calling end() on a not started span.") + if self._end_time is not None: + logger.warning("Calling end() on an ended span.") + return + + self._end_time = end_time if end_time is not None else time_ns() + + self._span_processor.on_end(self._readable_span()) + + @_check_span_ended + def update_name(self, name: str) -> None: + self._name = name + + def is_recording(self) -> bool: + return self._end_time is None + + @_check_span_ended + 
    def set_status(
        self,
        status: typing.Union[Status, StatusCode],
        description: typing.Optional[str] = None,
    ) -> None:
        """Set the span status.

        Accepts either a full `Status` object or a bare `StatusCode` with an
        optional description. Once the status is OK it is final; attempts to
        set `StatusCode.UNSET` are ignored.
        """
        # Ignore future calls if status is already set to OK
        # Ignore calls to set to StatusCode.UNSET
        if isinstance(status, Status):
            # NOTE: `and` binds tighter than `or`, so this guard reads as
            # "(current status is OK) or (incoming status is UNSET)".
            if (
                self._status
                and self._status.status_code is StatusCode.OK
                or status.status_code is StatusCode.UNSET
            ):
                return
            # A `Status` object already carries its own description; a second
            # one passed alongside it would be ambiguous, so it is dropped.
            if description is not None:
                logger.warning(
                    "Description %s ignored. Use either `Status` or `(StatusCode, Description)`",
                    description,
                )
            self._status = status
        elif isinstance(status, StatusCode):
            # Same precedence note as above: (already OK) or (UNSET incoming).
            if (
                self._status
                and self._status.status_code is StatusCode.OK
                or status is StatusCode.UNSET
            ):
                return
            self._status = Status(status, description)

    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_val: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ) -> None:
        """Ends context manager and calls `end` on the `Span`."""
        # Only a still-recording span records the escaping exception; the
        # flags below are set at construction time by the Tracer.
        if exc_val is not None and self.is_recording():
            # Record the exception as an event
            # pylint:disable=protected-access
            if self._record_exception:
                self.record_exception(exception=exc_val, escaped=True)
            # Records status if span is used as context manager
            # i.e.
with tracer.start_span() as span: + if self._set_status_on_exception: + self.set_status( + Status( + status_code=StatusCode.ERROR, + description=f"{exc_type.__name__}: {exc_val}", + ) + ) + + super().__exit__(exc_type, exc_val, exc_tb) + + def record_exception( + self, + exception: BaseException, + attributes: types.Attributes = None, + timestamp: Optional[int] = None, + escaped: bool = False, + ) -> None: + """Records an exception as a span event.""" + # TODO: keep only exception as first argument after baseline is 3.10 + stacktrace = "".join( + traceback.format_exception( + type(exception), value=exception, tb=exception.__traceback__ + ) + ) + module = type(exception).__module__ + qualname = type(exception).__qualname__ + exception_type = ( + f"{module}.{qualname}" + if module and module != "builtins" + else qualname + ) + _attributes: MutableMapping[str, types.AttributeValue] = { + EXCEPTION_TYPE: exception_type, + EXCEPTION_MESSAGE: str(exception), + EXCEPTION_STACKTRACE: stacktrace, + EXCEPTION_ESCAPED: str(escaped), + } + if attributes: + _attributes.update(attributes) + self.add_event( + name="exception", attributes=_attributes, timestamp=timestamp + ) + + +class _Span(Span): + """Protected implementation of `opentelemetry.trace.Span`. + + This constructor exists to prevent the instantiation of the `Span` class + by other mechanisms than through the `Tracer`. 
+ """ + + +class Tracer(trace_api.Tracer): + """See `opentelemetry.trace.Tracer`.""" + + def __init__( + self, + sampler: sampling.Sampler, + resource: Resource, + span_processor: Union[ + SynchronousMultiSpanProcessor, ConcurrentMultiSpanProcessor + ], + id_generator: IdGenerator, + instrumentation_info: InstrumentationInfo, + span_limits: SpanLimits, + instrumentation_scope: InstrumentationScope, + ) -> None: + self.sampler = sampler + self.resource = resource + self.span_processor = span_processor + self.id_generator = id_generator + self.instrumentation_info = instrumentation_info + self._span_limits = span_limits + self._instrumentation_scope = instrumentation_scope + + @_agnosticcontextmanager # pylint: disable=protected-access + def start_as_current_span( + self, + name: str, + context: Optional[context_api.Context] = None, + kind: trace_api.SpanKind = trace_api.SpanKind.INTERNAL, + attributes: types.Attributes = None, + links: Optional[Sequence[trace_api.Link]] = (), + start_time: Optional[int] = None, + record_exception: bool = True, + set_status_on_exception: bool = True, + end_on_exit: bool = True, + ) -> Iterator[trace_api.Span]: + span = self.start_span( + name=name, + context=context, + kind=kind, + attributes=attributes, + links=links, + start_time=start_time, + record_exception=record_exception, + set_status_on_exception=set_status_on_exception, + ) + with trace_api.use_span( + span, + end_on_exit=end_on_exit, + record_exception=record_exception, + set_status_on_exception=set_status_on_exception, + ) as span: + yield span + + def start_span( # pylint: disable=too-many-locals + self, + name: str, + context: Optional[context_api.Context] = None, + kind: trace_api.SpanKind = trace_api.SpanKind.INTERNAL, + attributes: types.Attributes = None, + links: Optional[Sequence[trace_api.Link]] = (), + start_time: Optional[int] = None, + record_exception: bool = True, + set_status_on_exception: bool = True, + ) -> trace_api.Span: + parent_span_context = 
trace_api.get_current_span( + context + ).get_span_context() + + if parent_span_context is not None and not isinstance( + parent_span_context, trace_api.SpanContext + ): + raise TypeError( + "parent_span_context must be a SpanContext or None." + ) + + # is_valid determines root span + if parent_span_context is None or not parent_span_context.is_valid: + parent_span_context = None + trace_id = self.id_generator.generate_trace_id() + else: + trace_id = parent_span_context.trace_id + + # The sampler decides whether to create a real or no-op span at the + # time of span creation. No-op spans do not record events, and are not + # exported. + # The sampler may also add attributes to the newly-created span, e.g. + # to include information about the sampling result. + # The sampler may also modify the parent span context's tracestate + sampling_result = self.sampler.should_sample( + context, trace_id, name, kind, attributes, links + ) + + trace_flags = ( + trace_api.TraceFlags(trace_api.TraceFlags.SAMPLED) + if sampling_result.decision.is_sampled() + else trace_api.TraceFlags(trace_api.TraceFlags.DEFAULT) + ) + span_context = trace_api.SpanContext( + trace_id, + self.id_generator.generate_span_id(), + is_remote=False, + trace_flags=trace_flags, + trace_state=sampling_result.trace_state, + ) + + # Only record if is_recording() is true + if sampling_result.decision.is_recording(): + # pylint:disable=protected-access + span = _Span( + name=name, + context=span_context, + parent=parent_span_context, + sampler=self.sampler, + resource=self.resource, + attributes=sampling_result.attributes.copy(), + span_processor=self.span_processor, + kind=kind, + links=links, + instrumentation_info=self.instrumentation_info, + record_exception=record_exception, + set_status_on_exception=set_status_on_exception, + limits=self._span_limits, + instrumentation_scope=self._instrumentation_scope, + ) + span.start(start_time=start_time, parent_context=context) + else: + span = 
trace_api.NonRecordingSpan(context=span_context) + return span + + +class TracerProvider(trace_api.TracerProvider): + """See `opentelemetry.trace.TracerProvider`.""" + + def __init__( + self, + sampler: Optional[sampling.Sampler] = None, + resource: Optional[Resource] = None, + shutdown_on_exit: bool = True, + active_span_processor: Union[ + SynchronousMultiSpanProcessor, ConcurrentMultiSpanProcessor, None + ] = None, + id_generator: Optional[IdGenerator] = None, + span_limits: Optional[SpanLimits] = None, + ) -> None: + self._active_span_processor = ( + active_span_processor or SynchronousMultiSpanProcessor() + ) + if id_generator is None: + self.id_generator = RandomIdGenerator() + else: + self.id_generator = id_generator + if resource is None: + self._resource = Resource.create({}) + else: + self._resource = resource + if not sampler: + sampler = sampling._get_from_env_or_default() + self.sampler = sampler + self._span_limits = span_limits or SpanLimits() + disabled = environ.get(OTEL_SDK_DISABLED, "") + self._disabled = disabled.lower().strip() == "true" + self._atexit_handler = None + + if shutdown_on_exit: + self._atexit_handler = atexit.register(self.shutdown) + + @property + def resource(self) -> Resource: + return self._resource + + def get_tracer( + self, + instrumenting_module_name: str, + instrumenting_library_version: typing.Optional[str] = None, + schema_url: typing.Optional[str] = None, + attributes: typing.Optional[types.Attributes] = None, + ) -> "trace_api.Tracer": + if self._disabled: + return NoOpTracer() + if not instrumenting_module_name: # Reject empty strings too. + instrumenting_module_name = "" + logger.error("get_tracer called with missing module name.") + if instrumenting_library_version is None: + instrumenting_library_version = "" + + filterwarnings( + "ignore", + message=( + r"Call to deprecated method __init__. \(You should use " + r"InstrumentationScope\) -- Deprecated since version 1.11.1." 
+ ), + category=DeprecationWarning, + module="opentelemetry.sdk.trace", + ) + + instrumentation_info = InstrumentationInfo( + instrumenting_module_name, + instrumenting_library_version, + schema_url, + ) + + return Tracer( + self.sampler, + self.resource, + self._active_span_processor, + self.id_generator, + instrumentation_info, + self._span_limits, + InstrumentationScope( + instrumenting_module_name, + instrumenting_library_version, + schema_url, + attributes, + ), + ) + + def add_span_processor(self, span_processor: SpanProcessor) -> None: + """Registers a new :class:`SpanProcessor` for this `TracerProvider`. + + The span processors are invoked in the same order they are registered. + """ + + # no lock here because add_span_processor is thread safe for both + # SynchronousMultiSpanProcessor and ConcurrentMultiSpanProcessor. + self._active_span_processor.add_span_processor(span_processor) + + def shutdown(self) -> None: + """Shut down the span processors added to the tracer provider.""" + self._active_span_processor.shutdown() + if self._atexit_handler is not None: + atexit.unregister(self._atexit_handler) + self._atexit_handler = None + + def force_flush(self, timeout_millis: int = 30000) -> bool: + """Requests the active span processor to process all spans that have not + yet been processed. + + By default force flush is called sequentially on all added span + processors. This means that span processors further back in the list + have less time to flush their spans. + To have span processors flush their spans in parallel it is possible to + initialize the tracer provider with an instance of + `ConcurrentMultiSpanProcessor` at the cost of using multiple threads. + + Args: + timeout_millis: The maximum amount of time to wait for spans to be + processed. + + Returns: + False if the timeout is exceeded, True otherwise. 
+ """ + return self._active_span_processor.force_flush(timeout_millis) diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/trace/export/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/trace/export/__init__.py new file mode 100644 index 00000000..47d1769a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/trace/export/__init__.py @@ -0,0 +1,517 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import collections +import logging +import os +import sys +import threading +import typing +from enum import Enum +from os import environ, linesep +from time import time_ns + +from opentelemetry.context import ( + _SUPPRESS_INSTRUMENTATION_KEY, + Context, + attach, + detach, + set_value, +) +from opentelemetry.sdk.environment_variables import ( + OTEL_BSP_EXPORT_TIMEOUT, + OTEL_BSP_MAX_EXPORT_BATCH_SIZE, + OTEL_BSP_MAX_QUEUE_SIZE, + OTEL_BSP_SCHEDULE_DELAY, +) +from opentelemetry.sdk.trace import ReadableSpan, Span, SpanProcessor +from opentelemetry.util._once import Once + +_DEFAULT_SCHEDULE_DELAY_MILLIS = 5000 +_DEFAULT_MAX_EXPORT_BATCH_SIZE = 512 +_DEFAULT_EXPORT_TIMEOUT_MILLIS = 30000 +_DEFAULT_MAX_QUEUE_SIZE = 2048 +_ENV_VAR_INT_VALUE_ERROR_MESSAGE = ( + "Unable to parse value for %s as integer. Defaulting to %s." 
+) + +logger = logging.getLogger(__name__) + + +class SpanExportResult(Enum): + SUCCESS = 0 + FAILURE = 1 + + +class SpanExporter: + """Interface for exporting spans. + + Interface to be implemented by services that want to export spans recorded + in their own format. + + To export data this MUST be registered to the :class`opentelemetry.sdk.trace.Tracer` using a + `SimpleSpanProcessor` or a `BatchSpanProcessor`. + """ + + def export( + self, spans: typing.Sequence[ReadableSpan] + ) -> "SpanExportResult": + """Exports a batch of telemetry data. + + Args: + spans: The list of `opentelemetry.trace.Span` objects to be exported + + Returns: + The result of the export + """ + + def shutdown(self) -> None: + """Shuts down the exporter. + + Called when the SDK is shut down. + """ + + def force_flush(self, timeout_millis: int = 30000) -> bool: + """Hint to ensure that the export of any spans the exporter has received + prior to the call to ForceFlush SHOULD be completed as soon as possible, preferably + before returning from this method. + """ + + +class SimpleSpanProcessor(SpanProcessor): + """Simple SpanProcessor implementation. + + SimpleSpanProcessor is an implementation of `SpanProcessor` that + passes ended spans directly to the configured `SpanExporter`. 
+ """ + + def __init__(self, span_exporter: SpanExporter): + self.span_exporter = span_exporter + + def on_start( + self, span: Span, parent_context: typing.Optional[Context] = None + ) -> None: + pass + + def on_end(self, span: ReadableSpan) -> None: + if not span.context.trace_flags.sampled: + return + token = attach(set_value(_SUPPRESS_INSTRUMENTATION_KEY, True)) + try: + self.span_exporter.export((span,)) + # pylint: disable=broad-exception-caught + except Exception: + logger.exception("Exception while exporting Span.") + detach(token) + + def shutdown(self) -> None: + self.span_exporter.shutdown() + + def force_flush(self, timeout_millis: int = 30000) -> bool: + # pylint: disable=unused-argument + return True + + +class _FlushRequest: + """Represents a request for the BatchSpanProcessor to flush spans.""" + + __slots__ = ["event", "num_spans"] + + def __init__(self): + self.event = threading.Event() + self.num_spans = 0 + + +_BSP_RESET_ONCE = Once() + + +class BatchSpanProcessor(SpanProcessor): + """Batch span processor implementation. + + `BatchSpanProcessor` is an implementation of `SpanProcessor` that + batches ended spans and pushes them to the configured `SpanExporter`. 
+ + `BatchSpanProcessor` is configurable with the following environment + variables which correspond to constructor parameters: + + - :envvar:`OTEL_BSP_SCHEDULE_DELAY` + - :envvar:`OTEL_BSP_MAX_QUEUE_SIZE` + - :envvar:`OTEL_BSP_MAX_EXPORT_BATCH_SIZE` + - :envvar:`OTEL_BSP_EXPORT_TIMEOUT` + """ + + def __init__( + self, + span_exporter: SpanExporter, + max_queue_size: int | None = None, + schedule_delay_millis: float | None = None, + max_export_batch_size: int | None = None, + export_timeout_millis: float | None = None, + ): + if max_queue_size is None: + max_queue_size = BatchSpanProcessor._default_max_queue_size() + + if schedule_delay_millis is None: + schedule_delay_millis = ( + BatchSpanProcessor._default_schedule_delay_millis() + ) + + if max_export_batch_size is None: + max_export_batch_size = ( + BatchSpanProcessor._default_max_export_batch_size() + ) + + if export_timeout_millis is None: + export_timeout_millis = ( + BatchSpanProcessor._default_export_timeout_millis() + ) + + BatchSpanProcessor._validate_arguments( + max_queue_size, schedule_delay_millis, max_export_batch_size + ) + + self.span_exporter = span_exporter + self.queue = collections.deque([], max_queue_size) # type: typing.Deque[Span] + self.worker_thread = threading.Thread( + name="OtelBatchSpanProcessor", target=self.worker, daemon=True + ) + self.condition = threading.Condition(threading.Lock()) + self._flush_request = None # type: typing.Optional[_FlushRequest] + self.schedule_delay_millis = schedule_delay_millis + self.max_export_batch_size = max_export_batch_size + self.max_queue_size = max_queue_size + self.export_timeout_millis = export_timeout_millis + self.done = False + # flag that indicates that spans are being dropped + self._spans_dropped = False + # precallocated list to send spans to exporter + self.spans_list = [None] * self.max_export_batch_size # type: typing.List[typing.Optional[Span]] + self.worker_thread.start() + if hasattr(os, "register_at_fork"): + 
os.register_at_fork(after_in_child=self._at_fork_reinit) # pylint: disable=protected-access + self._pid = os.getpid() + + def on_start( + self, span: Span, parent_context: Context | None = None + ) -> None: + pass + + def on_end(self, span: ReadableSpan) -> None: + if self.done: + logger.warning("Already shutdown, dropping span.") + return + if not span.context.trace_flags.sampled: + return + if self._pid != os.getpid(): + _BSP_RESET_ONCE.do_once(self._at_fork_reinit) + + if len(self.queue) == self.max_queue_size: + if not self._spans_dropped: + logger.warning("Queue is full, likely spans will be dropped.") + self._spans_dropped = True + + self.queue.appendleft(span) + + if len(self.queue) >= self.max_export_batch_size: + with self.condition: + self.condition.notify() + + def _at_fork_reinit(self): + self.condition = threading.Condition(threading.Lock()) + self.queue.clear() + + # worker_thread is local to a process, only the thread that issued fork continues + # to exist. A new worker thread must be started in child process. 
+ self.worker_thread = threading.Thread( + name="OtelBatchSpanProcessor", target=self.worker, daemon=True + ) + self.worker_thread.start() + self._pid = os.getpid() + + def worker(self): + timeout = self.schedule_delay_millis / 1e3 + flush_request = None # type: typing.Optional[_FlushRequest] + while not self.done: + with self.condition: + if self.done: + # done flag may have changed, avoid waiting + break + flush_request = self._get_and_unset_flush_request() + if ( + len(self.queue) < self.max_export_batch_size + and flush_request is None + ): + self.condition.wait(timeout) + flush_request = self._get_and_unset_flush_request() + if not self.queue: + # spurious notification, let's wait again, reset timeout + timeout = self.schedule_delay_millis / 1e3 + self._notify_flush_request_finished(flush_request) + flush_request = None + continue + if self.done: + # missing spans will be sent when calling flush + break + + # subtract the duration of this export call to the next timeout + start = time_ns() + self._export(flush_request) + end = time_ns() + duration = (end - start) / 1e9 + timeout = self.schedule_delay_millis / 1e3 - duration + + self._notify_flush_request_finished(flush_request) + flush_request = None + + # there might have been a new flush request while export was running + # and before the done flag switched to true + with self.condition: + shutdown_flush_request = self._get_and_unset_flush_request() + + # be sure that all spans are sent + self._drain_queue() + self._notify_flush_request_finished(flush_request) + self._notify_flush_request_finished(shutdown_flush_request) + + def _get_and_unset_flush_request( + self, + ) -> typing.Optional[_FlushRequest]: + """Returns the current flush request and makes it invisible to the + worker thread for subsequent calls. 
+ """ + flush_request = self._flush_request + self._flush_request = None + if flush_request is not None: + flush_request.num_spans = len(self.queue) + return flush_request + + @staticmethod + def _notify_flush_request_finished( + flush_request: typing.Optional[_FlushRequest], + ): + """Notifies the flush initiator(s) waiting on the given request/event + that the flush operation was finished. + """ + if flush_request is not None: + flush_request.event.set() + + def _get_or_create_flush_request(self) -> _FlushRequest: + """Either returns the current active flush event or creates a new one. + + The flush event will be visible and read by the worker thread before an + export operation starts. Callers of a flush operation may wait on the + returned event to be notified when the flush/export operation was + finished. + + This method is not thread-safe, i.e. callers need to take care about + synchronization/locking. + """ + if self._flush_request is None: + self._flush_request = _FlushRequest() + return self._flush_request + + def _export(self, flush_request: typing.Optional[_FlushRequest]): + """Exports spans considering the given flush_request. + + In case of a given flush_requests spans are exported in batches until + the number of exported spans reached or exceeded the number of spans in + the flush request. + In no flush_request was given at most max_export_batch_size spans are + exported. + """ + if not flush_request: + self._export_batch() + return + + num_spans = flush_request.num_spans + while self.queue: + num_exported = self._export_batch() + num_spans -= num_exported + + if num_spans <= 0: + break + + def _export_batch(self) -> int: + """Exports at most max_export_batch_size spans and returns the number of + exported spans. 
+ """ + idx = 0 + # currently only a single thread acts as consumer, so queue.pop() will + # not raise an exception + while idx < self.max_export_batch_size and self.queue: + self.spans_list[idx] = self.queue.pop() + idx += 1 + token = attach(set_value(_SUPPRESS_INSTRUMENTATION_KEY, True)) + try: + # Ignore type b/c the Optional[None]+slicing is too "clever" + # for mypy + self.span_exporter.export(self.spans_list[:idx]) # type: ignore + except Exception: # pylint: disable=broad-exception-caught + logger.exception("Exception while exporting Span batch.") + detach(token) + + # clean up list + for index in range(idx): + self.spans_list[index] = None + return idx + + def _drain_queue(self): + """Export all elements until queue is empty. + + Can only be called from the worker thread context because it invokes + `export` that is not thread safe. + """ + while self.queue: + self._export_batch() + + def force_flush(self, timeout_millis: int | None = None) -> bool: + if timeout_millis is None: + timeout_millis = self.export_timeout_millis + + if self.done: + logger.warning("Already shutdown, ignoring call to force_flush().") + return True + + with self.condition: + flush_request = self._get_or_create_flush_request() + # signal the worker thread to flush and wait for it to finish + self.condition.notify_all() + + # wait for token to be processed + ret = flush_request.event.wait(timeout_millis / 1e3) + if not ret: + logger.warning("Timeout was exceeded in force_flush().") + return ret + + def shutdown(self) -> None: + # signal the worker thread to finish and then wait for it + self.done = True + with self.condition: + self.condition.notify_all() + self.worker_thread.join() + self.span_exporter.shutdown() + + @staticmethod + def _default_max_queue_size(): + try: + return int( + environ.get(OTEL_BSP_MAX_QUEUE_SIZE, _DEFAULT_MAX_QUEUE_SIZE) + ) + except ValueError: + logger.exception( + _ENV_VAR_INT_VALUE_ERROR_MESSAGE, + OTEL_BSP_MAX_QUEUE_SIZE, + _DEFAULT_MAX_QUEUE_SIZE, + ) 
+ return _DEFAULT_MAX_QUEUE_SIZE + + @staticmethod + def _default_schedule_delay_millis(): + try: + return int( + environ.get( + OTEL_BSP_SCHEDULE_DELAY, _DEFAULT_SCHEDULE_DELAY_MILLIS + ) + ) + except ValueError: + logger.exception( + _ENV_VAR_INT_VALUE_ERROR_MESSAGE, + OTEL_BSP_SCHEDULE_DELAY, + _DEFAULT_SCHEDULE_DELAY_MILLIS, + ) + return _DEFAULT_SCHEDULE_DELAY_MILLIS + + @staticmethod + def _default_max_export_batch_size(): + try: + return int( + environ.get( + OTEL_BSP_MAX_EXPORT_BATCH_SIZE, + _DEFAULT_MAX_EXPORT_BATCH_SIZE, + ) + ) + except ValueError: + logger.exception( + _ENV_VAR_INT_VALUE_ERROR_MESSAGE, + OTEL_BSP_MAX_EXPORT_BATCH_SIZE, + _DEFAULT_MAX_EXPORT_BATCH_SIZE, + ) + return _DEFAULT_MAX_EXPORT_BATCH_SIZE + + @staticmethod + def _default_export_timeout_millis(): + try: + return int( + environ.get( + OTEL_BSP_EXPORT_TIMEOUT, _DEFAULT_EXPORT_TIMEOUT_MILLIS + ) + ) + except ValueError: + logger.exception( + _ENV_VAR_INT_VALUE_ERROR_MESSAGE, + OTEL_BSP_EXPORT_TIMEOUT, + _DEFAULT_EXPORT_TIMEOUT_MILLIS, + ) + return _DEFAULT_EXPORT_TIMEOUT_MILLIS + + @staticmethod + def _validate_arguments( + max_queue_size, schedule_delay_millis, max_export_batch_size + ): + if max_queue_size <= 0: + raise ValueError("max_queue_size must be a positive integer.") + + if schedule_delay_millis <= 0: + raise ValueError("schedule_delay_millis must be positive.") + + if max_export_batch_size <= 0: + raise ValueError( + "max_export_batch_size must be a positive integer." + ) + + if max_export_batch_size > max_queue_size: + raise ValueError( + "max_export_batch_size must be less than or equal to max_queue_size." + ) + + +class ConsoleSpanExporter(SpanExporter): + """Implementation of :class:`SpanExporter` that prints spans to the + console. + + This class can be used for diagnostic purposes. It prints the exported + spans to the console STDOUT. 
+ """ + + def __init__( + self, + service_name: str | None = None, + out: typing.IO = sys.stdout, + formatter: typing.Callable[ + [ReadableSpan], str + ] = lambda span: span.to_json() + linesep, + ): + self.out = out + self.formatter = formatter + self.service_name = service_name + + def export(self, spans: typing.Sequence[ReadableSpan]) -> SpanExportResult: + for span in spans: + self.out.write(self.formatter(span)) + self.out.flush() + return SpanExportResult.SUCCESS + + def force_flush(self, timeout_millis: int = 30000) -> bool: + return True diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/trace/export/in_memory_span_exporter.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/trace/export/in_memory_span_exporter.py new file mode 100644 index 00000000..c28ecfd2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/trace/export/in_memory_span_exporter.py @@ -0,0 +1,61 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import threading +import typing + +from opentelemetry.sdk.trace import ReadableSpan +from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult + + +class InMemorySpanExporter(SpanExporter): + """Implementation of :class:`.SpanExporter` that stores spans in memory. + + This class can be used for testing purposes. It stores the exported spans + in a list in memory that can be retrieved using the + :func:`.get_finished_spans` method. 
+ """ + + def __init__(self) -> None: + self._finished_spans: typing.List[ReadableSpan] = [] + self._stopped = False + self._lock = threading.Lock() + + def clear(self) -> None: + """Clear list of collected spans.""" + with self._lock: + self._finished_spans.clear() + + def get_finished_spans(self) -> typing.Tuple[ReadableSpan, ...]: + """Get list of collected spans.""" + with self._lock: + return tuple(self._finished_spans) + + def export(self, spans: typing.Sequence[ReadableSpan]) -> SpanExportResult: + """Stores a list of spans in memory.""" + if self._stopped: + return SpanExportResult.FAILURE + with self._lock: + self._finished_spans.extend(spans) + return SpanExportResult.SUCCESS + + def shutdown(self) -> None: + """Shut downs the exporter. + + Calls to export after the exporter has been shut down will fail. + """ + self._stopped = True + + def force_flush(self, timeout_millis: int = 30000) -> bool: + return True diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/trace/id_generator.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/trace/id_generator.py new file mode 100644 index 00000000..cd1f89bc --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/trace/id_generator.py @@ -0,0 +1,60 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import abc +import random + +from opentelemetry import trace + + +class IdGenerator(abc.ABC): + @abc.abstractmethod + def generate_span_id(self) -> int: + """Get a new span ID. + + Returns: + A 64-bit int for use as a span ID + """ + + @abc.abstractmethod + def generate_trace_id(self) -> int: + """Get a new trace ID. + + Implementations should at least make the 64 least significant bits + uniformly random. Samplers like the `TraceIdRatioBased` sampler rely on + this randomness to make sampling decisions. + + See `the specification on TraceIdRatioBased <https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/sdk.md#traceidratiobased>`_. + + Returns: + A 128-bit int for use as a trace ID + """ + + +class RandomIdGenerator(IdGenerator): + """The default ID generator for TracerProvider which randomly generates all + bits when generating IDs. + """ + + def generate_span_id(self) -> int: + span_id = random.getrandbits(64) + while span_id == trace.INVALID_SPAN_ID: + span_id = random.getrandbits(64) + return span_id + + def generate_trace_id(self) -> int: + trace_id = random.getrandbits(128) + while trace_id == trace.INVALID_TRACE_ID: + trace_id = random.getrandbits(128) + return trace_id diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/trace/sampling.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/trace/sampling.py new file mode 100644 index 00000000..fb6990a0 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/trace/sampling.py @@ -0,0 +1,453 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +For general information about sampling, see `the specification <https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/sdk.md#sampling>`_. + +OpenTelemetry provides two types of samplers: + +- `StaticSampler` +- `TraceIdRatioBased` + +A `StaticSampler` always returns the same sampling result regardless of the conditions. Both possible StaticSamplers are already created: + +- Always sample spans: ALWAYS_ON +- Never sample spans: ALWAYS_OFF + +A `TraceIdRatioBased` sampler makes a random sampling result based on the sampling probability given. + +If the span being sampled has a parent, `ParentBased` will respect the parent delegate sampler. Otherwise, it returns the sampling result from the given root sampler. + +Currently, sampling results are always made during the creation of the span. However, this might not always be the case in the future (see `OTEP #115 <https://github.com/open-telemetry/oteps/pull/115>`_). + +Custom samplers can be created by subclassing `Sampler` and implementing `Sampler.should_sample` as well as `Sampler.get_description`. + +Samplers are able to modify the `opentelemetry.trace.span.TraceState` of the parent of the span being created. For custom samplers, it is suggested to implement `Sampler.should_sample` to utilize the +parent span context's `opentelemetry.trace.span.TraceState` and pass into the `SamplingResult` instead of the explicit trace_state field passed into the parameter of `Sampler.should_sample`. + +To use a sampler, pass it into the tracer provider constructor. 
For example: + +.. code:: python + + from opentelemetry import trace + from opentelemetry.sdk.trace import TracerProvider + from opentelemetry.sdk.trace.export import ( + ConsoleSpanExporter, + SimpleSpanProcessor, + ) + from opentelemetry.sdk.trace.sampling import TraceIdRatioBased + + # sample 1 in every 1000 traces + sampler = TraceIdRatioBased(1/1000) + + # set the sampler onto the global tracer provider + trace.set_tracer_provider(TracerProvider(sampler=sampler)) + + # set up an exporter for sampled spans + trace.get_tracer_provider().add_span_processor( + SimpleSpanProcessor(ConsoleSpanExporter()) + ) + + # created spans will now be sampled by the TraceIdRatioBased sampler + with trace.get_tracer(__name__).start_as_current_span("Test Span"): + ... + +The tracer sampler can also be configured via environment variables ``OTEL_TRACES_SAMPLER`` and ``OTEL_TRACES_SAMPLER_ARG`` (only if applicable). +The list of built-in values for ``OTEL_TRACES_SAMPLER`` are: + + * always_on - Sampler that always samples spans, regardless of the parent span's sampling decision. + * always_off - Sampler that never samples spans, regardless of the parent span's sampling decision. + * traceidratio - Sampler that samples probabilistically based on rate. + * parentbased_always_on - (default) Sampler that respects its parent span's sampling decision, but otherwise always samples. + * parentbased_always_off - Sampler that respects its parent span's sampling decision, but otherwise never samples. + * parentbased_traceidratio - Sampler that respects its parent span's sampling decision, but otherwise samples probabilistically based on rate. + +Sampling probability can be set with ``OTEL_TRACES_SAMPLER_ARG`` if the sampler is traceidratio or parentbased_traceidratio. Rate must be in the range [0.0,1.0]. When not provided rate will be set to +1.0 (maximum rate possible). + +Prev example but with environment variables. 
Please make sure to set the env ``OTEL_TRACES_SAMPLER=traceidratio`` and ``OTEL_TRACES_SAMPLER_ARG=0.001``. + +.. code:: python + + from opentelemetry import trace + from opentelemetry.sdk.trace import TracerProvider + from opentelemetry.sdk.trace.export import ( + ConsoleSpanExporter, + SimpleSpanProcessor, + ) + + trace.set_tracer_provider(TracerProvider()) + + # set up an exporter for sampled spans + trace.get_tracer_provider().add_span_processor( + SimpleSpanProcessor(ConsoleSpanExporter()) + ) + + # created spans will now be sampled by the TraceIdRatioBased sampler with rate 1/1000. + with trace.get_tracer(__name__).start_as_current_span("Test Span"): + ... + +When utilizing a configurator, you can configure a custom sampler. In order to create a configurable custom sampler, create an entry point for the custom sampler +factory method or function under the entry point group, ``opentelemetry_traces_sampler``. The custom sampler factory method must be of type ``Callable[[str], Sampler]``, taking a single string argument and +returning a Sampler object. The single input will come from the string value of the ``OTEL_TRACES_SAMPLER_ARG`` environment variable. If ``OTEL_TRACES_SAMPLER_ARG`` is not configured, the input will +be an empty string. For example: + +.. code:: python + + setup( + ... + entry_points={ + ... + "opentelemetry_traces_sampler": [ + "custom_sampler_name = path.to.sampler.factory.method:CustomSamplerFactory.get_sampler" + ] + } + ) + # ... + class CustomRatioSampler(Sampler): + def __init__(rate): + # ... + # ... + class CustomSamplerFactory: + @staticmethod + def get_sampler(sampler_argument): + try: + rate = float(sampler_argument) + return CustomSampler(rate) + except ValueError: # In case argument is empty string. + return CustomSampler(0.5) + +In order to configure you application with a custom sampler's entry point, set the ``OTEL_TRACES_SAMPLER`` environment variable to the key name of the entry point. 
For example, to configured the +above sampler, set ``OTEL_TRACES_SAMPLER=custom_sampler_name`` and ``OTEL_TRACES_SAMPLER_ARG=0.5``. +""" + +import abc +import enum +import os +from logging import getLogger +from types import MappingProxyType +from typing import Optional, Sequence + +# pylint: disable=unused-import +from opentelemetry.context import Context +from opentelemetry.sdk.environment_variables import ( + OTEL_TRACES_SAMPLER, + OTEL_TRACES_SAMPLER_ARG, +) +from opentelemetry.trace import Link, SpanKind, get_current_span +from opentelemetry.trace.span import TraceState +from opentelemetry.util.types import Attributes + +_logger = getLogger(__name__) + + +class Decision(enum.Enum): + # IsRecording() == false, span will not be recorded and all events and attributes will be dropped. + DROP = 0 + # IsRecording() == true, but Sampled flag MUST NOT be set. + RECORD_ONLY = 1 + # IsRecording() == true AND Sampled flag` MUST be set. + RECORD_AND_SAMPLE = 2 + + def is_recording(self): + return self in (Decision.RECORD_ONLY, Decision.RECORD_AND_SAMPLE) + + def is_sampled(self): + return self is Decision.RECORD_AND_SAMPLE + + +class SamplingResult: + """A sampling result as applied to a newly-created Span. + + Args: + decision: A sampling decision based off of whether the span is recorded + and the sampled flag in trace flags in the span context. + attributes: Attributes to add to the `opentelemetry.trace.Span`. + trace_state: The tracestate used for the `opentelemetry.trace.Span`. + Could possibly have been modified by the sampler. 
+ """ + + def __repr__(self) -> str: + return f"{type(self).__name__}({str(self.decision)}, attributes={str(self.attributes)})" + + def __init__( + self, + decision: Decision, + attributes: "Attributes" = None, + trace_state: Optional["TraceState"] = None, + ) -> None: + self.decision = decision + if attributes is None: + self.attributes = MappingProxyType({}) + else: + self.attributes = MappingProxyType(attributes) + self.trace_state = trace_state + + +class Sampler(abc.ABC): + @abc.abstractmethod + def should_sample( + self, + parent_context: Optional["Context"], + trace_id: int, + name: str, + kind: Optional[SpanKind] = None, + attributes: Attributes = None, + links: Optional[Sequence["Link"]] = None, + trace_state: Optional["TraceState"] = None, + ) -> "SamplingResult": + pass + + @abc.abstractmethod + def get_description(self) -> str: + pass + + +class StaticSampler(Sampler): + """Sampler that always returns the same decision.""" + + def __init__(self, decision: "Decision") -> None: + self._decision = decision + + def should_sample( + self, + parent_context: Optional["Context"], + trace_id: int, + name: str, + kind: Optional[SpanKind] = None, + attributes: Attributes = None, + links: Optional[Sequence["Link"]] = None, + trace_state: Optional["TraceState"] = None, + ) -> "SamplingResult": + if self._decision is Decision.DROP: + attributes = None + return SamplingResult( + self._decision, + attributes, + _get_parent_trace_state(parent_context), + ) + + def get_description(self) -> str: + if self._decision is Decision.DROP: + return "AlwaysOffSampler" + return "AlwaysOnSampler" + + +ALWAYS_OFF = StaticSampler(Decision.DROP) +"""Sampler that never samples spans, regardless of the parent span's sampling decision.""" + +ALWAYS_ON = StaticSampler(Decision.RECORD_AND_SAMPLE) +"""Sampler that always samples spans, regardless of the parent span's sampling decision.""" + + +class TraceIdRatioBased(Sampler): + """ + Sampler that makes sampling decisions probabilistically 
based on `rate`. + + Args: + rate: Probability (between 0 and 1) that a span will be sampled + """ + + def __init__(self, rate: float): + if rate < 0.0 or rate > 1.0: + raise ValueError("Probability must be in range [0.0, 1.0].") + self._rate = rate + self._bound = self.get_bound_for_rate(self._rate) + + # For compatibility with 64 bit trace IDs, the sampler checks the 64 + # low-order bits of the trace ID to decide whether to sample a given trace. + TRACE_ID_LIMIT = (1 << 64) - 1 + + @classmethod + def get_bound_for_rate(cls, rate: float) -> int: + return round(rate * (cls.TRACE_ID_LIMIT + 1)) + + @property + def rate(self) -> float: + return self._rate + + @property + def bound(self) -> int: + return self._bound + + def should_sample( + self, + parent_context: Optional["Context"], + trace_id: int, + name: str, + kind: Optional[SpanKind] = None, + attributes: Attributes = None, + links: Optional[Sequence["Link"]] = None, + trace_state: Optional["TraceState"] = None, + ) -> "SamplingResult": + decision = Decision.DROP + if trace_id & self.TRACE_ID_LIMIT < self.bound: + decision = Decision.RECORD_AND_SAMPLE + if decision is Decision.DROP: + attributes = None + return SamplingResult( + decision, + attributes, + _get_parent_trace_state(parent_context), + ) + + def get_description(self) -> str: + return f"TraceIdRatioBased{{{self._rate}}}" + + +class ParentBased(Sampler): + """ + If a parent is set, applies the respective delegate sampler. + Otherwise, uses the root provided at initialization to make a + decision. + + Args: + root: Sampler called for spans with no parent (root spans). + remote_parent_sampled: Sampler called for a remote sampled parent. + remote_parent_not_sampled: Sampler called for a remote parent that is + not sampled. + local_parent_sampled: Sampler called for a local sampled parent. + local_parent_not_sampled: Sampler called for a local parent that is + not sampled. 
+ """ + + def __init__( + self, + root: Sampler, + remote_parent_sampled: Sampler = ALWAYS_ON, + remote_parent_not_sampled: Sampler = ALWAYS_OFF, + local_parent_sampled: Sampler = ALWAYS_ON, + local_parent_not_sampled: Sampler = ALWAYS_OFF, + ): + self._root = root + self._remote_parent_sampled = remote_parent_sampled + self._remote_parent_not_sampled = remote_parent_not_sampled + self._local_parent_sampled = local_parent_sampled + self._local_parent_not_sampled = local_parent_not_sampled + + def should_sample( + self, + parent_context: Optional["Context"], + trace_id: int, + name: str, + kind: Optional[SpanKind] = None, + attributes: Attributes = None, + links: Optional[Sequence["Link"]] = None, + trace_state: Optional["TraceState"] = None, + ) -> "SamplingResult": + parent_span_context = get_current_span( + parent_context + ).get_span_context() + # default to the root sampler + sampler = self._root + # respect the sampling and remote flag of the parent if present + if parent_span_context is not None and parent_span_context.is_valid: + if parent_span_context.is_remote: + if parent_span_context.trace_flags.sampled: + sampler = self._remote_parent_sampled + else: + sampler = self._remote_parent_not_sampled + else: + if parent_span_context.trace_flags.sampled: + sampler = self._local_parent_sampled + else: + sampler = self._local_parent_not_sampled + + return sampler.should_sample( + parent_context=parent_context, + trace_id=trace_id, + name=name, + kind=kind, + attributes=attributes, + links=links, + ) + + def get_description(self): + return f"ParentBased{{root:{self._root.get_description()},remoteParentSampled:{self._remote_parent_sampled.get_description()},remoteParentNotSampled:{self._remote_parent_not_sampled.get_description()},localParentSampled:{self._local_parent_sampled.get_description()},localParentNotSampled:{self._local_parent_not_sampled.get_description()}}}" + + +DEFAULT_OFF = ParentBased(ALWAYS_OFF) +"""Sampler that respects its parent span's sampling 
decision, but otherwise never samples.""" + +DEFAULT_ON = ParentBased(ALWAYS_ON) +"""Sampler that respects its parent span's sampling decision, but otherwise always samples.""" + + +class ParentBasedTraceIdRatio(ParentBased): + """ + Sampler that respects its parent span's sampling decision, but otherwise + samples probabilistically based on `rate`. + """ + + def __init__(self, rate: float): + root = TraceIdRatioBased(rate=rate) + super().__init__(root=root) + + +class _AlwaysOff(StaticSampler): + def __init__(self, _): + super().__init__(Decision.DROP) + + +class _AlwaysOn(StaticSampler): + def __init__(self, _): + super().__init__(Decision.RECORD_AND_SAMPLE) + + +class _ParentBasedAlwaysOff(ParentBased): + def __init__(self, _): + super().__init__(ALWAYS_OFF) + + +class _ParentBasedAlwaysOn(ParentBased): + def __init__(self, _): + super().__init__(ALWAYS_ON) + + +_KNOWN_SAMPLERS = { + "always_on": ALWAYS_ON, + "always_off": ALWAYS_OFF, + "parentbased_always_on": DEFAULT_ON, + "parentbased_always_off": DEFAULT_OFF, + "traceidratio": TraceIdRatioBased, + "parentbased_traceidratio": ParentBasedTraceIdRatio, +} + + +def _get_from_env_or_default() -> Sampler: + trace_sampler = os.getenv( + OTEL_TRACES_SAMPLER, "parentbased_always_on" + ).lower() + if trace_sampler not in _KNOWN_SAMPLERS: + _logger.warning("Couldn't recognize sampler %s.", trace_sampler) + trace_sampler = "parentbased_always_on" + + if trace_sampler in ("traceidratio", "parentbased_traceidratio"): + try: + rate = float(os.getenv(OTEL_TRACES_SAMPLER_ARG)) + except (ValueError, TypeError): + _logger.warning("Could not convert TRACES_SAMPLER_ARG to float.") + rate = 1.0 + return _KNOWN_SAMPLERS[trace_sampler](rate) + + return _KNOWN_SAMPLERS[trace_sampler] + + +def _get_parent_trace_state( + parent_context: Optional[Context], +) -> Optional["TraceState"]: + parent_span_context = get_current_span(parent_context).get_span_context() + if parent_span_context is None or not parent_span_context.is_valid: + 
return None + return parent_span_context.trace_state diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/util/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/util/__init__.py new file mode 100644 index 00000000..68f10ddc --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/util/__init__.py @@ -0,0 +1,152 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime +import threading +from collections import deque +from collections.abc import MutableMapping, Sequence +from typing import Optional + +from deprecated import deprecated + + +def ns_to_iso_str(nanoseconds): + """Get an ISO 8601 string from time_ns value.""" + ts = datetime.datetime.fromtimestamp( + nanoseconds / 1e9, tz=datetime.timezone.utc + ) + return ts.strftime("%Y-%m-%dT%H:%M:%S.%fZ") + + +def get_dict_as_key(labels): + """Converts a dict to be used as a unique key""" + return tuple( + sorted( + map( + lambda kv: ( + (kv[0], tuple(kv[1])) if isinstance(kv[1], list) else kv + ), + labels.items(), + ) + ) + ) + + +class BoundedList(Sequence): + """An append only list with a fixed max size. + + Calls to `append` and `extend` will drop the oldest elements if there is + not enough room. 
+ """ + + def __init__(self, maxlen: Optional[int]): + self.dropped = 0 + self._dq = deque(maxlen=maxlen) # type: deque + self._lock = threading.Lock() + + def __repr__(self): + return f"{type(self).__name__}({list(self._dq)}, maxlen={self._dq.maxlen})" + + def __getitem__(self, index): + return self._dq[index] + + def __len__(self): + return len(self._dq) + + def __iter__(self): + with self._lock: + return iter(deque(self._dq)) + + def append(self, item): + with self._lock: + if ( + self._dq.maxlen is not None + and len(self._dq) == self._dq.maxlen + ): + self.dropped += 1 + self._dq.append(item) + + def extend(self, seq): + with self._lock: + if self._dq.maxlen is not None: + to_drop = len(seq) + len(self._dq) - self._dq.maxlen + if to_drop > 0: + self.dropped += to_drop + self._dq.extend(seq) + + @classmethod + def from_seq(cls, maxlen, seq): + seq = tuple(seq) + bounded_list = cls(maxlen) + bounded_list.extend(seq) + return bounded_list + + +@deprecated(version="1.4.0") # type: ignore +class BoundedDict(MutableMapping): + """An ordered dict with a fixed max capacity. + + Oldest elements are dropped when the dict is full and a new element is + added. 
+ """ + + def __init__(self, maxlen: Optional[int]): + if maxlen is not None: + if not isinstance(maxlen, int): + raise ValueError + if maxlen < 0: + raise ValueError + self.maxlen = maxlen + self.dropped = 0 + self._dict = {} # type: dict + self._lock = threading.Lock() # type: threading.Lock + + def __repr__(self): + return ( + f"{type(self).__name__}({dict(self._dict)}, maxlen={self.maxlen})" + ) + + def __getitem__(self, key): + return self._dict[key] + + def __setitem__(self, key, value): + with self._lock: + if self.maxlen is not None and self.maxlen == 0: + self.dropped += 1 + return + + if key in self._dict: + del self._dict[key] + elif self.maxlen is not None and len(self._dict) == self.maxlen: + del self._dict[next(iter(self._dict.keys()))] + self.dropped += 1 + self._dict[key] = value + + def __delitem__(self, key): + del self._dict[key] + + def __iter__(self): + with self._lock: + return iter(self._dict.copy()) + + def __len__(self): + return len(self._dict) + + @classmethod + def from_map(cls, maxlen, mapping): + mapping = dict(mapping) + bounded_dict = cls(maxlen) + for key, value in mapping.items(): + bounded_dict[key] = value + return bounded_dict diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/util/__init__.pyi b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/util/__init__.pyi new file mode 100644 index 00000000..55042fcf --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/util/__init__.pyi @@ -0,0 +1,74 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import ( + Iterable, + Iterator, + Mapping, + MutableMapping, + Sequence, + TypeVar, + overload, +) + +from opentelemetry.util.types import AttributesAsKey, AttributeValue + +_T = TypeVar("_T") +_KT = TypeVar("_KT") +_VT = TypeVar("_VT") + +def ns_to_iso_str(nanoseconds: int) -> str: ... +def get_dict_as_key( + labels: Mapping[str, AttributeValue], +) -> AttributesAsKey: ... + +# pylint: disable=no-self-use +class BoundedList(Sequence[_T]): + """An append only list with a fixed max size. + + Calls to `append` and `extend` will drop the oldest elements if there is + not enough room. + """ + + dropped: int + def __init__(self, maxlen: int): ... + def insert(self, index: int, value: _T) -> None: ... + @overload + def __getitem__(self, i: int) -> _T: ... + @overload + def __getitem__(self, s: slice) -> Sequence[_T]: ... + def __len__(self) -> int: ... + def append(self, item: _T) -> None: ... + def extend(self, seq: Sequence[_T]) -> None: ... + @classmethod + def from_seq(cls, maxlen: int, seq: Iterable[_T]) -> BoundedList[_T]: ... # pylint: disable=undefined-variable + +class BoundedDict(MutableMapping[_KT, _VT]): + """An ordered dict with a fixed max capacity. + + Oldest elements are dropped when the dict is full and a new element is + added. + """ + + dropped: int + def __init__(self, maxlen: int): ... + def __getitem__(self, k: _KT) -> _VT: ... + def __setitem__(self, k: _KT, v: _VT) -> None: ... + def __delitem__(self, v: _KT) -> None: ... + def __iter__(self) -> Iterator[_KT]: ... + def __len__(self) -> int: ... + @classmethod + def from_map( + cls, maxlen: int, mapping: Mapping[_KT, _VT] + ) -> BoundedDict[_KT, _VT]: ... 
# pylint: disable=undefined-variable diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/util/instrumentation.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/util/instrumentation.py new file mode 100644 index 00000000..6b45bf2a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/util/instrumentation.py @@ -0,0 +1,167 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from json import dumps +from typing import Optional + +from deprecated import deprecated + +from opentelemetry.attributes import BoundedAttributes +from opentelemetry.util.types import Attributes + + +class InstrumentationInfo: + """Immutable information about an instrumentation library module. + + See `opentelemetry.trace.TracerProvider.get_tracer` for the meaning of these + properties. 
+ """ + + __slots__ = ("_name", "_version", "_schema_url") + + @deprecated(version="1.11.1", reason="You should use InstrumentationScope") + def __init__( + self, + name: str, + version: Optional[str] = None, + schema_url: Optional[str] = None, + ): + self._name = name + self._version = version + if schema_url is None: + schema_url = "" + self._schema_url = schema_url + + def __repr__(self): + return f"{type(self).__name__}({self._name}, {self._version}, {self._schema_url})" + + def __hash__(self): + return hash((self._name, self._version, self._schema_url)) + + def __eq__(self, value): + return type(value) is type(self) and ( + self._name, + self._version, + self._schema_url, + ) == (value._name, value._version, value._schema_url) + + def __lt__(self, value): + if type(value) is not type(self): + return NotImplemented + return (self._name, self._version, self._schema_url) < ( + value._name, + value._version, + value._schema_url, + ) + + @property + def schema_url(self) -> Optional[str]: + return self._schema_url + + @property + def version(self) -> Optional[str]: + return self._version + + @property + def name(self) -> str: + return self._name + + +class InstrumentationScope: + """A logical unit of the application code with which the emitted telemetry can be + associated. + + See `opentelemetry.trace.TracerProvider.get_tracer` for the meaning of these + properties. 
+ """ + + __slots__ = ("_name", "_version", "_schema_url", "_attributes") + + def __init__( + self, + name: str, + version: Optional[str] = None, + schema_url: Optional[str] = None, + attributes: Optional[Attributes] = None, + ) -> None: + self._name = name + self._version = version + if schema_url is None: + schema_url = "" + self._schema_url = schema_url + self._attributes = BoundedAttributes(attributes=attributes) + + def __repr__(self) -> str: + return f"{type(self).__name__}({self._name}, {self._version}, {self._schema_url}, {self._attributes})" + + def __hash__(self) -> int: + return hash((self._name, self._version, self._schema_url)) + + def __eq__(self, value: object) -> bool: + if not isinstance(value, InstrumentationScope): + return NotImplemented + return ( + self._name, + self._version, + self._schema_url, + self._attributes, + ) == ( + value._name, + value._version, + value._schema_url, + value._attributes, + ) + + def __lt__(self, value: object) -> bool: + if not isinstance(value, InstrumentationScope): + return NotImplemented + return ( + self._name, + self._version, + self._schema_url, + self._attributes, + ) < ( + value._name, + value._version, + value._schema_url, + value._attributes, + ) + + @property + def schema_url(self) -> Optional[str]: + return self._schema_url + + @property + def version(self) -> Optional[str]: + return self._version + + @property + def name(self) -> str: + return self._name + + @property + def attributes(self) -> Attributes: + return self._attributes + + def to_json(self, indent: Optional[int] = 4) -> str: + return dumps( + { + "name": self._name, + "version": self._version, + "schema_url": self._schema_url, + "attributes": ( + dict(self._attributes) if bool(self._attributes) else None + ), + }, + indent=indent, + ) diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/sdk/version/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/sdk/version/__init__.py new file mode 100644 index 00000000..09125bac 
# Version string of the opentelemetry-sdk distribution (PEP 440 format).
__version__ = "1.31.1"
# NOTE(review): this module appears to be auto-generated from the
# OpenTelemetry semantic-conventions registry (path: semconv/_incubating);
# if a name looks wrong, fix the registry, not this file — TODO confirm.

ARTIFACT_ATTESTATION_FILENAME: Final = "artifact.attestation.filename"
"""
The provenance filename of the built attestation which directly relates to the build artifact filename. This filename SHOULD accompany the artifact at publish time. See the [SLSA Relationship](https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations) specification for more information.
"""

ARTIFACT_ATTESTATION_HASH: Final = "artifact.attestation.hash"
"""
The full [hash value (see glossary)](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf), of the built attestation. Some envelopes in the [software attestation space](https://github.com/in-toto/attestation/tree/main/spec) also refer to this as the **digest**.
"""

ARTIFACT_ATTESTATION_ID: Final = "artifact.attestation.id"
"""
The id of the build [software attestation](https://slsa.dev/attestation-model).
"""

ARTIFACT_FILENAME: Final = "artifact.filename"
"""
The human readable file name of the artifact, typically generated during build and release processes. Often includes the package name and version in the file name.
Note: This file name can also act as the [Package Name](https://slsa.dev/spec/v1.0/terminology#package-model)
in cases where the package ecosystem maps accordingly.
Additionally, the artifact [can be published](https://slsa.dev/spec/v1.0/terminology#software-supply-chain)
for others, but that is not a guarantee.
"""

ARTIFACT_HASH: Final = "artifact.hash"
"""
The full [hash value (see glossary)](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf), often found in checksum.txt on a release of the artifact and used to verify package integrity.
Note: The specific algorithm used to create the cryptographic hash value is
not defined. In situations where an artifact has multiple
cryptographic hashes, it is up to the implementer to choose which
hash value to set here; this should be the most secure hash algorithm
that is suitable for the situation and consistent with the
corresponding attestation. The implementer can then provide the other
hash values through an additional set of attribute extensions as they
deem necessary.
"""

ARTIFACT_PURL: Final = "artifact.purl"
"""
The [Package URL](https://github.com/package-url/purl-spec) of the [package artifact](https://slsa.dev/spec/v1.0/terminology#package-model) provides a standard way to identify and locate the packaged artifact.
"""

ARTIFACT_VERSION: Final = "artifact.version"
"""
The version of the artifact.
"""
+""" + +AWS_DYNAMODB_ATTRIBUTES_TO_GET: Final = "aws.dynamodb.attributes_to_get" +""" +The value of the `AttributesToGet` request parameter. +""" + +AWS_DYNAMODB_CONSISTENT_READ: Final = "aws.dynamodb.consistent_read" +""" +The value of the `ConsistentRead` request parameter. +""" + +AWS_DYNAMODB_CONSUMED_CAPACITY: Final = "aws.dynamodb.consumed_capacity" +""" +The JSON-serialized value of each item in the `ConsumedCapacity` response field. +""" + +AWS_DYNAMODB_COUNT: Final = "aws.dynamodb.count" +""" +The value of the `Count` response parameter. +""" + +AWS_DYNAMODB_EXCLUSIVE_START_TABLE: Final = ( + "aws.dynamodb.exclusive_start_table" +) +""" +The value of the `ExclusiveStartTableName` request parameter. +""" + +AWS_DYNAMODB_GLOBAL_SECONDARY_INDEX_UPDATES: Final = ( + "aws.dynamodb.global_secondary_index_updates" +) +""" +The JSON-serialized value of each item in the `GlobalSecondaryIndexUpdates` request field. +""" + +AWS_DYNAMODB_GLOBAL_SECONDARY_INDEXES: Final = ( + "aws.dynamodb.global_secondary_indexes" +) +""" +The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request field. +""" + +AWS_DYNAMODB_INDEX_NAME: Final = "aws.dynamodb.index_name" +""" +The value of the `IndexName` request parameter. +""" + +AWS_DYNAMODB_ITEM_COLLECTION_METRICS: Final = ( + "aws.dynamodb.item_collection_metrics" +) +""" +The JSON-serialized value of the `ItemCollectionMetrics` response field. +""" + +AWS_DYNAMODB_LIMIT: Final = "aws.dynamodb.limit" +""" +The value of the `Limit` request parameter. +""" + +AWS_DYNAMODB_LOCAL_SECONDARY_INDEXES: Final = ( + "aws.dynamodb.local_secondary_indexes" +) +""" +The JSON-serialized value of each item of the `LocalSecondaryIndexes` request field. +""" + +AWS_DYNAMODB_PROJECTION: Final = "aws.dynamodb.projection" +""" +The value of the `ProjectionExpression` request parameter. 
+""" + +AWS_DYNAMODB_PROVISIONED_READ_CAPACITY: Final = ( + "aws.dynamodb.provisioned_read_capacity" +) +""" +The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. +""" + +AWS_DYNAMODB_PROVISIONED_WRITE_CAPACITY: Final = ( + "aws.dynamodb.provisioned_write_capacity" +) +""" +The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter. +""" + +AWS_DYNAMODB_SCAN_FORWARD: Final = "aws.dynamodb.scan_forward" +""" +The value of the `ScanIndexForward` request parameter. +""" + +AWS_DYNAMODB_SCANNED_COUNT: Final = "aws.dynamodb.scanned_count" +""" +The value of the `ScannedCount` response parameter. +""" + +AWS_DYNAMODB_SEGMENT: Final = "aws.dynamodb.segment" +""" +The value of the `Segment` request parameter. +""" + +AWS_DYNAMODB_SELECT: Final = "aws.dynamodb.select" +""" +The value of the `Select` request parameter. +""" + +AWS_DYNAMODB_TABLE_COUNT: Final = "aws.dynamodb.table_count" +""" +The number of items in the `TableNames` response parameter. +""" + +AWS_DYNAMODB_TABLE_NAMES: Final = "aws.dynamodb.table_names" +""" +The keys in the `RequestItems` object field. +""" + +AWS_DYNAMODB_TOTAL_SEGMENTS: Final = "aws.dynamodb.total_segments" +""" +The value of the `TotalSegments` request parameter. +""" + +AWS_ECS_CLUSTER_ARN: Final = "aws.ecs.cluster.arn" +""" +The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +""" + +AWS_ECS_CONTAINER_ARN: Final = "aws.ecs.container.arn" +""" +The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +""" + +AWS_ECS_LAUNCHTYPE: Final = "aws.ecs.launchtype" +""" +The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) for an ECS task. 
+""" + +AWS_ECS_TASK_ARN: Final = "aws.ecs.task.arn" +""" +The ARN of a running [ECS task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). +""" + +AWS_ECS_TASK_FAMILY: Final = "aws.ecs.task.family" +""" +The family name of the [ECS task definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) used to create the ECS task. +""" + +AWS_ECS_TASK_ID: Final = "aws.ecs.task.id" +""" +The ID of a running ECS task. The ID MUST be extracted from `task.arn`. +""" + +AWS_ECS_TASK_REVISION: Final = "aws.ecs.task.revision" +""" +The revision for the task definition used to create the ECS task. +""" + +AWS_EKS_CLUSTER_ARN: Final = "aws.eks.cluster.arn" +""" +The ARN of an EKS cluster. +""" + +AWS_EXTENDED_REQUEST_ID: Final = "aws.extended_request_id" +""" +The AWS extended request ID as returned in the response header `x-amz-id-2`. +""" + +AWS_LAMBDA_INVOKED_ARN: Final = "aws.lambda.invoked_arn" +""" +The full invoked ARN as provided on the `Context` passed to the function (`Lambda-Runtime-Invoked-Function-Arn` header on the `/runtime/invocation/next` applicable). +Note: This may be different from `cloud.resource_id` if an alias is involved. +""" + +AWS_LOG_GROUP_ARNS: Final = "aws.log.group.arns" +""" +The Amazon Resource Name(s) (ARN) of the AWS log group(s). +Note: See the [log group ARN format documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). +""" + +AWS_LOG_GROUP_NAMES: Final = "aws.log.group.names" +""" +The name(s) of the AWS log group(s) an application is writing to. +Note: Multiple log groups must be supported for cases like multi-container applications, where a single application has sidecar containers, and each write to their own log group. +""" + +AWS_LOG_STREAM_ARNS: Final = "aws.log.stream.arns" +""" +The ARN(s) of the AWS log stream(s). 
+Note: See the [log stream ARN format documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain several log streams, so these ARNs necessarily identify both a log group and a log stream. +""" + +AWS_LOG_STREAM_NAMES: Final = "aws.log.stream.names" +""" +The name(s) of the AWS log stream(s) an application is writing to. +""" + +AWS_REQUEST_ID: Final = "aws.request_id" +""" +The AWS request ID as returned in the response headers `x-amzn-requestid`, `x-amzn-request-id` or `x-amz-request-id`. +""" + +AWS_S3_BUCKET: Final = "aws.s3.bucket" +""" +The S3 bucket name the request refers to. Corresponds to the `--bucket` parameter of the [S3 API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) operations. +Note: The `bucket` attribute is applicable to all S3 operations that reference a bucket, i.e. that require the bucket name as a mandatory parameter. +This applies to almost all S3 operations except `list-buckets`. +""" + +AWS_S3_COPY_SOURCE: Final = "aws.s3.copy_source" +""" +The source object (in the form `bucket`/`key`) for the copy operation. +Note: The `copy_source` attribute applies to S3 copy operations and corresponds to the `--copy-source` parameter +of the [copy-object operation within the S3 API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). +This applies in particular to the following operations: + +- [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) +- [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html). +""" + +AWS_S3_DELETE: Final = "aws.s3.delete" +""" +The delete request container that specifies the objects to be deleted. +Note: The `delete` attribute is only applicable to the [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) operation. 
+The `delete` attribute corresponds to the `--delete` parameter of the +[delete-objects operation within the S3 API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). +""" + +AWS_S3_KEY: Final = "aws.s3.key" +""" +The S3 object key the request refers to. Corresponds to the `--key` parameter of the [S3 API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) operations. +Note: The `key` attribute is applicable to all object-related S3 operations, i.e. that require the object key as a mandatory parameter. +This applies in particular to the following operations: + +- [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) +- [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) +- [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) +- [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) +- [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) +- [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) +- [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) +- [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) +- [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) +- [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) +- [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) +- [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) +- [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html). 
+""" + +AWS_S3_PART_NUMBER: Final = "aws.s3.part_number" +""" +The part number of the part being uploaded in a multipart-upload operation. This is a positive integer between 1 and 10,000. +Note: The `part_number` attribute is only applicable to the [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) +and [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) operations. +The `part_number` attribute corresponds to the `--part-number` parameter of the +[upload-part operation within the S3 API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). +""" + +AWS_S3_UPLOAD_ID: Final = "aws.s3.upload_id" +""" +Upload ID that identifies the multipart upload. +Note: The `upload_id` attribute applies to S3 multipart-upload operations and corresponds to the `--upload-id` parameter +of the [S3 API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) multipart operations. +This applies in particular to the following operations: + +- [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) +- [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) +- [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) +- [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) +- [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html). 
+""" + + +class AwsEcsLaunchtypeValues(Enum): + EC2 = "ec2" + """ec2.""" + FARGATE = "fargate" + """fargate.""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/az_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/az_attributes.py new file mode 100644 index 00000000..f5569653 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/az_attributes.py @@ -0,0 +1,25 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Final + +AZ_NAMESPACE: Final = "az.namespace" +""" +[Azure Resource Provider Namespace](https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers) as recognized by the client. +""" + +AZ_SERVICE_REQUEST_ID: Final = "az.service_request_id" +""" +The unique identifier of the service request. It's generated by the Azure service and returned with the response. 
+""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/azure_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/azure_attributes.py new file mode 100644 index 00000000..2f9d1372 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/azure_attributes.py @@ -0,0 +1,78 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum +from typing import Final + +AZURE_CLIENT_ID: Final = "azure.client.id" +""" +The unique identifier of the client instance. +""" + +AZURE_COSMOSDB_CONNECTION_MODE: Final = "azure.cosmosdb.connection.mode" +""" +Cosmos client connection mode. +""" + +AZURE_COSMOSDB_CONSISTENCY_LEVEL: Final = "azure.cosmosdb.consistency.level" +""" +Account or request [consistency level](https://learn.microsoft.com/azure/cosmos-db/consistency-levels). +""" + +AZURE_COSMOSDB_OPERATION_CONTACTED_REGIONS: Final = ( + "azure.cosmosdb.operation.contacted_regions" +) +""" +List of regions contacted during operation in the order that they were contacted. If there is more than one region listed, it indicates that the operation was performed on multiple regions i.e. cross-regional call. 
class AzureCosmosdbConnectionModeValues(Enum):
    """Allowed values for the `azure.cosmosdb.connection.mode` attribute."""

    GATEWAY = "gateway"
    """Gateway (HTTP) connection."""
    DIRECT = "direct"
    """Direct connection."""


class AzureCosmosdbConsistencyLevelValues(Enum):
    """Allowed values for the `azure.cosmosdb.consistency.level` attribute.

    Note the mixed-case member values: they mirror the Cosmos DB API's own
    spelling rather than the usual lowercase convention.
    """

    STRONG = "Strong"
    """strong."""
    BOUNDED_STALENESS = "BoundedStaleness"
    """bounded_staleness."""
    SESSION = "Session"
    """session."""
    EVENTUAL = "Eventual"
    """eventual."""
    CONSISTENT_PREFIX = "ConsistentPrefix"
    """consistent_prefix."""
# NOTE(review): auto-generated semantic-convention attribute names for the
# browser resource — presumably produced by the semconv generator; TODO
# confirm before hand-editing.

BROWSER_BRANDS: Final = "browser.brands"
"""
Array of brand name and version separated by a space.
Note: This value is intended to be taken from the [UA client hints API](https://wicg.github.io/ua-client-hints/#interface) (`navigator.userAgentData.brands`).
"""

BROWSER_LANGUAGE: Final = "browser.language"
"""
Preferred language of the user using the browser.
Note: This value is intended to be taken from the Navigator API `navigator.language`.
"""

BROWSER_MOBILE: Final = "browser.mobile"
"""
A boolean that is true if the browser is running on a mobile device.
Note: This value is intended to be taken from the [UA client hints API](https://wicg.github.io/ua-client-hints/#interface) (`navigator.userAgentData.mobile`). If unavailable, this attribute SHOULD be left unset.
"""

BROWSER_PLATFORM: Final = "browser.platform"
"""
The platform on which the browser is running.
Note: This value is intended to be taken from the [UA client hints API](https://wicg.github.io/ua-client-hints/#interface) (`navigator.userAgentData.platform`). If unavailable, the legacy `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD be left unset in order for the values to be consistent.
The list of possible values is defined in the [W3C User-Agent Client Hints specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). Note that some (but not all) of these values can overlap with values in the [`os.type` and `os.name` attributes](./os.md). However, for consistency, the values in the `browser.platform` attribute should capture the exact value that the user agent provides.
"""
+""" + +CASSANDRA_SPECULATIVE_EXECUTION_COUNT: Final = ( + "cassandra.speculative_execution.count" +) +""" +The number of times a query was speculatively executed. Not set or `0` if the query was not executed speculatively. +""" + + +class CassandraConsistencyLevelValues(Enum): + ALL = "all" + """all.""" + EACH_QUORUM = "each_quorum" + """each_quorum.""" + QUORUM = "quorum" + """quorum.""" + LOCAL_QUORUM = "local_quorum" + """local_quorum.""" + ONE = "one" + """one.""" + TWO = "two" + """two.""" + THREE = "three" + """three.""" + LOCAL_ONE = "local_one" + """local_one.""" + ANY = "any" + """any.""" + SERIAL = "serial" + """serial.""" + LOCAL_SERIAL = "local_serial" + """local_serial.""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/cicd_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/cicd_attributes.py new file mode 100644 index 00000000..945fc81d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/cicd_attributes.py @@ -0,0 +1,113 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum +from typing import Final + +CICD_PIPELINE_NAME: Final = "cicd.pipeline.name" +""" +The human readable name of the pipeline within a CI/CD system. +""" + +CICD_PIPELINE_RESULT: Final = "cicd.pipeline.result" +""" +The result of a pipeline run. 
+""" + +CICD_PIPELINE_RUN_ID: Final = "cicd.pipeline.run.id" +""" +The unique identifier of a pipeline run within a CI/CD system. +""" + +CICD_PIPELINE_RUN_STATE: Final = "cicd.pipeline.run.state" +""" +The pipeline run goes through these states during its lifecycle. +""" + +CICD_PIPELINE_RUN_URL_FULL: Final = "cicd.pipeline.run.url.full" +""" +The [URL](https://wikipedia.org/wiki/URL) of the pipeline run, providing the complete address in order to locate and identify the pipeline run. +""" + +CICD_PIPELINE_TASK_NAME: Final = "cicd.pipeline.task.name" +""" +The human readable name of a task within a pipeline. Task here most closely aligns with a [computing process](https://wikipedia.org/wiki/Pipeline_(computing)) in a pipeline. Other terms for tasks include commands, steps, and procedures. +""" + +CICD_PIPELINE_TASK_RUN_ID: Final = "cicd.pipeline.task.run.id" +""" +The unique identifier of a task run within a pipeline. +""" + +CICD_PIPELINE_TASK_RUN_URL_FULL: Final = "cicd.pipeline.task.run.url.full" +""" +The [URL](https://wikipedia.org/wiki/URL) of the pipeline task run, providing the complete address in order to locate and identify the pipeline task run. +""" + +CICD_PIPELINE_TASK_TYPE: Final = "cicd.pipeline.task.type" +""" +The type of the task within a pipeline. +""" + +CICD_SYSTEM_COMPONENT: Final = "cicd.system.component" +""" +The name of a component of the CICD system. +""" + +CICD_WORKER_STATE: Final = "cicd.worker.state" +""" +The state of a CICD worker / agent. +""" + + +class CicdPipelineResultValues(Enum): + SUCCESS = "success" + """The pipeline run finished successfully.""" + FAILURE = "failure" + """The pipeline run did not finish successfully, eg. due to a compile error or a failing test. Such failures are usually detected by non-zero exit codes of the tools executed in the pipeline run.""" + ERROR = "error" + """The pipeline run failed due to an error in the CICD system, eg. 
due to the worker being killed.""" + TIMEOUT = "timeout" + """A timeout caused the pipeline run to be interrupted.""" + CANCELLATION = "cancellation" + """The pipeline run was cancelled, eg. by a user manually cancelling the pipeline run.""" + SKIP = "skip" + """The pipeline run was skipped, eg. due to a precondition not being met.""" + + +class CicdPipelineRunStateValues(Enum): + PENDING = "pending" + """The run pending state spans from the event triggering the pipeline run until the execution of the run starts (eg. time spent in a queue, provisioning agents, creating run resources).""" + EXECUTING = "executing" + """The executing state spans the execution of any run tasks (eg. build, test).""" + FINALIZING = "finalizing" + """The finalizing state spans from when the run has finished executing (eg. cleanup of run resources).""" + + +class CicdPipelineTaskTypeValues(Enum): + BUILD = "build" + """build.""" + TEST = "test" + """test.""" + DEPLOY = "deploy" + """deploy.""" + + +class CicdWorkerStateValues(Enum): + AVAILABLE = "available" + """The worker is not performing work for the CICD system. It is available to the CICD system to perform work on (online / idle).""" + BUSY = "busy" + """The worker is performing work for the CICD system.""" + OFFLINE = "offline" + """The worker is not available to the CICD system (disconnected / down).""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/client_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/client_attributes.py new file mode 100644 index 00000000..a6511e76 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/client_attributes.py @@ -0,0 +1,25 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Final + +CLIENT_ADDRESS: Final = "client.address" +""" +Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.client_attributes.CLIENT_ADDRESS`. +""" + +CLIENT_PORT: Final = "client.port" +""" +Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.client_attributes.CLIENT_PORT`. +""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/cloud_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/cloud_attributes.py new file mode 100644 index 00000000..e32cf8d7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/cloud_attributes.py @@ -0,0 +1,148 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum +from typing import Final + +CLOUD_ACCOUNT_ID: Final = "cloud.account.id" +""" +The cloud account ID the resource is assigned to. 
+""" + +CLOUD_AVAILABILITY_ZONE: Final = "cloud.availability_zone" +""" +Cloud regions often have multiple, isolated locations known as zones to increase availability. Availability zone represents the zone where the resource is running. +Note: Availability zones are called "zones" on Alibaba Cloud and Google Cloud. +""" + +CLOUD_PLATFORM: Final = "cloud.platform" +""" +The cloud platform in use. +Note: The prefix of the service SHOULD match the one specified in `cloud.provider`. +""" + +CLOUD_PROVIDER: Final = "cloud.provider" +""" +Name of the cloud provider. +""" + +CLOUD_REGION: Final = "cloud.region" +""" +The geographical region the resource is running. +Note: Refer to your provider's docs to see the available regions, for example [Alibaba Cloud regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), [Azure regions](https://azure.microsoft.com/global-infrastructure/geographies/), [Google Cloud regions](https://cloud.google.com/about/locations), or [Tencent Cloud regions](https://www.tencentcloud.com/document/product/213/6091). +""" + +CLOUD_RESOURCE_ID: Final = "cloud.resource_id" +""" +Cloud provider-specific native identifier of the monitored cloud resource (e.g. an [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) on AWS, a [fully qualified resource ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on Azure, a [full resource name](https://google.aip.dev/122#full-resource-names) on GCP). +Note: On some cloud providers, it may not be possible to determine the full ID at startup, +so it may be necessary to set `cloud.resource_id` as a span attribute instead. + +The exact value to use for `cloud.resource_id` depends on the cloud provider. 
+The following well-known definitions MUST be used if you set this attribute and they apply: + +- **AWS Lambda:** The function [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + Take care not to use the "invoked ARN" directly but replace any + [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) + with the resolved function version, as the same runtime instance may be invocable with + multiple different aliases. +- **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full-resource-names) +- **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id) of the invoked function, + *not* the function app, having the form + `/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`. + This means that a span attribute MUST be used, as an Azure function app can host multiple functions that would usually share + a TracerProvider. 
+""" + + +class CloudPlatformValues(Enum): + ALIBABA_CLOUD_ECS = "alibaba_cloud_ecs" + """Alibaba Cloud Elastic Compute Service.""" + ALIBABA_CLOUD_FC = "alibaba_cloud_fc" + """Alibaba Cloud Function Compute.""" + ALIBABA_CLOUD_OPENSHIFT = "alibaba_cloud_openshift" + """Red Hat OpenShift on Alibaba Cloud.""" + AWS_EC2 = "aws_ec2" + """AWS Elastic Compute Cloud.""" + AWS_ECS = "aws_ecs" + """AWS Elastic Container Service.""" + AWS_EKS = "aws_eks" + """AWS Elastic Kubernetes Service.""" + AWS_LAMBDA = "aws_lambda" + """AWS Lambda.""" + AWS_ELASTIC_BEANSTALK = "aws_elastic_beanstalk" + """AWS Elastic Beanstalk.""" + AWS_APP_RUNNER = "aws_app_runner" + """AWS App Runner.""" + AWS_OPENSHIFT = "aws_openshift" + """Red Hat OpenShift on AWS (ROSA).""" + AZURE_VM = "azure_vm" + """Azure Virtual Machines.""" + AZURE_CONTAINER_APPS = "azure_container_apps" + """Azure Container Apps.""" + AZURE_CONTAINER_INSTANCES = "azure_container_instances" + """Azure Container Instances.""" + AZURE_AKS = "azure_aks" + """Azure Kubernetes Service.""" + AZURE_FUNCTIONS = "azure_functions" + """Azure Functions.""" + AZURE_APP_SERVICE = "azure_app_service" + """Azure App Service.""" + AZURE_OPENSHIFT = "azure_openshift" + """Azure Red Hat OpenShift.""" + GCP_BARE_METAL_SOLUTION = "gcp_bare_metal_solution" + """Google Bare Metal Solution (BMS).""" + GCP_COMPUTE_ENGINE = "gcp_compute_engine" + """Google Cloud Compute Engine (GCE).""" + GCP_CLOUD_RUN = "gcp_cloud_run" + """Google Cloud Run.""" + GCP_KUBERNETES_ENGINE = "gcp_kubernetes_engine" + """Google Cloud Kubernetes Engine (GKE).""" + GCP_CLOUD_FUNCTIONS = "gcp_cloud_functions" + """Google Cloud Functions (GCF).""" + GCP_APP_ENGINE = "gcp_app_engine" + """Google Cloud App Engine (GAE).""" + GCP_OPENSHIFT = "gcp_openshift" + """Red Hat OpenShift on Google Cloud.""" + IBM_CLOUD_OPENSHIFT = "ibm_cloud_openshift" + """Red Hat OpenShift on IBM Cloud.""" + ORACLE_CLOUD_COMPUTE = "oracle_cloud_compute" + """Compute on Oracle Cloud Infrastructure 
(OCI).""" + ORACLE_CLOUD_OKE = "oracle_cloud_oke" + """Kubernetes Engine (OKE) on Oracle Cloud Infrastructure (OCI).""" + TENCENT_CLOUD_CVM = "tencent_cloud_cvm" + """Tencent Cloud Cloud Virtual Machine (CVM).""" + TENCENT_CLOUD_EKS = "tencent_cloud_eks" + """Tencent Cloud Elastic Kubernetes Service (EKS).""" + TENCENT_CLOUD_SCF = "tencent_cloud_scf" + """Tencent Cloud Serverless Cloud Function (SCF).""" + + +class CloudProviderValues(Enum): + ALIBABA_CLOUD = "alibaba_cloud" + """Alibaba Cloud.""" + AWS = "aws" + """Amazon Web Services.""" + AZURE = "azure" + """Microsoft Azure.""" + GCP = "gcp" + """Google Cloud Platform.""" + HEROKU = "heroku" + """Heroku Platform as a Service.""" + IBM_CLOUD = "ibm_cloud" + """IBM Cloud.""" + ORACLE_CLOUD = "oracle_cloud" + """Oracle Cloud Infrastructure (OCI).""" + TENCENT_CLOUD = "tencent_cloud" + """Tencent Cloud.""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/cloudevents_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/cloudevents_attributes.py new file mode 100644 index 00000000..ca13ee99 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/cloudevents_attributes.py @@ -0,0 +1,40 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import Final + +CLOUDEVENTS_EVENT_ID: Final = "cloudevents.event_id" +""" +The [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) uniquely identifies the event. +""" + +CLOUDEVENTS_EVENT_SOURCE: Final = "cloudevents.event_source" +""" +The [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) identifies the context in which an event happened. +""" + +CLOUDEVENTS_EVENT_SPEC_VERSION: Final = "cloudevents.event_spec_version" +""" +The [version of the CloudEvents specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) which the event uses. +""" + +CLOUDEVENTS_EVENT_SUBJECT: Final = "cloudevents.event_subject" +""" +The [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) of the event in the context of the event producer (identified by source). +""" + +CLOUDEVENTS_EVENT_TYPE: Final = "cloudevents.event_type" +""" +The [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) contains a value describing the type of event related to the originating occurrence. +""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/cloudfoundry_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/cloudfoundry_attributes.py new file mode 100644 index 00000000..31b2d85a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/cloudfoundry_attributes.py @@ -0,0 +1,118 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Final + +CLOUDFOUNDRY_APP_ID: Final = "cloudfoundry.app.id" +""" +The guid of the application. +Note: Application instrumentation should use the value from environment +variable `VCAP_APPLICATION.application_id`. This is the same value as +reported by `cf app <app-name> --guid`. +""" + +CLOUDFOUNDRY_APP_INSTANCE_ID: Final = "cloudfoundry.app.instance.id" +""" +The index of the application instance. 0 when just one instance is active. +Note: CloudFoundry defines the `instance_id` in the [Loggregator v2 envelope](https://github.com/cloudfoundry/loggregator-api#v2-envelope). +It is used for logs and metrics emitted by CloudFoundry. It is +supposed to contain the application instance index for applications +deployed on the runtime. + +Application instrumentation should use the value from environment +variable `CF_INSTANCE_INDEX`. +""" + +CLOUDFOUNDRY_APP_NAME: Final = "cloudfoundry.app.name" +""" +The name of the application. +Note: Application instrumentation should use the value from environment +variable `VCAP_APPLICATION.application_name`. This is the same value +as reported by `cf apps`. +""" + +CLOUDFOUNDRY_ORG_ID: Final = "cloudfoundry.org.id" +""" +The guid of the CloudFoundry org the application is running in. +Note: Application instrumentation should use the value from environment +variable `VCAP_APPLICATION.org_id`. This is the same value as +reported by `cf org <org-name> --guid`. +""" + +CLOUDFOUNDRY_ORG_NAME: Final = "cloudfoundry.org.name" +""" +The name of the CloudFoundry organization the app is running in. 
+Note: Application instrumentation should use the value from environment +variable `VCAP_APPLICATION.org_name`. This is the same value as +reported by `cf orgs`. +""" + +CLOUDFOUNDRY_PROCESS_ID: Final = "cloudfoundry.process.id" +""" +The UID identifying the process. +Note: Application instrumentation should use the value from environment +variable `VCAP_APPLICATION.process_id`. It is supposed to be equal to +`VCAP_APPLICATION.app_id` for applications deployed to the runtime. +For system components, this could be the actual PID. +""" + +CLOUDFOUNDRY_PROCESS_TYPE: Final = "cloudfoundry.process.type" +""" +The type of process. +Note: CloudFoundry applications can consist of multiple jobs. Usually the +main process will be of type `web`. There can be additional background +tasks or side-cars with different process types. +""" + +CLOUDFOUNDRY_SPACE_ID: Final = "cloudfoundry.space.id" +""" +The guid of the CloudFoundry space the application is running in. +Note: Application instrumentation should use the value from environment +variable `VCAP_APPLICATION.space_id`. This is the same value as +reported by `cf space <space-name> --guid`. +""" + +CLOUDFOUNDRY_SPACE_NAME: Final = "cloudfoundry.space.name" +""" +The name of the CloudFoundry space the application is running in. +Note: Application instrumentation should use the value from environment +variable `VCAP_APPLICATION.space_name`. This is the same value as +reported by `cf spaces`. +""" + +CLOUDFOUNDRY_SYSTEM_ID: Final = "cloudfoundry.system.id" +""" +A guid or another name describing the event source. +Note: CloudFoundry defines the `source_id` in the [Loggregator v2 envelope](https://github.com/cloudfoundry/loggregator-api#v2-envelope). +It is used for logs and metrics emitted by CloudFoundry. It is +supposed to contain the component name, e.g. "gorouter", for +CloudFoundry components. 
+ +When system components are instrumented, values from the +[Bosh spec](https://bosh.io/docs/jobs/#properties-spec) +should be used. The `system.id` should be set to +`spec.deployment/spec.name`. +""" + +CLOUDFOUNDRY_SYSTEM_INSTANCE_ID: Final = "cloudfoundry.system.instance.id" +""" +A guid describing the concrete instance of the event source. +Note: CloudFoundry defines the `instance_id` in the [Loggregator v2 envelope](https://github.com/cloudfoundry/loggregator-api#v2-envelope). +It is used for logs and metrics emitted by CloudFoundry. It is +supposed to contain the vm id for CloudFoundry components. + +When system components are instrumented, values from the +[Bosh spec](https://bosh.io/docs/jobs/#properties-spec) +should be used. The `system.instance.id` should be set to `spec.id`. +""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/code_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/code_attributes.py new file mode 100644 index 00000000..1ca1ecb2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/code_attributes.py @@ -0,0 +1,80 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Final + +CODE_COLUMN: Final = "code.column" +""" +Deprecated: Replaced by `code.column.number`. 
+""" + +CODE_COLUMN_NUMBER: Final = "code.column.number" +""" +The column number in `code.file.path` best representing the operation. It SHOULD point within the code unit named in `code.function.name`. +""" + +CODE_FILE_PATH: Final = "code.file.path" +""" +The source code file name that identifies the code unit as uniquely as possible (preferably an absolute file path). +""" + +CODE_FILEPATH: Final = "code.filepath" +""" +Deprecated: Replaced by `code.file.path`. +""" + +CODE_FUNCTION: Final = "code.function" +""" +Deprecated: Replaced by `code.function.name`. +""" + +CODE_FUNCTION_NAME: Final = "code.function.name" +""" +The method or function fully-qualified name without arguments. The value should fit the natural representation of the language runtime, which is also likely the same used within `code.stacktrace` attribute value. +Note: Values and format depends on each language runtime, thus it is impossible to provide an exhaustive list of examples. +The values are usually the same (or prefixes of) the ones found in native stack trace representation stored in +`code.stacktrace` without information on arguments. + +Examples: + +* Java method: `com.example.MyHttpService.serveRequest` +* Java anonymous class method: `com.mycompany.Main$1.myMethod` +* Java lambda method: `com.mycompany.Main$$Lambda/0x0000748ae4149c00.myMethod` +* PHP function: `GuzzleHttp\\Client::transfer +* Go function: `github.com/my/repo/pkg.foo.func5` +* Elixir: `OpenTelemetry.Ctx.new` +* Erlang: `opentelemetry_ctx:new` +* Rust: `playground::my_module::my_cool_func` +* C function: `fopen`. +""" + +CODE_LINE_NUMBER: Final = "code.line.number" +""" +The line number in `code.file.path` best representing the operation. It SHOULD point within the code unit named in `code.function.name`. +""" + +CODE_LINENO: Final = "code.lineno" +""" +Deprecated: Replaced by `code.line.number`. 
+""" + +CODE_NAMESPACE: Final = "code.namespace" +""" +Deprecated: Value should be included in `code.function.name` which is expected to be a fully-qualified name. +""" + +CODE_STACKTRACE: Final = "code.stacktrace" +""" +A stacktrace as a string in the natural representation for the language runtime. The representation is identical to [`exception.stacktrace`](/docs/exceptions/exceptions-spans.md#stacktrace-representation). +""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/container_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/container_attributes.py new file mode 100644 index 00000000..9c5be2b6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/container_attributes.py @@ -0,0 +1,112 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum +from typing import Final + +from deprecated import deprecated + +CONTAINER_COMMAND: Final = "container.command" +""" +The command used to run the container (i.e. the command name). +Note: If using embedded credentials or sensitive data, it is recommended to remove them to prevent potential leakage. +""" + +CONTAINER_COMMAND_ARGS: Final = "container.command_args" +""" +All the command arguments (including the command/executable itself) run by the container. 
+""" + +CONTAINER_COMMAND_LINE: Final = "container.command_line" +""" +The full command run by the container as a single string representing the full command. +""" + +CONTAINER_CPU_STATE: Final = "container.cpu.state" +""" +Deprecated: Replaced by `cpu.mode`. +""" + +CONTAINER_CSI_PLUGIN_NAME: Final = "container.csi.plugin.name" +""" +The name of the CSI ([Container Storage Interface](https://github.com/container-storage-interface/spec)) plugin used by the volume. +Note: This can sometimes be referred to as a "driver" in CSI implementations. This should represent the `name` field of the GetPluginInfo RPC. +""" + +CONTAINER_CSI_VOLUME_ID: Final = "container.csi.volume.id" +""" +The unique volume ID returned by the CSI ([Container Storage Interface](https://github.com/container-storage-interface/spec)) plugin. +Note: This can sometimes be referred to as a "volume handle" in CSI implementations. This should represent the `Volume.volume_id` field in CSI spec. +""" + +CONTAINER_ID: Final = "container.id" +""" +Container ID. Usually a UUID, as for example used to [identify Docker containers](https://docs.docker.com/engine/containers/run/#container-identification). The UUID might be abbreviated. +""" + +CONTAINER_IMAGE_ID: Final = "container.image.id" +""" +Runtime specific image identifier. Usually a hash algorithm followed by a UUID. +Note: Docker defines a sha256 of the image id; `container.image.id` corresponds to the `Image` field from the Docker container inspect [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect) endpoint. +K8s defines a link to the container registry repository with digest `"imageID": "registry.azurecr.io /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`. +The ID is assigned by the container runtime and can vary in different environments. Consider using `oci.manifest.digest` if it is important to identify the same image in different environments/runtimes. 
+""" + +CONTAINER_IMAGE_NAME: Final = "container.image.name" +""" +Name of the image the container was built on. +""" + +CONTAINER_IMAGE_REPO_DIGESTS: Final = "container.image.repo_digests" +""" +Repo digests of the container image as provided by the container runtime. +Note: [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect) and [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238) report those under the `RepoDigests` field. +""" + +CONTAINER_IMAGE_TAGS: Final = "container.image.tags" +""" +Container image tags. An example can be found in [Docker Image Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). Should be only the `<tag>` section of the full name for example from `registry.example.com/my-org/my-image:<tag>`. +""" + +CONTAINER_LABEL_TEMPLATE: Final = "container.label" +""" +Container labels, `<key>` being the label name, the value being the label value. +""" + +CONTAINER_LABELS_TEMPLATE: Final = "container.labels" +""" +Deprecated: Replaced by `container.label`. +""" + +CONTAINER_NAME: Final = "container.name" +""" +Container name used by container runtime. +""" + +CONTAINER_RUNTIME: Final = "container.runtime" +""" +The container runtime managing this container. +""" + + +@deprecated( + reason="The attribute container.cpu.state is deprecated - Replaced by `cpu.mode`" +) # type: ignore +class ContainerCpuStateValues(Enum): + USER = "user" + """When tasks of the cgroup are in user mode (Linux). When all container processes are in user mode (Windows).""" + SYSTEM = "system" + """When CPU is used by the system (host OS).""" + KERNEL = "kernel" + """When tasks of the cgroup are in kernel mode (Linux). 
When all container processes are in kernel mode (Windows).""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/cpu_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/cpu_attributes.py new file mode 100644 index 00000000..e960e203 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/cpu_attributes.py @@ -0,0 +1,45 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum +from typing import Final + +CPU_LOGICAL_NUMBER: Final = "cpu.logical_number" +""" +The logical CPU number [0..n-1]. +""" + +CPU_MODE: Final = "cpu.mode" +""" +The mode of the CPU. 
+""" + + +class CpuModeValues(Enum): + USER = "user" + """user.""" + SYSTEM = "system" + """system.""" + NICE = "nice" + """nice.""" + IDLE = "idle" + """idle.""" + IOWAIT = "iowait" + """iowait.""" + INTERRUPT = "interrupt" + """interrupt.""" + STEAL = "steal" + """steal.""" + KERNEL = "kernel" + """kernel.""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/db_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/db_attributes.py new file mode 100644 index 00000000..bc9b7f33 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/db_attributes.py @@ -0,0 +1,595 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum +from typing import Final + +from deprecated import deprecated + +DB_CASSANDRA_CONSISTENCY_LEVEL: Final = "db.cassandra.consistency_level" +""" +Deprecated: Replaced by `cassandra.consistency.level`. +""" + +DB_CASSANDRA_COORDINATOR_DC: Final = "db.cassandra.coordinator.dc" +""" +Deprecated: Replaced by `cassandra.coordinator.dc`. +""" + +DB_CASSANDRA_COORDINATOR_ID: Final = "db.cassandra.coordinator.id" +""" +Deprecated: Replaced by `cassandra.coordinator.id`. +""" + +DB_CASSANDRA_IDEMPOTENCE: Final = "db.cassandra.idempotence" +""" +Deprecated: Replaced by `cassandra.query.idempotent`. 
+""" + +DB_CASSANDRA_PAGE_SIZE: Final = "db.cassandra.page_size" +""" +Deprecated: Replaced by `cassandra.page.size`. +""" + +DB_CASSANDRA_SPECULATIVE_EXECUTION_COUNT: Final = ( + "db.cassandra.speculative_execution_count" +) +""" +Deprecated: Replaced by `cassandra.speculative_execution.count`. +""" + +DB_CASSANDRA_TABLE: Final = "db.cassandra.table" +""" +Deprecated: Replaced by `db.collection.name`. +""" + +DB_CLIENT_CONNECTION_POOL_NAME: Final = "db.client.connection.pool.name" +""" +The name of the connection pool; unique within the instrumented application. In case the connection pool implementation doesn't provide a name, instrumentation SHOULD use a combination of parameters that would make the name unique, for example, combining attributes `server.address`, `server.port`, and `db.namespace`, formatted as `server.address:server.port/db.namespace`. Instrumentations that generate connection pool name following different patterns SHOULD document it. +""" + +DB_CLIENT_CONNECTION_STATE: Final = "db.client.connection.state" +""" +The state of a connection in the pool. +""" + +DB_CLIENT_CONNECTIONS_POOL_NAME: Final = "db.client.connections.pool.name" +""" +Deprecated: Replaced by `db.client.connection.pool.name`. +""" + +DB_CLIENT_CONNECTIONS_STATE: Final = "db.client.connections.state" +""" +Deprecated: Replaced by `db.client.connection.state`. +""" + +DB_COLLECTION_NAME: Final = "db.collection.name" +""" +The name of a collection (table, container) within the database. +Note: It is RECOMMENDED to capture the value as provided by the application +without attempting to do any case normalization. + +The collection name SHOULD NOT be extracted from `db.query.text`, +when the database system supports cross-table queries in non-batch operations. + +For batch operations, if the individual operations are known to have the same +collection name then that collection name SHOULD be used. 
+""" + +DB_CONNECTION_STRING: Final = "db.connection_string" +""" +Deprecated: Replaced by `server.address` and `server.port`. +""" + +DB_COSMOSDB_CLIENT_ID: Final = "db.cosmosdb.client_id" +""" +Deprecated: Replaced by `azure.client.id`. +""" + +DB_COSMOSDB_CONNECTION_MODE: Final = "db.cosmosdb.connection_mode" +""" +Deprecated: Replaced by `azure.cosmosdb.connection.mode`. +""" + +DB_COSMOSDB_CONSISTENCY_LEVEL: Final = "db.cosmosdb.consistency_level" +""" +Deprecated: Replaced by `azure.cosmosdb.consistency.level`. +""" + +DB_COSMOSDB_CONTAINER: Final = "db.cosmosdb.container" +""" +Deprecated: Replaced by `db.collection.name`. +""" + +DB_COSMOSDB_OPERATION_TYPE: Final = "db.cosmosdb.operation_type" +""" +Deprecated: No replacement at this time. +""" + +DB_COSMOSDB_REGIONS_CONTACTED: Final = "db.cosmosdb.regions_contacted" +""" +Deprecated: Replaced by `azure.cosmosdb.operation.contacted_regions`. +""" + +DB_COSMOSDB_REQUEST_CHARGE: Final = "db.cosmosdb.request_charge" +""" +Deprecated: Replaced by `azure.cosmosdb.operation.request_charge`. +""" + +DB_COSMOSDB_REQUEST_CONTENT_LENGTH: Final = ( + "db.cosmosdb.request_content_length" +) +""" +Deprecated: Replaced by `azure.cosmosdb.request.body.size`. +""" + +DB_COSMOSDB_STATUS_CODE: Final = "db.cosmosdb.status_code" +""" +Deprecated: Replaced by `db.response.status_code`. +""" + +DB_COSMOSDB_SUB_STATUS_CODE: Final = "db.cosmosdb.sub_status_code" +""" +Deprecated: Replaced by `azure.cosmosdb.response.sub_status_code`. +""" + +DB_ELASTICSEARCH_CLUSTER_NAME: Final = "db.elasticsearch.cluster.name" +""" +Deprecated: Replaced by `db.namespace`. +""" + +DB_ELASTICSEARCH_NODE_NAME: Final = "db.elasticsearch.node.name" +""" +Deprecated: Replaced by `elasticsearch.node.name`. +""" + +DB_ELASTICSEARCH_PATH_PARTS_TEMPLATE: Final = "db.elasticsearch.path_parts" +""" +Deprecated: Replaced by `db.operation.parameter`. 
+""" + +DB_INSTANCE_ID: Final = "db.instance.id" +""" +Deprecated: Deprecated, no general replacement at this time. For Elasticsearch, use `db.elasticsearch.node.name` instead. +""" + +DB_JDBC_DRIVER_CLASSNAME: Final = "db.jdbc.driver_classname" +""" +Deprecated: Removed as not used. +""" + +DB_MONGODB_COLLECTION: Final = "db.mongodb.collection" +""" +Deprecated: Replaced by `db.collection.name`. +""" + +DB_MSSQL_INSTANCE_NAME: Final = "db.mssql.instance_name" +""" +Deprecated: Deprecated, no replacement at this time. +""" + +DB_NAME: Final = "db.name" +""" +Deprecated: Replaced by `db.namespace`. +""" + +DB_NAMESPACE: Final = "db.namespace" +""" +The name of the database, fully qualified within the server address and port. +Note: If a database system has multiple namespace components, they SHOULD be concatenated (potentially using database system specific conventions) from most general to most specific namespace component, and more specific namespaces SHOULD NOT be captured without the more general namespaces, to ensure that "startswith" queries for the more general namespaces will be valid. +Semantic conventions for individual database systems SHOULD document what `db.namespace` means in the context of that system. +It is RECOMMENDED to capture the value as provided by the application without attempting to do any case normalization. +""" + +DB_OPERATION: Final = "db.operation" +""" +Deprecated: Replaced by `db.operation.name`. +""" + +DB_OPERATION_BATCH_SIZE: Final = "db.operation.batch.size" +""" +The number of queries included in a batch operation. +Note: Operations are only considered batches when they contain two or more operations, and so `db.operation.batch.size` SHOULD never be `1`. +""" + +DB_OPERATION_NAME: Final = "db.operation.name" +""" +The name of the operation or command being executed. +Note: It is RECOMMENDED to capture the value as provided by the application +without attempting to do any case normalization. 
+ +The operation name SHOULD NOT be extracted from `db.query.text`, +when the database system supports cross-table queries in non-batch operations. + +For batch operations, if the individual operations are known to have the same operation name +then that operation name SHOULD be used prepended by `BATCH `, +otherwise `db.operation.name` SHOULD be `BATCH` or some other database +system specific term if more applicable. +""" + +DB_OPERATION_PARAMETER_TEMPLATE: Final = "db.operation.parameter" +""" +A database operation parameter, with `<key>` being the parameter name, and the attribute value being a string representation of the parameter value. +Note: If a parameter has no name and instead is referenced only by index, then `<key>` SHOULD be the 0-based index. +If `db.query.text` is also captured, then `db.operation.parameter.<key>` SHOULD match up with the parameterized placeholders present in `db.query.text`. +""" + +DB_QUERY_PARAMETER_TEMPLATE: Final = "db.query.parameter" +""" +Deprecated: Replaced by `db.operation.parameter`. +""" + +DB_QUERY_SUMMARY: Final = "db.query.summary" +""" +Low cardinality representation of a database query text. +Note: `db.query.summary` provides static summary of the query text. It describes a class of database queries and is useful as a grouping key, especially when analyzing telemetry for database calls involving complex queries. +Summary may be available to the instrumentation through instrumentation hooks or other means. If it is not available, instrumentations that support query parsing SHOULD generate a summary following [Generating query summary](../database/database-spans.md#generating-a-summary-of-the-query-text) section. +""" + +DB_QUERY_TEXT: Final = "db.query.text" +""" +The database query being executed. +Note: For sanitization see [Sanitization of `db.query.text`](../database/database-spans.md#sanitization-of-dbquerytext). 
+For batch operations, if the individual operations are known to have the same query text then that query text SHOULD be used, otherwise all of the individual query texts SHOULD be concatenated with separator `; ` or some other database system specific separator if more applicable. +Even though parameterized query text can potentially have sensitive data, by using a parameterized query the user is giving a strong signal that any sensitive data will be passed as parameter values, and the benefit to observability of capturing the static part of the query text by default outweighs the risk. +""" + +DB_REDIS_DATABASE_INDEX: Final = "db.redis.database_index" +""" +Deprecated: Replaced by `db.namespace`. +""" + +DB_RESPONSE_RETURNED_ROWS: Final = "db.response.returned_rows" +""" +Number of rows returned by the operation. +""" + +DB_RESPONSE_STATUS_CODE: Final = "db.response.status_code" +""" +Database response status code. +Note: The status code returned by the database. Usually it represents an error code, but may also represent partial success, warning, or differentiate between various types of successful outcomes. +Semantic conventions for individual database systems SHOULD document what `db.response.status_code` means in the context of that system. +""" + +DB_SQL_TABLE: Final = "db.sql.table" +""" +Deprecated: Replaced by `db.collection.name`. +""" + +DB_STATEMENT: Final = "db.statement" +""" +Deprecated: Replaced by `db.query.text`. +""" + +DB_SYSTEM: Final = "db.system" +""" +Deprecated: Replaced by `db.system.name`. +""" + +DB_SYSTEM_NAME: Final = "db.system.name" +""" +The database management system (DBMS) product as identified by the client instrumentation. +Note: The actual DBMS may differ from the one identified by the client. For example, when using PostgreSQL client libraries to connect to a CockroachDB, the `db.system.name` is set to `postgresql` based on the instrumentation's best knowledge. 
+""" + +DB_USER: Final = "db.user" +""" +Deprecated: No replacement at this time. +""" + + +@deprecated( + reason="The attribute db.cassandra.consistency_level is deprecated - Replaced by `cassandra.consistency.level`" +) # type: ignore +class DbCassandraConsistencyLevelValues(Enum): + ALL = "all" + """all.""" + EACH_QUORUM = "each_quorum" + """each_quorum.""" + QUORUM = "quorum" + """quorum.""" + LOCAL_QUORUM = "local_quorum" + """local_quorum.""" + ONE = "one" + """one.""" + TWO = "two" + """two.""" + THREE = "three" + """three.""" + LOCAL_ONE = "local_one" + """local_one.""" + ANY = "any" + """any.""" + SERIAL = "serial" + """serial.""" + LOCAL_SERIAL = "local_serial" + """local_serial.""" + + +class DbClientConnectionStateValues(Enum): + IDLE = "idle" + """idle.""" + USED = "used" + """used.""" + + +@deprecated( + reason="The attribute db.client.connections.state is deprecated - Replaced by `db.client.connection.state`" +) # type: ignore +class DbClientConnectionsStateValues(Enum): + IDLE = "idle" + """idle.""" + USED = "used" + """used.""" + + +@deprecated( + reason="The attribute db.cosmosdb.connection_mode is deprecated - Replaced by `azure.cosmosdb.connection.mode`" +) # type: ignore +class DbCosmosdbConnectionModeValues(Enum): + GATEWAY = "gateway" + """Gateway (HTTP) connection.""" + DIRECT = "direct" + """Direct connection.""" + + +@deprecated( + reason="The attribute db.cosmosdb.consistency_level is deprecated - Replaced by `azure.cosmosdb.consistency.level`" +) # type: ignore +class DbCosmosdbConsistencyLevelValues(Enum): + STRONG = "Strong" + """strong.""" + BOUNDED_STALENESS = "BoundedStaleness" + """bounded_staleness.""" + SESSION = "Session" + """session.""" + EVENTUAL = "Eventual" + """eventual.""" + CONSISTENT_PREFIX = "ConsistentPrefix" + """consistent_prefix.""" + + +@deprecated( + reason="The attribute db.cosmosdb.operation_type is deprecated - No replacement at this time" +) # type: ignore +class DbCosmosdbOperationTypeValues(Enum): + BATCH = 
"batch" + """batch.""" + CREATE = "create" + """create.""" + DELETE = "delete" + """delete.""" + EXECUTE = "execute" + """execute.""" + EXECUTE_JAVASCRIPT = "execute_javascript" + """execute_javascript.""" + INVALID = "invalid" + """invalid.""" + HEAD = "head" + """head.""" + HEAD_FEED = "head_feed" + """head_feed.""" + PATCH = "patch" + """patch.""" + QUERY = "query" + """query.""" + QUERY_PLAN = "query_plan" + """query_plan.""" + READ = "read" + """read.""" + READ_FEED = "read_feed" + """read_feed.""" + REPLACE = "replace" + """replace.""" + UPSERT = "upsert" + """upsert.""" + + +@deprecated( + reason="The attribute db.system is deprecated - Replaced by `db.system.name`" +) # type: ignore +class DbSystemValues(Enum): + OTHER_SQL = "other_sql" + """Some other SQL database. Fallback only. See notes.""" + ADABAS = "adabas" + """Adabas (Adaptable Database System).""" + CACHE = "cache" + """Deprecated: Replaced by `intersystems_cache`.""" + INTERSYSTEMS_CACHE = "intersystems_cache" + """InterSystems Caché.""" + CASSANDRA = "cassandra" + """Apache Cassandra.""" + CLICKHOUSE = "clickhouse" + """ClickHouse.""" + CLOUDSCAPE = "cloudscape" + """Deprecated: Replaced by `other_sql`.""" + COCKROACHDB = "cockroachdb" + """CockroachDB.""" + COLDFUSION = "coldfusion" + """Deprecated: Removed.""" + COSMOSDB = "cosmosdb" + """Microsoft Azure Cosmos DB.""" + COUCHBASE = "couchbase" + """Couchbase.""" + COUCHDB = "couchdb" + """CouchDB.""" + DB2 = "db2" + """IBM Db2.""" + DERBY = "derby" + """Apache Derby.""" + DYNAMODB = "dynamodb" + """Amazon DynamoDB.""" + EDB = "edb" + """EnterpriseDB.""" + ELASTICSEARCH = "elasticsearch" + """Elasticsearch.""" + FILEMAKER = "filemaker" + """FileMaker.""" + FIREBIRD = "firebird" + """Firebird.""" + FIRSTSQL = "firstsql" + """Deprecated: Replaced by `other_sql`.""" + GEODE = "geode" + """Apache Geode.""" + H2 = "h2" + """H2.""" + HANADB = "hanadb" + """SAP HANA.""" + HBASE = "hbase" + """Apache HBase.""" + HIVE = "hive" + """Apache Hive.""" + 
HSQLDB = "hsqldb" + """HyperSQL DataBase.""" + INFLUXDB = "influxdb" + """InfluxDB.""" + INFORMIX = "informix" + """Informix.""" + INGRES = "ingres" + """Ingres.""" + INSTANTDB = "instantdb" + """InstantDB.""" + INTERBASE = "interbase" + """InterBase.""" + MARIADB = "mariadb" + """MariaDB (This value has stability level RELEASE CANDIDATE).""" + MAXDB = "maxdb" + """SAP MaxDB.""" + MEMCACHED = "memcached" + """Memcached.""" + MONGODB = "mongodb" + """MongoDB.""" + MSSQL = "mssql" + """Microsoft SQL Server (This value has stability level RELEASE CANDIDATE).""" + MSSQLCOMPACT = "mssqlcompact" + """Deprecated: Removed, use `other_sql` instead.""" + MYSQL = "mysql" + """MySQL (This value has stability level RELEASE CANDIDATE).""" + NEO4J = "neo4j" + """Neo4j.""" + NETEZZA = "netezza" + """Netezza.""" + OPENSEARCH = "opensearch" + """OpenSearch.""" + ORACLE = "oracle" + """Oracle Database.""" + PERVASIVE = "pervasive" + """Pervasive PSQL.""" + POINTBASE = "pointbase" + """PointBase.""" + POSTGRESQL = "postgresql" + """PostgreSQL (This value has stability level RELEASE CANDIDATE).""" + PROGRESS = "progress" + """Progress Database.""" + REDIS = "redis" + """Redis.""" + REDSHIFT = "redshift" + """Amazon Redshift.""" + SPANNER = "spanner" + """Cloud Spanner.""" + SQLITE = "sqlite" + """SQLite.""" + SYBASE = "sybase" + """Sybase.""" + TERADATA = "teradata" + """Teradata.""" + TRINO = "trino" + """Trino.""" + VERTICA = "vertica" + """Vertica.""" + + +class DbSystemNameValues(Enum): + OTHER_SQL = "other_sql" + """Some other SQL database. 
Fallback only.""" + SOFTWAREAG_ADABAS = "softwareag.adabas" + """[Adabas (Adaptable Database System)](https://documentation.softwareag.com/?pf=adabas).""" + ACTIAN_INGRES = "actian.ingres" + """[Actian Ingres](https://www.actian.com/databases/ingres/).""" + AWS_DYNAMODB = "aws.dynamodb" + """[Amazon DynamoDB](https://aws.amazon.com/pm/dynamodb/).""" + AWS_REDSHIFT = "aws.redshift" + """[Amazon Redshift](https://aws.amazon.com/redshift/).""" + AZURE_COSMOSDB = "azure.cosmosdb" + """[Azure Cosmos DB](https://learn.microsoft.com/azure/cosmos-db).""" + INTERSYSTEMS_CACHE = "intersystems.cache" + """[InterSystems Caché](https://www.intersystems.com/products/cache/).""" + CASSANDRA = "cassandra" + """[Apache Cassandra](https://cassandra.apache.org/).""" + CLICKHOUSE = "clickhouse" + """[ClickHouse](https://clickhouse.com/).""" + COCKROACHDB = "cockroachdb" + """[CockroachDB](https://www.cockroachlabs.com/).""" + COUCHBASE = "couchbase" + """[Couchbase](https://www.couchbase.com/).""" + COUCHDB = "couchdb" + """[Apache CouchDB](https://couchdb.apache.org/).""" + DERBY = "derby" + """[Apache Derby](https://db.apache.org/derby/).""" + ELASTICSEARCH = "elasticsearch" + """[Elasticsearch](https://www.elastic.co/elasticsearch).""" + FIREBIRDSQL = "firebirdsql" + """[Firebird](https://www.firebirdsql.org/).""" + GCP_SPANNER = "gcp.spanner" + """[Google Cloud Spanner](https://cloud.google.com/spanner).""" + GEODE = "geode" + """[Apache Geode](https://geode.apache.org/).""" + H2DATABASE = "h2database" + """[H2 Database](https://h2database.com/).""" + HBASE = "hbase" + """[Apache HBase](https://hbase.apache.org/).""" + HIVE = "hive" + """[Apache Hive](https://hive.apache.org/).""" + HSQLDB = "hsqldb" + """[HyperSQL Database](https://hsqldb.org/).""" + IBM_DB2 = "ibm.db2" + """[IBM Db2](https://www.ibm.com/db2).""" + IBM_INFORMIX = "ibm.informix" + """[IBM Informix](https://www.ibm.com/products/informix).""" + IBM_NETEZZA = "ibm.netezza" + """[IBM 
Netezza](https://www.ibm.com/products/netezza).""" + INFLUXDB = "influxdb" + """[InfluxDB](https://www.influxdata.com/).""" + INSTANTDB = "instantdb" + """[Instant](https://www.instantdb.com/).""" + MARIADB = "mariadb" + """[MariaDB](https://mariadb.org/).""" + MEMCACHED = "memcached" + """[Memcached](https://memcached.org/).""" + MONGODB = "mongodb" + """[MongoDB](https://www.mongodb.com/).""" + MICROSOFT_SQL_SERVER = "microsoft.sql_server" + """[Microsoft SQL Server](https://www.microsoft.com/sql-server).""" + MYSQL = "mysql" + """[MySQL](https://www.mysql.com/).""" + NEO4J = "neo4j" + """[Neo4j](https://neo4j.com/).""" + OPENSEARCH = "opensearch" + """[OpenSearch](https://opensearch.org/).""" + ORACLE_DB = "oracle.db" + """[Oracle Database](https://www.oracle.com/database/).""" + POSTGRESQL = "postgresql" + """[PostgreSQL](https://www.postgresql.org/).""" + REDIS = "redis" + """[Redis](https://redis.io/).""" + SAP_HANA = "sap.hana" + """[SAP HANA](https://www.sap.com/products/technology-platform/hana/what-is-sap-hana.html).""" + SAP_MAXDB = "sap.maxdb" + """[SAP MaxDB](https://maxdb.sap.com/).""" + SQLITE = "sqlite" + """[SQLite](https://www.sqlite.org/).""" + TERADATA = "teradata" + """[Teradata](https://www.teradata.com/).""" + TRINO = "trino" + """[Trino](https://trino.io/).""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/deployment_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/deployment_attributes.py new file mode 100644 index 00000000..a37626c2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/deployment_attributes.py @@ -0,0 +1,55 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum +from typing import Final + +DEPLOYMENT_ENVIRONMENT: Final = "deployment.environment" +""" +Deprecated: Deprecated, use `deployment.environment.name` instead. +""" + +DEPLOYMENT_ENVIRONMENT_NAME: Final = "deployment.environment.name" +""" +Name of the [deployment environment](https://wikipedia.org/wiki/Deployment_environment) (aka deployment tier). +Note: `deployment.environment.name` does not affect the uniqueness constraints defined through +the `service.namespace`, `service.name` and `service.instance.id` resource attributes. +This implies that resources carrying the following attribute combinations MUST be +considered to be identifying the same service: + +- `service.name=frontend`, `deployment.environment.name=production` +- `service.name=frontend`, `deployment.environment.name=staging`. +""" + +DEPLOYMENT_ID: Final = "deployment.id" +""" +The id of the deployment. +""" + +DEPLOYMENT_NAME: Final = "deployment.name" +""" +The name of the deployment. +""" + +DEPLOYMENT_STATUS: Final = "deployment.status" +""" +The status of the deployment. 
+""" + + +class DeploymentStatusValues(Enum): + FAILED = "failed" + """failed.""" + SUCCEEDED = "succeeded" + """succeeded.""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/destination_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/destination_attributes.py new file mode 100644 index 00000000..8fa4949c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/destination_attributes.py @@ -0,0 +1,26 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Final + +DESTINATION_ADDRESS: Final = "destination.address" +""" +Destination address - domain name if available without reverse DNS lookup; otherwise, IP address or Unix domain socket name. +Note: When observed from the source side, and when communicating through an intermediary, `destination.address` SHOULD represent the destination address behind any intermediaries, for example proxies, if it's available. +""" + +DESTINATION_PORT: Final = "destination.port" +""" +Destination port number. 
+""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/device_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/device_attributes.py new file mode 100644 index 00000000..0a65761c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/device_attributes.py @@ -0,0 +1,39 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Final + +DEVICE_ID: Final = "device.id" +""" +A unique identifier representing the device. +Note: The device identifier MUST only be defined using the values outlined below. This value is not an advertising identifier and MUST NOT be used as such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the Firebase Installation ID or a globally unique UUID which is persisted across sessions in your application. More information can be found [here](https://developer.android.com/training/articles/user-data-ids) on best practices and exact implementation details. Caution should be taken when storing personal data or anything which can identify a user. GDPR and data protection laws may apply, ensure you do your own due diligence. 
+""" + +DEVICE_MANUFACTURER: Final = "device.manufacturer" +""" +The name of the device manufacturer. +Note: The Android OS provides this field via [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). iOS apps SHOULD hardcode the value `Apple`. +""" + +DEVICE_MODEL_IDENTIFIER: Final = "device.model.identifier" +""" +The model identifier for the device. +Note: It's recommended this value represents a machine-readable version of the model identifier rather than the market or consumer-friendly name of the device. +""" + +DEVICE_MODEL_NAME: Final = "device.model.name" +""" +The marketing name for the device model. +Note: It's recommended this value represents a human-readable version of the device model rather than a machine-readable alternative. +""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/disk_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/disk_attributes.py new file mode 100644 index 00000000..e100f1af --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/disk_attributes.py @@ -0,0 +1,28 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum +from typing import Final + +DISK_IO_DIRECTION: Final = "disk.io.direction" +""" +The disk IO operation direction. 
+""" + + +class DiskIoDirectionValues(Enum): + READ = "read" + """read.""" + WRITE = "write" + """write.""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/dns_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/dns_attributes.py new file mode 100644 index 00000000..cfb00bcf --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/dns_attributes.py @@ -0,0 +1,21 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Final + +DNS_QUESTION_NAME: Final = "dns.question.name" +""" +The name being queried. +Note: If the name field contains non-printable characters (below 32 or above 126), those characters should be represented as escaped base 10 integers (\\DDD). Back slashes and quotes should be escaped. Tabs, carriage returns, and line feeds should be converted to \\t, \\r, and \\n respectively. 
+""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/elasticsearch_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/elasticsearch_attributes.py new file mode 100644 index 00000000..24243742 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/elasticsearch_attributes.py @@ -0,0 +1,20 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Final + +ELASTICSEARCH_NODE_NAME: Final = "elasticsearch.node.name" +""" +Represents the human-readable identifier of the node/instance to which a request was routed. +""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/enduser_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/enduser_attributes.py new file mode 100644 index 00000000..80940440 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/enduser_attributes.py @@ -0,0 +1,43 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Final
+
+ENDUSER_ID: Final = "enduser.id"
+"""
+Unique identifier of an end user in the system. It may be a username, email address, or other identifier.
+Note: Unique identifier of an end user in the system.
+
+> [!Warning]
+> This field contains sensitive (PII) information.
+"""
+
+ENDUSER_PSEUDO_ID: Final = "enduser.pseudo.id"
+"""
+Pseudonymous identifier of an end user. This identifier should be a random value that is not directly linked or associated with the end user's actual identity.
+Note: Pseudonymous identifier of an end user.
+
+> [!Warning]
+> This field contains sensitive (linkable PII) information.
+"""
+
+ENDUSER_ROLE: Final = "enduser.role"
+"""
+Deprecated: Replaced by `user.roles` attribute.
+"""
+
+ENDUSER_SCOPE: Final = "enduser.scope"
+"""
+Deprecated: Removed.
+""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/error_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/error_attributes.py new file mode 100644 index 00000000..a86592be --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/error_attributes.py @@ -0,0 +1,31 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
# NOTE(review): this chunk is a diff listing of several auto-generated
# modules from opentelemetry-semantic-conventions (vendored under .venv).
# File boundaries are marked with "--- file: ... ---" comments below.
# The code is generated from the OpenTelemetry semantic-conventions model
# and should not be edited by hand; only comments have been added here.

# --- file: opentelemetry/semconv/_incubating/attributes/error_attributes.py ---
# (Apache-2.0 license header truncated above at the chunk boundary.)
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from enum import Enum
from typing import Final

# NOTE(review): `deprecated` is the third-party "Deprecated" PyPI package
# (a dependency of opentelemetry-semantic-conventions), not a stdlib module.
from deprecated import deprecated

# Incubating attribute key, superseded by the stable constant of the same
# name in opentelemetry.semconv.attributes.error_attributes.
ERROR_TYPE: Final = "error.type"
"""
Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.error_attributes.ERROR_TYPE`.
"""


# The decorator makes any use of this enum emit a DeprecationWarning via the
# Deprecated package; the `# type: ignore` silences the untyped-decorator error.
@deprecated(
    reason="Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.error_attributes.ErrorTypeValues`."
)  # type: ignore
class ErrorTypeValues(Enum):
    OTHER = "_OTHER"
    """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.error_attributes.ErrorTypeValues.OTHER`."""


# --- file: opentelemetry/semconv/_incubating/attributes/event_attributes.py ---
# (new file in this diff; index 00000000..7fa5cf49)
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Final

# Deprecated attribute key: event names moved to a top-level LogRecord field.
EVENT_NAME: Final = "event.name"
"""
Deprecated: Replaced by EventName top-level field on the LogRecord.
"""


# --- file: opentelemetry/semconv/_incubating/attributes/exception_attributes.py ---
# (new file in this diff; index 00000000..37e22148)
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Final

# All four exception.* keys below are incubating duplicates of (or removed
# from) the stable opentelemetry.semconv.attributes.exception_attributes set.
EXCEPTION_ESCAPED: Final = "exception.escaped"
"""
Deprecated: It's no longer recommended to record exceptions that are handled and do not escape the scope of a span.
"""

EXCEPTION_MESSAGE: Final = "exception.message"
"""
Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.exception_attributes.EXCEPTION_MESSAGE`.
"""

EXCEPTION_STACKTRACE: Final = "exception.stacktrace"
"""
Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.exception_attributes.EXCEPTION_STACKTRACE`.
"""

EXCEPTION_TYPE: Final = "exception.type"
"""
Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.exception_attributes.EXCEPTION_TYPE`.
"""


# --- file: opentelemetry/semconv/_incubating/attributes/faas_attributes.py ---
# (new file in this diff; index 00000000..7ba2267f)
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from enum import Enum
from typing import Final

# Incubating Function-as-a-Service (FaaS) attribute keys. The bare-string
# "attribute docstrings" after each constant are the generator's convention
# (picked up by Sphinx autodoc); they are not runtime-visible docstrings.
FAAS_COLDSTART: Final = "faas.coldstart"
"""
A boolean that is true if the serverless function is executed for the first time (aka cold-start).
"""

FAAS_CRON: Final = "faas.cron"
"""
A string containing the schedule period as [Cron Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
"""

FAAS_DOCUMENT_COLLECTION: Final = "faas.document.collection"
"""
The name of the source on which the triggering operation was performed. For example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the database name.
"""

FAAS_DOCUMENT_NAME: Final = "faas.document.name"
"""
The document name/table subjected to the operation. For example, in Cloud Storage or S3 is the name of the file, and in Cosmos DB the table name.
"""

FAAS_DOCUMENT_OPERATION: Final = "faas.document.operation"
"""
Describes the type of the operation that was performed on the data.
"""

FAAS_DOCUMENT_TIME: Final = "faas.document.time"
"""
A string containing the time when the data was accessed in the [ISO 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
"""

FAAS_INSTANCE: Final = "faas.instance"
"""
The execution environment ID as a string, that will be potentially reused for other invocations to the same function/function version.
Note: - **AWS Lambda:** Use the (full) log stream name.
"""

FAAS_INVOCATION_ID: Final = "faas.invocation_id"
"""
The invocation ID of the current function invocation.
"""

FAAS_INVOKED_NAME: Final = "faas.invoked_name"
"""
The name of the invoked function.
Note: SHOULD be equal to the `faas.name` resource attribute of the invoked function.
"""

FAAS_INVOKED_PROVIDER: Final = "faas.invoked_provider"
"""
The cloud provider of the invoked function.
Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked function.
"""

FAAS_INVOKED_REGION: Final = "faas.invoked_region"
"""
The cloud region of the invoked function.
Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked function.
"""

FAAS_MAX_MEMORY: Final = "faas.max_memory"
"""
The amount of memory available to the serverless function converted to Bytes.
Note: It's recommended to set this attribute since e.g. too little memory can easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must be multiplied by 1,048,576).
"""

FAAS_NAME: Final = "faas.name"
"""
The name of the single function that this runtime instance executes.
Note: This is the name of the function as configured/deployed on the FaaS
platform and is usually different from the name of the callback
function (which may be stored in the
[`code.namespace`/`code.function.name`](/docs/general/attributes.md#source-code-attributes)
span attributes).

For some cloud providers, the above definition is ambiguous. The following
definition of function name MUST be used for this attribute
(and consequently the span name) for the listed cloud providers/products:

- **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
  followed by a forward slash followed by the function name (this form
  can also be seen in the resource JSON for the function).
  This means that a span attribute MUST be used, as an Azure function
  app can host multiple functions that would usually share
  a TracerProvider (see also the `cloud.resource_id` attribute).
"""

FAAS_TIME: Final = "faas.time"
"""
A string containing the function invocation time in the [ISO 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
"""

FAAS_TRIGGER: Final = "faas.trigger"
"""
Type of the trigger which caused this function invocation.
"""

FAAS_VERSION: Final = "faas.version"
"""
The immutable version of the function being executed.
Note: Depending on the cloud provider and platform, use:

- **AWS Lambda:** The [function version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
  (an integer represented as a decimal string).
- **Google Cloud Run (Services):** The [revision](https://cloud.google.com/run/docs/managing/revisions)
  (i.e., the function name plus the revision suffix).
- **Google Cloud Functions:** The value of the
  [`K_REVISION` environment variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
- **Azure Functions:** Not applicable. Do not set this attribute.
"""


# Well-known values for faas.document.operation.
class FaasDocumentOperationValues(Enum):
    INSERT = "insert"
    """When a new object is created."""
    EDIT = "edit"
    """When an object is modified."""
    DELETE = "delete"
    """When an object is deleted."""


# Well-known values for faas.invoked_provider.
class FaasInvokedProviderValues(Enum):
    ALIBABA_CLOUD = "alibaba_cloud"
    """Alibaba Cloud."""
    AWS = "aws"
    """Amazon Web Services."""
    AZURE = "azure"
    """Microsoft Azure."""
    GCP = "gcp"
    """Google Cloud Platform."""
    TENCENT_CLOUD = "tencent_cloud"
    """Tencent Cloud."""


# Well-known values for faas.trigger.
class FaasTriggerValues(Enum):
    DATASOURCE = "datasource"
    """A response to some data source operation such as a database or filesystem read/write."""
    HTTP = "http"
    """To provide an answer to an inbound HTTP request."""
    PUBSUB = "pubsub"
    """A function is set to be executed when messages are sent to a messaging system."""
    TIMER = "timer"
    """A function is scheduled to be executed regularly."""
    OTHER = "other"
    """If none of the others apply."""


# --- file: opentelemetry/semconv/_incubating/attributes/feature_flag_attributes.py ---
# (new file in this diff; index 00000000..30b56abb; body continues below)
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Body of feature_flag_attributes.py (license header precedes this chunk).
# Auto-generated incubating feature-flag attribute keys; comments only added.

from enum import Enum
from typing import Final

FEATURE_FLAG_CONTEXT_ID: Final = "feature_flag.context.id"
"""
The unique identifier for the flag evaluation context. For example, the targeting key.
"""

# Parenthesized value keeps the generated line within the formatter's width.
FEATURE_FLAG_EVALUATION_ERROR_MESSAGE: Final = (
    "feature_flag.evaluation.error.message"
)
"""
A message explaining the nature of an error occurring during flag evaluation.
"""

FEATURE_FLAG_EVALUATION_REASON: Final = "feature_flag.evaluation.reason"
"""
The reason code which shows how a feature flag value was determined.
"""

FEATURE_FLAG_KEY: Final = "feature_flag.key"
"""
The lookup key of the feature flag.
"""

FEATURE_FLAG_PROVIDER_NAME: Final = "feature_flag.provider_name"
"""
Identifies the feature flag provider.
"""

FEATURE_FLAG_SET_ID: Final = "feature_flag.set.id"
"""
The identifier of the [flag set](https://openfeature.dev/specification/glossary/#flag-set) to which the feature flag belongs.
"""

# NOTE(review): "maybe be used" below is a typo in the upstream semconv
# model text; kept byte-identical to the generated output.
FEATURE_FLAG_VARIANT: Final = "feature_flag.variant"
"""
A semantic identifier for an evaluated flag value.
Note: A semantic identifier, commonly referred to as a variant, provides a means
for referring to a value without including the value itself. This can
provide additional context for understanding the meaning behind a value.
For example, the variant `red` maybe be used for the value `#c05543`.
"""

FEATURE_FLAG_VERSION: Final = "feature_flag.version"
"""
The version of the ruleset used during the evaluation. This may be any stable value which uniquely identifies the ruleset.
"""


# Well-known values for feature_flag.evaluation.reason.
class FeatureFlagEvaluationReasonValues(Enum):
    STATIC = "static"
    """The resolved value is static (no dynamic evaluation)."""
    DEFAULT = "default"
    """The resolved value fell back to a pre-configured value (no dynamic evaluation occurred or dynamic evaluation yielded no result)."""
    TARGETING_MATCH = "targeting_match"
    """The resolved value was the result of a dynamic evaluation, such as a rule or specific user-targeting."""
    SPLIT = "split"
    """The resolved value was the result of pseudorandom assignment."""
    CACHED = "cached"
    """The resolved value was retrieved from cache."""
    DISABLED = "disabled"
    """The resolved value was the result of the flag being disabled in the management system."""
    UNKNOWN = "unknown"
    """The reason for the resolved value could not be determined."""
    STALE = "stale"
    """The resolved value is non-authoritative or possibly out of date."""
    ERROR = "error"
    """The resolved value was the result of an error."""


# --- file: opentelemetry/semconv/_incubating/attributes/file_attributes.py ---
# (new file in this diff; index 00000000..97ac01e1; body continues below)
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Body of file_attributes.py (license header precedes this chunk).
# Auto-generated incubating file.* attribute keys; comments only added.

from typing import Final

FILE_ACCESSED: Final = "file.accessed"
"""
Time when the file was last accessed, in ISO 8601 format.
Note: This attribute might not be supported by some file systems — NFS, FAT32, in embedded OS, etc.
"""

FILE_ATTRIBUTES: Final = "file.attributes"
"""
Array of file attributes.
Note: Attributes names depend on the OS or file system. Here’s a non-exhaustive list of values expected for this attribute: `archive`, `compressed`, `directory`, `encrypted`, `execute`, `hidden`, `immutable`, `journaled`, `read`, `readonly`, `symbolic link`, `system`, `temporary`, `write`.
"""

FILE_CHANGED: Final = "file.changed"
"""
Time when the file attributes or metadata was last changed, in ISO 8601 format.
Note: `file.changed` captures the time when any of the file's properties or attributes (including the content) are changed, while `file.modified` captures the timestamp when the file content is modified.
"""

FILE_CREATED: Final = "file.created"
"""
Time when the file was created, in ISO 8601 format.
Note: This attribute might not be supported by some file systems — NFS, FAT32, in embedded OS, etc.
"""

FILE_DIRECTORY: Final = "file.directory"
"""
Directory where the file is located. It should include the drive letter, when appropriate.
"""

FILE_EXTENSION: Final = "file.extension"
"""
File extension, excluding the leading dot.
Note: When the file name has multiple extensions (example.tar.gz), only the last one should be captured ("gz", not "tar.gz").
"""

# The docstring below escapes backslashes (\\) because this is a non-raw
# triple-quoted string; the rendered path is C:\path\to\...
FILE_FORK_NAME: Final = "file.fork_name"
"""
Name of the fork. A fork is additional data associated with a filesystem object.
Note: On Linux, a resource fork is used to store additional data with a filesystem object. A file always has at least one fork for the data portion, and additional forks may exist.
On NTFS, this is analogous to an Alternate Data Stream (ADS), and the default data stream for a file is just called $DATA. Zone.Identifier is commonly used by Windows to track contents downloaded from the Internet. An ADS is typically of the form: C:\\path\\to\\filename.extension:some_fork_name, and some_fork_name is the value that should populate `fork_name`. `filename.extension` should populate `file.name`, and `extension` should populate `file.extension`. The full path, `file.path`, will include the fork name.
"""

FILE_GROUP_ID: Final = "file.group.id"
"""
Primary Group ID (GID) of the file.
"""

FILE_GROUP_NAME: Final = "file.group.name"
"""
Primary group name of the file.
"""

FILE_INODE: Final = "file.inode"
"""
Inode representing the file in the filesystem.
"""

FILE_MODE: Final = "file.mode"
"""
Mode of the file in octal representation.
"""

FILE_MODIFIED: Final = "file.modified"
"""
Time when the file content was last modified, in ISO 8601 format.
"""

FILE_NAME: Final = "file.name"
"""
Name of the file including the extension, without the directory.
"""

FILE_OWNER_ID: Final = "file.owner.id"
"""
The user ID (UID) or security identifier (SID) of the file owner.
"""

FILE_OWNER_NAME: Final = "file.owner.name"
"""
Username of the file owner.
"""

FILE_PATH: Final = "file.path"
"""
Full path to the file, including the file name. It should include the drive letter, when appropriate.
"""

FILE_SIZE: Final = "file.size"
"""
File size in bytes.
"""

FILE_SYMBOLIC_LINK_TARGET_PATH: Final = "file.symbolic_link.target_path"
"""
Path to the target of a symbolic link.
Note: This attribute is only applicable to symbolic links.
"""


# --- file: opentelemetry/semconv/_incubating/attributes/gcp_attributes.py ---
# (new file in this diff; index 00000000..3dcd23f0)
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Final

# Incubating Google Cloud Platform attribute keys.
GCP_CLIENT_SERVICE: Final = "gcp.client.service"
"""
Identifies the Google Cloud service for which the official client library is intended.
Note: Intended to be a stable identifier for Google Cloud client libraries that is uniform across implementation languages. The value should be derived from the canonical service domain for the service; for example, 'foo.googleapis.com' should result in a value of 'foo'.
"""

GCP_CLOUD_RUN_JOB_EXECUTION: Final = "gcp.cloud_run.job.execution"
"""
The name of the Cloud Run [execution](https://cloud.google.com/run/docs/managing/job-executions) being run for the Job, as set by the [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) environment variable.
"""

GCP_CLOUD_RUN_JOB_TASK_INDEX: Final = "gcp.cloud_run.job.task_index"
"""
The index for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) environment variable.
"""

GCP_GCE_INSTANCE_HOSTNAME: Final = "gcp.gce.instance.hostname"
"""
The hostname of a GCE instance. This is the full value of the default or [custom hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
"""

GCP_GCE_INSTANCE_NAME: Final = "gcp.gce.instance.name"
"""
The instance name of a GCE instance. This is the value provided by `host.name`, the visible name of the instance in the Cloud Console UI, and the prefix for the default hostname of the instance as defined by the [default internal DNS name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
"""


# --- file: opentelemetry/semconv/_incubating/attributes/gen_ai_attributes.py ---
# (new file in this diff; index 00000000..70b1feb1; body continues below)
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Body of gen_ai_attributes.py (license header precedes this chunk).
# Auto-generated incubating gen_ai.* attribute keys; comments only added.

from enum import Enum
from typing import Final

# NOTE(review): `deprecated` is the third-party "Deprecated" PyPI package.
from deprecated import deprecated

GEN_AI_AGENT_DESCRIPTION: Final = "gen_ai.agent.description"
"""
Free-form description of the GenAI agent provided by the application.
"""

GEN_AI_AGENT_ID: Final = "gen_ai.agent.id"
"""
The unique identifier of the GenAI agent.
"""

GEN_AI_AGENT_NAME: Final = "gen_ai.agent.name"
"""
Human-readable name of the GenAI agent provided by the application.
"""

GEN_AI_COMPLETION: Final = "gen_ai.completion"
"""
Deprecated: Removed, no replacement at this time.
"""

GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT: Final = (
    "gen_ai.openai.request.response_format"
)
"""
Deprecated: Replaced by `gen_ai.output.type`.
"""

GEN_AI_OPENAI_REQUEST_SEED: Final = "gen_ai.openai.request.seed"
"""
Deprecated: Replaced by `gen_ai.request.seed` attribute.
"""

GEN_AI_OPENAI_REQUEST_SERVICE_TIER: Final = (
    "gen_ai.openai.request.service_tier"
)
"""
The service tier requested. May be a specific tier, default, or auto.
"""

GEN_AI_OPENAI_RESPONSE_SERVICE_TIER: Final = (
    "gen_ai.openai.response.service_tier"
)
"""
The service tier used for the response.
"""

GEN_AI_OPENAI_RESPONSE_SYSTEM_FINGERPRINT: Final = (
    "gen_ai.openai.response.system_fingerprint"
)
"""
A fingerprint to track any eventual change in the Generative AI environment.
"""

GEN_AI_OPERATION_NAME: Final = "gen_ai.operation.name"
"""
The name of the operation being performed.
Note: If one of the predefined values applies, but specific system uses a different name it's RECOMMENDED to document it in the semantic conventions for specific GenAI system and use system-specific name in the instrumentation. If a different name is not documented, instrumentation libraries SHOULD use applicable predefined value.
"""

GEN_AI_OUTPUT_TYPE: Final = "gen_ai.output.type"
"""
Represents the content type requested by the client.
Note: This attribute SHOULD be used when the client requests output of a specific type. The model may return zero or more outputs of this type.
This attribute specifies the output modality and not the actual output format. For example, if an image is requested, the actual output could be a URL pointing to an image file.
Additional output format details may be recorded in the future in the `gen_ai.output.{type}.*` attributes.
"""

GEN_AI_PROMPT: Final = "gen_ai.prompt"
"""
Deprecated: Removed, no replacement at this time.
"""

GEN_AI_REQUEST_CHOICE_COUNT: Final = "gen_ai.request.choice.count"
"""
The target number of candidate completions to return.
"""

GEN_AI_REQUEST_ENCODING_FORMATS: Final = "gen_ai.request.encoding_formats"
"""
The encoding formats requested in an embeddings operation, if specified.
Note: In some GenAI systems the encoding formats are called embedding types. Also, some GenAI systems only accept a single format per request.
"""

GEN_AI_REQUEST_FREQUENCY_PENALTY: Final = "gen_ai.request.frequency_penalty"
"""
The frequency penalty setting for the GenAI request.
"""

GEN_AI_REQUEST_MAX_TOKENS: Final = "gen_ai.request.max_tokens"
"""
The maximum number of tokens the model generates for a request.
"""

GEN_AI_REQUEST_MODEL: Final = "gen_ai.request.model"
"""
The name of the GenAI model a request is being made to.
"""

GEN_AI_REQUEST_PRESENCE_PENALTY: Final = "gen_ai.request.presence_penalty"
"""
The presence penalty setting for the GenAI request.
"""

GEN_AI_REQUEST_SEED: Final = "gen_ai.request.seed"
"""
Requests with same seed value more likely to return same result.
"""

GEN_AI_REQUEST_STOP_SEQUENCES: Final = "gen_ai.request.stop_sequences"
"""
List of sequences that the model will use to stop generating further tokens.
"""

GEN_AI_REQUEST_TEMPERATURE: Final = "gen_ai.request.temperature"
"""
The temperature setting for the GenAI request.
"""

GEN_AI_REQUEST_TOP_K: Final = "gen_ai.request.top_k"
"""
The top_k sampling setting for the GenAI request.
"""

GEN_AI_REQUEST_TOP_P: Final = "gen_ai.request.top_p"
"""
The top_p sampling setting for the GenAI request.
"""

GEN_AI_RESPONSE_FINISH_REASONS: Final = "gen_ai.response.finish_reasons"
"""
Array of reasons the model stopped generating tokens, corresponding to each generation received.
"""

GEN_AI_RESPONSE_ID: Final = "gen_ai.response.id"
"""
The unique identifier for the completion.
"""

GEN_AI_RESPONSE_MODEL: Final = "gen_ai.response.model"
"""
The name of the model that generated the response.
"""

GEN_AI_SYSTEM: Final = "gen_ai.system"
"""
The Generative AI product as identified by the client or server instrumentation.
Note: The `gen_ai.system` describes a family of GenAI models with specific model identified
by `gen_ai.request.model` and `gen_ai.response.model` attributes.

The actual GenAI product may differ from the one identified by the client.
Multiple systems, including Azure OpenAI and Gemini, are accessible by OpenAI client
libraries. In such cases, the `gen_ai.system` is set to `openai` based on the
instrumentation's best knowledge, instead of the actual system. The `server.address`
attribute may help identify the actual system in use for `openai`.

For custom model, a custom friendly name SHOULD be used.
If none of these options apply, the `gen_ai.system` SHOULD be set to `_OTHER`.
"""

GEN_AI_TOKEN_TYPE: Final = "gen_ai.token.type"
"""
The type of token being counted.
"""

GEN_AI_TOOL_CALL_ID: Final = "gen_ai.tool.call.id"
"""
The tool call identifier.
"""

GEN_AI_TOOL_NAME: Final = "gen_ai.tool.name"
"""
Name of the tool utilized by the agent.
"""

GEN_AI_TOOL_TYPE: Final = "gen_ai.tool.type"
"""
Type of the tool utilized by the agent.
Note: Extension: A tool executed on the agent-side to directly call external APIs, bridging the gap between the agent and real-world systems.
  Agent-side operations involve actions that are performed by the agent on the server or within the agent's controlled environment.
Function: A tool executed on the client-side, where the agent generates parameters for a predefined function, and the client executes the logic.
  Client-side operations are actions taken on the user's end or within the client application.
Datastore: A tool used by the agent to access and query structured or unstructured external data for retrieval-augmented tasks or knowledge updates.
"""

GEN_AI_USAGE_COMPLETION_TOKENS: Final = "gen_ai.usage.completion_tokens"
"""
Deprecated: Replaced by `gen_ai.usage.output_tokens` attribute.
"""

GEN_AI_USAGE_INPUT_TOKENS: Final = "gen_ai.usage.input_tokens"
"""
The number of tokens used in the GenAI input (prompt).
"""

GEN_AI_USAGE_OUTPUT_TOKENS: Final = "gen_ai.usage.output_tokens"
"""
The number of tokens used in the GenAI response (completion).
"""

GEN_AI_USAGE_PROMPT_TOKENS: Final = "gen_ai.usage.prompt_tokens"
"""
Deprecated: Replaced by `gen_ai.usage.input_tokens` attribute.
"""


@deprecated(
    reason="The attribute gen_ai.openai.request.response_format is deprecated - Replaced by `gen_ai.output.type`"
)  # type: ignore
class GenAiOpenaiRequestResponseFormatValues(Enum):
    TEXT = "text"
    """Text response format."""
    JSON_OBJECT = "json_object"
    """JSON object response format."""
    JSON_SCHEMA = "json_schema"
    """JSON schema response format."""


class GenAiOpenaiRequestServiceTierValues(Enum):
    AUTO = "auto"
    """The system will utilize scale tier credits until they are exhausted."""
    DEFAULT = "default"
    """The system will utilize the default scale tier."""


class GenAiOperationNameValues(Enum):
    CHAT = "chat"
    """Chat completion operation such as [OpenAI Chat API](https://platform.openai.com/docs/api-reference/chat)."""
    TEXT_COMPLETION = "text_completion"
    """Text completions operation such as [OpenAI Completions API (Legacy)](https://platform.openai.com/docs/api-reference/completions)."""
    EMBEDDINGS = "embeddings"
    """Embeddings operation such as [OpenAI Create embeddings API](https://platform.openai.com/docs/api-reference/embeddings/create)."""
    CREATE_AGENT = "create_agent"
    """Create GenAI agent."""
    EXECUTE_TOOL = "execute_tool"
    """Execute a tool."""


class GenAiOutputTypeValues(Enum):
    TEXT = "text"
    """Plain text."""
    JSON = "json"
    """JSON object with known or unknown schema."""
    IMAGE = "image"
    """Image."""
    SPEECH = "speech"
    """Speech."""


class GenAiSystemValues(Enum):
    OPENAI = "openai"
    """OpenAI."""
    VERTEX_AI = "vertex_ai"
    """Vertex AI."""
    GEMINI = "gemini"
    """Gemini."""
    ANTHROPIC = "anthropic"
    """Anthropic."""
    COHERE = "cohere"
    """Cohere."""
    AZ_AI_INFERENCE = "az.ai.inference"
    """Azure AI Inference."""
    AZ_AI_OPENAI = "az.ai.openai"
    """Azure OpenAI."""
    IBM_WATSONX_AI = "ibm.watsonx.ai"
    """IBM Watsonx AI."""
    AWS_BEDROCK = "aws.bedrock"
    """AWS Bedrock."""
    PERPLEXITY = "perplexity"
    """Perplexity."""
    XAI = "xai"
    """xAI."""
    DEEPSEEK = "deepseek"
    """DeepSeek."""
    GROQ = "groq"
    """Groq."""
    MISTRAL_AI = "mistral_ai"
    """Mistral AI."""


# NOTE: COMPLETION and OUTPUT share the value "output". Per Python Enum
# semantics, the second member with a duplicate value becomes an alias of
# the first, so GenAiTokenTypeValues.OUTPUT is GenAiTokenTypeValues.COMPLETION
# (and OUTPUT.name == "COMPLETION"). This matches the upstream generator's
# deliberate deprecation aliasing — do not "fix" it.
class GenAiTokenTypeValues(Enum):
    INPUT = "input"
    """Input tokens (prompt, input, etc.)."""
    COMPLETION = "output"
    """Deprecated: Replaced by `output`."""
    OUTPUT = "output"
    """Output tokens (completion, response, etc.)."""


# --- file: opentelemetry/semconv/_incubating/attributes/geo_attributes.py ---
# (new file in this diff; index 00000000..573e5238)
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from enum import Enum
from typing import Final

GEO_CONTINENT_CODE: Final = "geo.continent.code"
"""
Two-letter code representing continent’s name.
"""

GEO_COUNTRY_ISO_CODE: Final = "geo.country.iso_code"
"""
Two-letter ISO Country Code ([ISO 3166-1 alpha2](https://wikipedia.org/wiki/ISO_3166-1#Codes)).
"""

GEO_LOCALITY_NAME: Final = "geo.locality.name"
"""
Locality name. Represents the name of a city, town, village, or similar populated place.
"""

GEO_LOCATION_LAT: Final = "geo.location.lat"
"""
Latitude of the geo location in [WGS84](https://wikipedia.org/wiki/World_Geodetic_System#WGS84).
"""

GEO_LOCATION_LON: Final = "geo.location.lon"
"""
Longitude of the geo location in [WGS84](https://wikipedia.org/wiki/World_Geodetic_System#WGS84).
"""

GEO_POSTAL_CODE: Final = "geo.postal_code"
"""
Postal code associated with the location. Values appropriate for this field may also be known as a postcode or ZIP code and will vary widely from country to country.
"""

GEO_REGION_ISO_CODE: Final = "geo.region.iso_code"
"""
Region ISO code ([ISO 3166-2](https://wikipedia.org/wiki/ISO_3166-2)).
"""


# Well-known values for geo.continent.code.
class GeoContinentCodeValues(Enum):
    AF = "AF"
    """Africa."""
    AN = "AN"
    """Antarctica."""
    AS = "AS"
    """Asia."""
    EU = "EU"
    """Europe."""
    NA = "NA"
    """North America."""
    OC = "OC"
    """Oceania."""
    SA = "SA"
    """South America."""


# --- file: opentelemetry/semconv/_incubating/attributes/graphql_attributes.py ---
# (new file in this diff; index 00000000..c4677717)
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from enum import Enum
from typing import Final

GRAPHQL_DOCUMENT: Final = "graphql.document"
"""
The GraphQL document being executed.
Note: The value may be sanitized to exclude sensitive information.
"""

GRAPHQL_OPERATION_NAME: Final = "graphql.operation.name"
"""
The name of the operation being executed.
"""

GRAPHQL_OPERATION_TYPE: Final = "graphql.operation.type"
"""
The type of the operation being executed.
"""


# Well-known values for graphql.operation.type.
class GraphqlOperationTypeValues(Enum):
    QUERY = "query"
    """GraphQL query."""
    MUTATION = "mutation"
    """GraphQL mutation."""
    SUBSCRIPTION = "subscription"
    """GraphQL subscription."""


# --- file: opentelemetry/semconv/_incubating/attributes/heroku_attributes.py ---
# (new file in this diff; index 00000000..83ba66b1)
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Final

HEROKU_APP_ID: Final = "heroku.app.id"
"""
Unique identifier for the application.
"""

HEROKU_RELEASE_COMMIT: Final = "heroku.release.commit"
"""
Commit hash for the current release.
"""

HEROKU_RELEASE_CREATION_TIMESTAMP: Final = "heroku.release.creation_timestamp"
"""
Time and date the release was created.
"""


# --- file: opentelemetry/semconv/_incubating/attributes/host_attributes.py ---
# (new file in this diff; index 00000000..72847e65; TRUNCATED at the end of
# this chunk — the file continues past HOST_TYPE in the original diff.)
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from enum import Enum
from typing import Final

HOST_ARCH: Final = "host.arch"
"""
The CPU architecture the host system is running on.
"""

HOST_CPU_CACHE_L2_SIZE: Final = "host.cpu.cache.l2.size"
"""
The amount of level 2 memory cache available to the processor (in Bytes).
"""

HOST_CPU_FAMILY: Final = "host.cpu.family"
"""
Family or generation of the CPU.
"""

HOST_CPU_MODEL_ID: Final = "host.cpu.model.id"
"""
Model identifier. It provides more granular information about the CPU, distinguishing it from other CPUs within the same family.
"""

HOST_CPU_MODEL_NAME: Final = "host.cpu.model.name"
"""
Model designation of the processor.
"""

HOST_CPU_STEPPING: Final = "host.cpu.stepping"
"""
Stepping or core revisions.
"""

HOST_CPU_VENDOR_ID: Final = "host.cpu.vendor.id"
"""
Processor manufacturer identifier. A maximum 12-character string.
Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor ID string in EBX, EDX and ECX registers. Writing these to memory in this order results in a 12-character string.
"""

HOST_ID: Final = "host.id"
"""
Unique host ID. For Cloud, this must be the instance_id assigned by the cloud provider. For non-containerized systems, this should be the `machine-id`. See the table below for the sources to use to determine the `machine-id` based on operating system.
"""

HOST_IMAGE_ID: Final = "host.image.id"
"""
VM image ID or host OS image ID. For Cloud, this value is from the provider.
"""

HOST_IMAGE_NAME: Final = "host.image.name"
"""
Name of the VM image or OS install the host was instantiated from.
"""

HOST_IMAGE_VERSION: Final = "host.image.version"
"""
The version string of the VM image or host OS as defined in [Version Attributes](/docs/resource/README.md#version-attributes).
"""

HOST_IP: Final = "host.ip"
"""
Available IP addresses of the host, excluding loopback interfaces.
Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 addresses MUST be specified in the [RFC 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format.
"""

HOST_MAC: Final = "host.mac"
"""
Available MAC addresses of the host, excluding loopback interfaces.
Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf): as hyphen-separated octets in uppercase hexadecimal form from most to least significant.
"""

HOST_NAME: Final = "host.name"
"""
Name of the host. On Unix systems, it may contain what the hostname command returns, or the fully qualified hostname, or another name specified by the user.
"""

HOST_TYPE: Final = "host.type"
"""
Type of host. For Cloud, this must be the machine type.
"""
# NOTE(review): the chunk ends here, mid-docstring in the original diff;
# the closing quotes above were restored to keep this region well-formed.
# The remainder of host_attributes.py is outside this view.
+""" + + +class HostArchValues(Enum): + AMD64 = "amd64" + """AMD64.""" + ARM32 = "arm32" + """ARM32.""" + ARM64 = "arm64" + """ARM64.""" + IA64 = "ia64" + """Itanium.""" + PPC32 = "ppc32" + """32-bit PowerPC.""" + PPC64 = "ppc64" + """64-bit PowerPC.""" + S390X = "s390x" + """IBM z/Architecture.""" + X86 = "x86" + """32-bit x86.""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/http_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/http_attributes.py new file mode 100644 index 00000000..f2e1ccb7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/http_attributes.py @@ -0,0 +1,203 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum +from typing import Final + +from deprecated import deprecated + +HTTP_CLIENT_IP: Final = "http.client_ip" +""" +Deprecated: Replaced by `client.address`. +""" + +HTTP_CONNECTION_STATE: Final = "http.connection.state" +""" +State of the HTTP connection in the HTTP connection pool. +""" + +HTTP_FLAVOR: Final = "http.flavor" +""" +Deprecated: Replaced by `network.protocol.name`. +""" + +HTTP_HOST: Final = "http.host" +""" +Deprecated: Replaced by one of `server.address`, `client.address` or `http.request.header.host`, depending on the usage. +""" + +HTTP_METHOD: Final = "http.method" +""" +Deprecated: Replaced by `http.request.method`. 
+""" + +HTTP_REQUEST_BODY_SIZE: Final = "http.request.body.size" +""" +The size of the request payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) header. For requests using transport encoding, this should be the compressed size. +""" + +HTTP_REQUEST_HEADER_TEMPLATE: Final = "http.request.header" +""" +Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.http_attributes.HTTP_REQUEST_HEADER_TEMPLATE`. +""" + +HTTP_REQUEST_METHOD: Final = "http.request.method" +""" +Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.http_attributes.HTTP_REQUEST_METHOD`. +""" + +HTTP_REQUEST_METHOD_ORIGINAL: Final = "http.request.method_original" +""" +Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.http_attributes.HTTP_REQUEST_METHOD_ORIGINAL`. +""" + +HTTP_REQUEST_RESEND_COUNT: Final = "http.request.resend_count" +""" +Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.http_attributes.HTTP_REQUEST_RESEND_COUNT`. +""" + +HTTP_REQUEST_SIZE: Final = "http.request.size" +""" +The total size of the request in bytes. This should be the total number of bytes sent over the wire, including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers, and request body if any. +""" + +HTTP_REQUEST_CONTENT_LENGTH: Final = "http.request_content_length" +""" +Deprecated: Replaced by `http.request.header.<key>`. +""" + +HTTP_REQUEST_CONTENT_LENGTH_UNCOMPRESSED: Final = ( + "http.request_content_length_uncompressed" +) +""" +Deprecated: Replaced by `http.request.body.size`. +""" + +HTTP_RESPONSE_BODY_SIZE: Final = "http.response.body.size" +""" +The size of the response payload body in bytes. 
This is the number of bytes transferred excluding headers and is often, but not always, present as the [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) header. For requests using transport encoding, this should be the compressed size. +""" + +HTTP_RESPONSE_HEADER_TEMPLATE: Final = "http.response.header" +""" +Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.http_attributes.HTTP_RESPONSE_HEADER_TEMPLATE`. +""" + +HTTP_RESPONSE_SIZE: Final = "http.response.size" +""" +The total size of the response in bytes. This should be the total number of bytes sent over the wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers, and response body and trailers if any. +""" + +HTTP_RESPONSE_STATUS_CODE: Final = "http.response.status_code" +""" +Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.http_attributes.HTTP_RESPONSE_STATUS_CODE`. +""" + +HTTP_RESPONSE_CONTENT_LENGTH: Final = "http.response_content_length" +""" +Deprecated: Replaced by `http.response.header.<key>`. +""" + +HTTP_RESPONSE_CONTENT_LENGTH_UNCOMPRESSED: Final = ( + "http.response_content_length_uncompressed" +) +""" +Deprecated: Replace by `http.response.body.size`. +""" + +HTTP_ROUTE: Final = "http.route" +""" +Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.http_attributes.HTTP_ROUTE`. +""" + +HTTP_SCHEME: Final = "http.scheme" +""" +Deprecated: Replaced by `url.scheme` instead. +""" + +HTTP_SERVER_NAME: Final = "http.server_name" +""" +Deprecated: Replaced by `server.address`. +""" + +HTTP_STATUS_CODE: Final = "http.status_code" +""" +Deprecated: Replaced by `http.response.status_code`. +""" + +HTTP_TARGET: Final = "http.target" +""" +Deprecated: Split to `url.path` and `url.query. +""" + +HTTP_URL: Final = "http.url" +""" +Deprecated: Replaced by `url.full`. +""" + +HTTP_USER_AGENT: Final = "http.user_agent" +""" +Deprecated: Replaced by `user_agent.original`. 
+""" + + +class HttpConnectionStateValues(Enum): + ACTIVE = "active" + """active state.""" + IDLE = "idle" + """idle state.""" + + +@deprecated( + reason="The attribute http.flavor is deprecated - Replaced by `network.protocol.name`" +) # type: ignore +class HttpFlavorValues(Enum): + HTTP_1_0 = "1.0" + """HTTP/1.0.""" + HTTP_1_1 = "1.1" + """HTTP/1.1.""" + HTTP_2_0 = "2.0" + """HTTP/2.""" + HTTP_3_0 = "3.0" + """HTTP/3.""" + SPDY = "SPDY" + """SPDY protocol.""" + QUIC = "QUIC" + """QUIC protocol.""" + + +@deprecated( + reason="Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.http_attributes.HttpRequestMethodValues`." +) # type: ignore +class HttpRequestMethodValues(Enum): + CONNECT = "CONNECT" + """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.http_attributes.HttpRequestMethodValues.CONNECT`.""" + DELETE = "DELETE" + """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.http_attributes.HttpRequestMethodValues.DELETE`.""" + GET = "GET" + """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.http_attributes.HttpRequestMethodValues.GET`.""" + HEAD = "HEAD" + """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.http_attributes.HttpRequestMethodValues.HEAD`.""" + OPTIONS = "OPTIONS" + """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.http_attributes.HttpRequestMethodValues.OPTIONS`.""" + PATCH = "PATCH" + """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.http_attributes.HttpRequestMethodValues.PATCH`.""" + POST = "POST" + """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.http_attributes.HttpRequestMethodValues.POST`.""" + PUT = "PUT" + """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.http_attributes.HttpRequestMethodValues.PUT`.""" + TRACE = "TRACE" + """Deprecated in favor of stable 
:py:const:`opentelemetry.semconv.attributes.http_attributes.HttpRequestMethodValues.TRACE`.""" + OTHER = "_OTHER" + """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.http_attributes.HttpRequestMethodValues.OTHER`.""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/hw_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/hw_attributes.py new file mode 100644 index 00000000..510eb976 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/hw_attributes.py @@ -0,0 +1,82 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum +from typing import Final + +HW_ID: Final = "hw.id" +""" +An identifier for the hardware component, unique within the monitored host. +""" + +HW_NAME: Final = "hw.name" +""" +An easily-recognizable name for the hardware component. +""" + +HW_PARENT: Final = "hw.parent" +""" +Unique identifier of the parent component (typically the `hw.id` attribute of the enclosure, or disk controller). +""" + +HW_STATE: Final = "hw.state" +""" +The current state of the component. +""" + +HW_TYPE: Final = "hw.type" +""" +Type of the component. +Note: Describes the category of the hardware component for which `hw.state` is being reported. 
For example, `hw.type=temperature` along with `hw.state=degraded` would indicate that the temperature of the hardware component has been reported as `degraded`. +""" + + +class HwStateValues(Enum): + OK = "ok" + """Ok.""" + DEGRADED = "degraded" + """Degraded.""" + FAILED = "failed" + """Failed.""" + + +class HwTypeValues(Enum): + BATTERY = "battery" + """Battery.""" + CPU = "cpu" + """CPU.""" + DISK_CONTROLLER = "disk_controller" + """Disk controller.""" + ENCLOSURE = "enclosure" + """Enclosure.""" + FAN = "fan" + """Fan.""" + GPU = "gpu" + """GPU.""" + LOGICAL_DISK = "logical_disk" + """Logical disk.""" + MEMORY = "memory" + """Memory.""" + NETWORK = "network" + """Network.""" + PHYSICAL_DISK = "physical_disk" + """Physical disk.""" + POWER_SUPPLY = "power_supply" + """Power supply.""" + TAPE_DRIVE = "tape_drive" + """Tape drive.""" + TEMPERATURE = "temperature" + """Temperature.""" + VOLTAGE = "voltage" + """Voltage.""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/k8s_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/k8s_attributes.py new file mode 100644 index 00000000..9d79cd9b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/k8s_attributes.py @@ -0,0 +1,234 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from enum import Enum +from typing import Final + +K8S_CLUSTER_NAME: Final = "k8s.cluster.name" +""" +The name of the cluster. +""" + +K8S_CLUSTER_UID: Final = "k8s.cluster.uid" +""" +A pseudo-ID for the cluster, set to the UID of the `kube-system` namespace. +Note: K8s doesn't have support for obtaining a cluster ID. If this is ever +added, we will recommend collecting the `k8s.cluster.uid` through the +official APIs. In the meantime, we are able to use the `uid` of the +`kube-system` namespace as a proxy for cluster ID. Read on for the +rationale. + +Every object created in a K8s cluster is assigned a distinct UID. The +`kube-system` namespace is used by Kubernetes itself and will exist +for the lifetime of the cluster. Using the `uid` of the `kube-system` +namespace is a reasonable proxy for the K8s ClusterID as it will only +change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are +UUIDs as standardized by +[ISO/IEC 9834-8 and ITU-T X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html). +Which states: + +> If generated according to one of the mechanisms defined in Rec. +> ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be +> different from all other UUIDs generated before 3603 A.D., or is +> extremely likely to be different (depending on the mechanism chosen). + +Therefore, UIDs between clusters should be extremely unlikely to +conflict. +""" + +K8S_CONTAINER_NAME: Final = "k8s.container.name" +""" +The name of the Container from Pod specification, must be unique within a Pod. Container runtime usually uses different globally unique name (`container.name`). +""" + +K8S_CONTAINER_RESTART_COUNT: Final = "k8s.container.restart_count" +""" +Number of times the container was restarted. This attribute can be used to identify a particular container (running or stopped) within a container spec. 
+""" + +K8S_CONTAINER_STATUS_LAST_TERMINATED_REASON: Final = ( + "k8s.container.status.last_terminated_reason" +) +""" +Last terminated reason of the Container. +""" + +K8S_CRONJOB_NAME: Final = "k8s.cronjob.name" +""" +The name of the CronJob. +""" + +K8S_CRONJOB_UID: Final = "k8s.cronjob.uid" +""" +The UID of the CronJob. +""" + +K8S_DAEMONSET_NAME: Final = "k8s.daemonset.name" +""" +The name of the DaemonSet. +""" + +K8S_DAEMONSET_UID: Final = "k8s.daemonset.uid" +""" +The UID of the DaemonSet. +""" + +K8S_DEPLOYMENT_NAME: Final = "k8s.deployment.name" +""" +The name of the Deployment. +""" + +K8S_DEPLOYMENT_UID: Final = "k8s.deployment.uid" +""" +The UID of the Deployment. +""" + +K8S_HPA_NAME: Final = "k8s.hpa.name" +""" +The name of the horizontal pod autoscaler. +""" + +K8S_HPA_UID: Final = "k8s.hpa.uid" +""" +The UID of the horizontal pod autoscaler. +""" + +K8S_JOB_NAME: Final = "k8s.job.name" +""" +The name of the Job. +""" + +K8S_JOB_UID: Final = "k8s.job.uid" +""" +The UID of the Job. +""" + +K8S_NAMESPACE_NAME: Final = "k8s.namespace.name" +""" +The name of the namespace that the pod is running in. +""" + +K8S_NAMESPACE_PHASE: Final = "k8s.namespace.phase" +""" +The phase of the K8s namespace. +Note: This attribute aligns with the `phase` field of the +[K8s NamespaceStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#namespacestatus-v1-core). +""" + +K8S_NODE_NAME: Final = "k8s.node.name" +""" +The name of the Node. +""" + +K8S_NODE_UID: Final = "k8s.node.uid" +""" +The UID of the Node. +""" + +K8S_POD_ANNOTATION_TEMPLATE: Final = "k8s.pod.annotation" +""" +The annotation key-value pairs placed on the Pod, the `<key>` being the annotation name, the value being the annotation value. +""" + +K8S_POD_LABEL_TEMPLATE: Final = "k8s.pod.label" +""" +The label key-value pairs placed on the Pod, the `<key>` being the label name, the value being the label value. 
+""" + +K8S_POD_LABELS_TEMPLATE: Final = "k8s.pod.labels" +""" +Deprecated: Replaced by `k8s.pod.label`. +""" + +K8S_POD_NAME: Final = "k8s.pod.name" +""" +The name of the Pod. +""" + +K8S_POD_UID: Final = "k8s.pod.uid" +""" +The UID of the Pod. +""" + +K8S_REPLICASET_NAME: Final = "k8s.replicaset.name" +""" +The name of the ReplicaSet. +""" + +K8S_REPLICASET_UID: Final = "k8s.replicaset.uid" +""" +The UID of the ReplicaSet. +""" + +K8S_REPLICATIONCONTROLLER_NAME: Final = "k8s.replicationcontroller.name" +""" +The name of the replication controller. +""" + +K8S_REPLICATIONCONTROLLER_UID: Final = "k8s.replicationcontroller.uid" +""" +The UID of the replication controller. +""" + +K8S_RESOURCEQUOTA_NAME: Final = "k8s.resourcequota.name" +""" +The name of the resource quota. +""" + +K8S_RESOURCEQUOTA_UID: Final = "k8s.resourcequota.uid" +""" +The UID of the resource quota. +""" + +K8S_STATEFULSET_NAME: Final = "k8s.statefulset.name" +""" +The name of the StatefulSet. +""" + +K8S_STATEFULSET_UID: Final = "k8s.statefulset.uid" +""" +The UID of the StatefulSet. +""" + +K8S_VOLUME_NAME: Final = "k8s.volume.name" +""" +The name of the K8s volume. +""" + +K8S_VOLUME_TYPE: Final = "k8s.volume.type" +""" +The type of the K8s volume. 
+""" + + +class K8sNamespacePhaseValues(Enum): + ACTIVE = "active" + """Active namespace phase as described by [K8s API](https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase).""" + TERMINATING = "terminating" + """Terminating namespace phase as described by [K8s API](https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase).""" + + +class K8sVolumeTypeValues(Enum): + PERSISTENT_VOLUME_CLAIM = "persistentVolumeClaim" + """A [persistentVolumeClaim](https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#persistentvolumeclaim) volume.""" + CONFIG_MAP = "configMap" + """A [configMap](https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#configmap) volume.""" + DOWNWARD_API = "downwardAPI" + """A [downwardAPI](https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#downwardapi) volume.""" + EMPTY_DIR = "emptyDir" + """An [emptyDir](https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#emptydir) volume.""" + SECRET = "secret" + """A [secret](https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#secret) volume.""" + LOCAL = "local" + """A [local](https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#local) volume.""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/linux_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/linux_attributes.py new file mode 100644 index 00000000..d10147d8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/linux_attributes.py @@ -0,0 +1,28 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum +from typing import Final + +LINUX_MEMORY_SLAB_STATE: Final = "linux.memory.slab.state" +""" +The Linux Slab memory state. +""" + + +class LinuxMemorySlabStateValues(Enum): + RECLAIMABLE = "reclaimable" + """reclaimable.""" + UNRECLAIMABLE = "unreclaimable" + """unreclaimable.""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/log_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/log_attributes.py new file mode 100644 index 00000000..cd1fbbc3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/log_attributes.py @@ -0,0 +1,61 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum +from typing import Final + +LOG_FILE_NAME: Final = "log.file.name" +""" +The basename of the file. +""" + +LOG_FILE_NAME_RESOLVED: Final = "log.file.name_resolved" +""" +The basename of the file, with symlinks resolved. 
+""" + +LOG_FILE_PATH: Final = "log.file.path" +""" +The full path to the file. +""" + +LOG_FILE_PATH_RESOLVED: Final = "log.file.path_resolved" +""" +The full path to the file, with symlinks resolved. +""" + +LOG_IOSTREAM: Final = "log.iostream" +""" +The stream associated with the log. See below for a list of well-known values. +""" + +LOG_RECORD_ORIGINAL: Final = "log.record.original" +""" +The complete original Log Record. +Note: This value MAY be added when processing a Log Record which was originally transmitted as a string or equivalent data type AND the Body field of the Log Record does not contain the same value. (e.g. a syslog or a log record read from a file.). +""" + +LOG_RECORD_UID: Final = "log.record.uid" +""" +A unique identifier for the Log Record. +Note: If an id is provided, other log records with the same id will be considered duplicates and can be removed safely. This means, that two distinguishable log records MUST have different values. +The id MAY be an [Universally Unique Lexicographically Sortable Identifier (ULID)](https://github.com/ulid/spec), but other identifiers (e.g. UUID) may be used as needed. +""" + + +class LogIostreamValues(Enum): + STDOUT = "stdout" + """Logs from stdout stream.""" + STDERR = "stderr" + """Events from stderr stream.""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/message_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/message_attributes.py new file mode 100644 index 00000000..8728d81b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/message_attributes.py @@ -0,0 +1,48 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum +from typing import Final + +from deprecated import deprecated + +MESSAGE_COMPRESSED_SIZE: Final = "message.compressed_size" +""" +Deprecated: Replaced by `rpc.message.compressed_size`. +""" + +MESSAGE_ID: Final = "message.id" +""" +Deprecated: Replaced by `rpc.message.id`. +""" + +MESSAGE_TYPE: Final = "message.type" +""" +Deprecated: Replaced by `rpc.message.type`. +""" + +MESSAGE_UNCOMPRESSED_SIZE: Final = "message.uncompressed_size" +""" +Deprecated: Replaced by `rpc.message.uncompressed_size`. +""" + + +@deprecated( + reason="The attribute message.type is deprecated - Replaced by `rpc.message.type`" +) # type: ignore +class MessageTypeValues(Enum): + SENT = "SENT" + """sent.""" + RECEIVED = "RECEIVED" + """received.""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/messaging_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/messaging_attributes.py new file mode 100644 index 00000000..88b374b4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/messaging_attributes.py @@ -0,0 +1,373 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum +from typing import Final + +MESSAGING_BATCH_MESSAGE_COUNT: Final = "messaging.batch.message_count" +""" +The number of messages sent, received, or processed in the scope of the batching operation. +Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on spans that operate with a single message. When a messaging client library supports both batch and single-message API for the same operation, instrumentations SHOULD use `messaging.batch.message_count` for batching APIs and SHOULD NOT use it for single-message APIs. +""" + +MESSAGING_CLIENT_ID: Final = "messaging.client.id" +""" +A unique identifier for the client that consumes or produces a message. +""" + +# MESSAGING_CLIENT_ID: Final = "messaging.client_id" +# Deprecated: Replaced by `messaging.client.id`. + +MESSAGING_CONSUMER_GROUP_NAME: Final = "messaging.consumer.group.name" +""" +The name of the consumer group with which a consumer is associated. +Note: Semantic conventions for individual messaging systems SHOULD document whether `messaging.consumer.group.name` is applicable and what it means in the context of that system. +""" + +MESSAGING_DESTINATION_ANONYMOUS: Final = "messaging.destination.anonymous" +""" +A boolean that is true if the message destination is anonymous (could be unnamed or have auto-generated name). +""" + +MESSAGING_DESTINATION_NAME: Final = "messaging.destination.name" +""" +The message destination name. +Note: Destination name SHOULD uniquely identify a specific queue, topic or other entity within the broker. 
If +the broker doesn't have such notion, the destination name SHOULD uniquely identify the broker. +""" + +MESSAGING_DESTINATION_PARTITION_ID: Final = ( + "messaging.destination.partition.id" +) +""" +The identifier of the partition messages are sent to or received from, unique within the `messaging.destination.name`. +""" + +MESSAGING_DESTINATION_SUBSCRIPTION_NAME: Final = ( + "messaging.destination.subscription.name" +) +""" +The name of the destination subscription from which a message is consumed. +Note: Semantic conventions for individual messaging systems SHOULD document whether `messaging.destination.subscription.name` is applicable and what it means in the context of that system. +""" + +MESSAGING_DESTINATION_TEMPLATE: Final = "messaging.destination.template" +""" +Low cardinality representation of the messaging destination name. +Note: Destination names could be constructed from templates. An example would be a destination name involving a user name or product id. Although the destination name in this case is of high cardinality, the underlying template is of low cardinality and can be effectively used for grouping and aggregation. +""" + +MESSAGING_DESTINATION_TEMPORARY: Final = "messaging.destination.temporary" +""" +A boolean that is true if the message destination is temporary and might not exist anymore after messages are processed. +""" + +MESSAGING_DESTINATION_PUBLISH_ANONYMOUS: Final = ( + "messaging.destination_publish.anonymous" +) +""" +Deprecated: No replacement at this time. +""" + +MESSAGING_DESTINATION_PUBLISH_NAME: Final = ( + "messaging.destination_publish.name" +) +""" +Deprecated: No replacement at this time. +""" + +MESSAGING_EVENTHUBS_CONSUMER_GROUP: Final = ( + "messaging.eventhubs.consumer.group" +) +""" +Deprecated: Replaced by `messaging.consumer.group.name`. 
+""" + +MESSAGING_EVENTHUBS_MESSAGE_ENQUEUED_TIME: Final = ( + "messaging.eventhubs.message.enqueued_time" +) +""" +The UTC epoch seconds at which the message has been accepted and stored in the entity. +""" + +MESSAGING_GCP_PUBSUB_MESSAGE_ACK_DEADLINE: Final = ( + "messaging.gcp_pubsub.message.ack_deadline" +) +""" +The ack deadline in seconds set for the modify ack deadline request. +""" + +MESSAGING_GCP_PUBSUB_MESSAGE_ACK_ID: Final = ( + "messaging.gcp_pubsub.message.ack_id" +) +""" +The ack id for a given message. +""" + +MESSAGING_GCP_PUBSUB_MESSAGE_DELIVERY_ATTEMPT: Final = ( + "messaging.gcp_pubsub.message.delivery_attempt" +) +""" +The delivery attempt for a given message. +""" + +MESSAGING_GCP_PUBSUB_MESSAGE_ORDERING_KEY: Final = ( + "messaging.gcp_pubsub.message.ordering_key" +) +""" +The ordering key for a given message. If the attribute is not present, the message does not have an ordering key. +""" + +MESSAGING_KAFKA_CONSUMER_GROUP: Final = "messaging.kafka.consumer.group" +""" +Deprecated: Replaced by `messaging.consumer.group.name`. +""" + +MESSAGING_KAFKA_DESTINATION_PARTITION: Final = ( + "messaging.kafka.destination.partition" +) +""" +Deprecated: Replaced by `messaging.destination.partition.id`. +""" + +MESSAGING_KAFKA_MESSAGE_KEY: Final = "messaging.kafka.message.key" +""" +Message keys in Kafka are used for grouping alike messages to ensure they're processed on the same partition. They differ from `messaging.message.id` in that they're not unique. If the key is `null`, the attribute MUST NOT be set. +Note: If the key type is not string, it's string representation has to be supplied for the attribute. If the key has no unambiguous, canonical string form, don't include its value. +""" + +MESSAGING_KAFKA_MESSAGE_OFFSET: Final = "messaging.kafka.message.offset" +""" +Deprecated: Replaced by `messaging.kafka.offset`. 
+""" + +MESSAGING_KAFKA_MESSAGE_TOMBSTONE: Final = "messaging.kafka.message.tombstone" +""" +A boolean that is true if the message is a tombstone. +""" + +MESSAGING_KAFKA_OFFSET: Final = "messaging.kafka.offset" +""" +The offset of a record in the corresponding Kafka partition. +""" + +MESSAGING_MESSAGE_BODY_SIZE: Final = "messaging.message.body.size" +""" +The size of the message body in bytes. +Note: This can refer to both the compressed or uncompressed body size. If both sizes are known, the uncompressed +body size should be used. +""" + +MESSAGING_MESSAGE_CONVERSATION_ID: Final = "messaging.message.conversation_id" +""" +The conversation ID identifying the conversation to which the message belongs, represented as a string. Sometimes called "Correlation ID". +""" + +MESSAGING_MESSAGE_ENVELOPE_SIZE: Final = "messaging.message.envelope.size" +""" +The size of the message body and metadata in bytes. +Note: This can refer to both the compressed or uncompressed size. If both sizes are known, the uncompressed +size should be used. +""" + +MESSAGING_MESSAGE_ID: Final = "messaging.message.id" +""" +A value used by the messaging system as an identifier for the message, represented as a string. +""" + +MESSAGING_OPERATION: Final = "messaging.operation" +""" +Deprecated: Replaced by `messaging.operation.type`. +""" + +MESSAGING_OPERATION_NAME: Final = "messaging.operation.name" +""" +The system-specific name of the messaging operation. +""" + +MESSAGING_OPERATION_TYPE: Final = "messaging.operation.type" +""" +A string identifying the type of the messaging operation. +Note: If a custom value is used, it MUST be of low cardinality. +""" + +MESSAGING_RABBITMQ_DESTINATION_ROUTING_KEY: Final = ( + "messaging.rabbitmq.destination.routing_key" +) +""" +RabbitMQ message routing key. +""" + +MESSAGING_RABBITMQ_MESSAGE_DELIVERY_TAG: Final = ( + "messaging.rabbitmq.message.delivery_tag" +) +""" +RabbitMQ message delivery tag. 
+""" + +MESSAGING_ROCKETMQ_CLIENT_GROUP: Final = "messaging.rocketmq.client_group" +""" +Deprecated: Replaced by `messaging.consumer.group.name` on the consumer spans. No replacement for producer spans. +""" + +MESSAGING_ROCKETMQ_CONSUMPTION_MODEL: Final = ( + "messaging.rocketmq.consumption_model" +) +""" +Model of message consumption. This only applies to consumer spans. +""" + +MESSAGING_ROCKETMQ_MESSAGE_DELAY_TIME_LEVEL: Final = ( + "messaging.rocketmq.message.delay_time_level" +) +""" +The delay time level for delay message, which determines the message delay time. +""" + +MESSAGING_ROCKETMQ_MESSAGE_DELIVERY_TIMESTAMP: Final = ( + "messaging.rocketmq.message.delivery_timestamp" +) +""" +The timestamp in milliseconds that the delay message is expected to be delivered to consumer. +""" + +MESSAGING_ROCKETMQ_MESSAGE_GROUP: Final = "messaging.rocketmq.message.group" +""" +It is essential for FIFO message. Messages that belong to the same message group are always processed one by one within the same consumer group. +""" + +MESSAGING_ROCKETMQ_MESSAGE_KEYS: Final = "messaging.rocketmq.message.keys" +""" +Key(s) of message, another way to mark message besides message id. +""" + +MESSAGING_ROCKETMQ_MESSAGE_TAG: Final = "messaging.rocketmq.message.tag" +""" +The secondary classifier of message besides topic. +""" + +MESSAGING_ROCKETMQ_MESSAGE_TYPE: Final = "messaging.rocketmq.message.type" +""" +Type of message. +""" + +MESSAGING_ROCKETMQ_NAMESPACE: Final = "messaging.rocketmq.namespace" +""" +Namespace of RocketMQ resources, resources in different namespaces are individual. +""" + +MESSAGING_SERVICEBUS_DESTINATION_SUBSCRIPTION_NAME: Final = ( + "messaging.servicebus.destination.subscription_name" +) +""" +Deprecated: Replaced by `messaging.destination.subscription.name`. 
+""" + +MESSAGING_SERVICEBUS_DISPOSITION_STATUS: Final = ( + "messaging.servicebus.disposition_status" +) +""" +Describes the [settlement type](https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock). +""" + +MESSAGING_SERVICEBUS_MESSAGE_DELIVERY_COUNT: Final = ( + "messaging.servicebus.message.delivery_count" +) +""" +Number of deliveries that have been attempted for this message. +""" + +MESSAGING_SERVICEBUS_MESSAGE_ENQUEUED_TIME: Final = ( + "messaging.servicebus.message.enqueued_time" +) +""" +The UTC epoch seconds at which the message has been accepted and stored in the entity. +""" + +MESSAGING_SYSTEM: Final = "messaging.system" +""" +The messaging system as identified by the client instrumentation. +Note: The actual messaging system may differ from the one known by the client. For example, when using Kafka client libraries to communicate with Azure Event Hubs, the `messaging.system` is set to `kafka` based on the instrumentation's best knowledge. +""" + + +class MessagingOperationTypeValues(Enum): + CREATE = "create" + """A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch sending scenarios.""" + SEND = "send" + """One or more messages are provided for sending to an intermediary. If a single message is sent, the context of the "Send" span can be used as the creation context and no "Create" span needs to be created.""" + RECEIVE = "receive" + """One or more messages are requested by a consumer. 
This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages.""" + PROCESS = "process" + """One or more messages are processed by a consumer.""" + SETTLE = "settle" + """One or more messages are settled.""" + DELIVER = "deliver" + """Deprecated: Replaced by `process`.""" + PUBLISH = "publish" + """Deprecated: Replaced by `send`.""" + + +class MessagingRocketmqConsumptionModelValues(Enum): + CLUSTERING = "clustering" + """Clustering consumption model.""" + BROADCASTING = "broadcasting" + """Broadcasting consumption model.""" + + +class MessagingRocketmqMessageTypeValues(Enum): + NORMAL = "normal" + """Normal message.""" + FIFO = "fifo" + """FIFO message.""" + DELAY = "delay" + """Delay message.""" + TRANSACTION = "transaction" + """Transaction message.""" + + +class MessagingServicebusDispositionStatusValues(Enum): + COMPLETE = "complete" + """Message is completed.""" + ABANDON = "abandon" + """Message is abandoned.""" + DEAD_LETTER = "dead_letter" + """Message is sent to dead letter queue.""" + DEFER = "defer" + """Message is deferred.""" + + +class MessagingSystemValues(Enum): + ACTIVEMQ = "activemq" + """Apache ActiveMQ.""" + AWS_SQS = "aws_sqs" + """Amazon Simple Queue Service (SQS).""" + EVENTGRID = "eventgrid" + """Azure Event Grid.""" + EVENTHUBS = "eventhubs" + """Azure Event Hubs.""" + SERVICEBUS = "servicebus" + """Azure Service Bus.""" + GCP_PUBSUB = "gcp_pubsub" + """Google Cloud Pub/Sub.""" + JMS = "jms" + """Java Message Service.""" + KAFKA = "kafka" + """Apache Kafka.""" + RABBITMQ = "rabbitmq" + """RabbitMQ.""" + ROCKETMQ = "rocketmq" + """Apache RocketMQ.""" + PULSAR = "pulsar" + """Apache Pulsar.""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/net_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/net_attributes.py new file mode 100644 index 00000000..a1789a06 --- /dev/null +++ 
b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/net_attributes.py @@ -0,0 +1,121 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum +from typing import Final + +from deprecated import deprecated + +NET_HOST_IP: Final = "net.host.ip" +""" +Deprecated: Replaced by `network.local.address`. +""" + +NET_HOST_NAME: Final = "net.host.name" +""" +Deprecated: Replaced by `server.address`. +""" + +NET_HOST_PORT: Final = "net.host.port" +""" +Deprecated: Replaced by `server.port`. +""" + +NET_PEER_IP: Final = "net.peer.ip" +""" +Deprecated: Replaced by `network.peer.address`. +""" + +NET_PEER_NAME: Final = "net.peer.name" +""" +Deprecated: Replaced by `server.address` on client spans and `client.address` on server spans. +""" + +NET_PEER_PORT: Final = "net.peer.port" +""" +Deprecated: Replaced by `server.port` on client spans and `client.port` on server spans. +""" + +NET_PROTOCOL_NAME: Final = "net.protocol.name" +""" +Deprecated: Replaced by `network.protocol.name`. +""" + +NET_PROTOCOL_VERSION: Final = "net.protocol.version" +""" +Deprecated: Replaced by `network.protocol.version`. +""" + +NET_SOCK_FAMILY: Final = "net.sock.family" +""" +Deprecated: Split to `network.transport` and `network.type`. +""" + +NET_SOCK_HOST_ADDR: Final = "net.sock.host.addr" +""" +Deprecated: Replaced by `network.local.address`. 
+""" + +NET_SOCK_HOST_PORT: Final = "net.sock.host.port" +""" +Deprecated: Replaced by `network.local.port`. +""" + +NET_SOCK_PEER_ADDR: Final = "net.sock.peer.addr" +""" +Deprecated: Replaced by `network.peer.address`. +""" + +NET_SOCK_PEER_NAME: Final = "net.sock.peer.name" +""" +Deprecated: Removed. +""" + +NET_SOCK_PEER_PORT: Final = "net.sock.peer.port" +""" +Deprecated: Replaced by `network.peer.port`. +""" + +NET_TRANSPORT: Final = "net.transport" +""" +Deprecated: Replaced by `network.transport`. +""" + + +@deprecated( + reason="The attribute net.sock.family is deprecated - Split to `network.transport` and `network.type`" +) # type: ignore +class NetSockFamilyValues(Enum): + INET = "inet" + """IPv4 address.""" + INET6 = "inet6" + """IPv6 address.""" + UNIX = "unix" + """Unix domain socket path.""" + + +@deprecated( + reason="The attribute net.transport is deprecated - Replaced by `network.transport`" +) # type: ignore +class NetTransportValues(Enum): + IP_TCP = "ip_tcp" + """ip_tcp.""" + IP_UDP = "ip_udp" + """ip_udp.""" + PIPE = "pipe" + """Named or anonymous pipe.""" + INPROC = "inproc" + """In-process communication.""" + OTHER = "other" + """Something else (non IP-based).""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/network_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/network_attributes.py new file mode 100644 index 00000000..10de3e54 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/network_attributes.py @@ -0,0 +1,220 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum +from typing import Final + +from deprecated import deprecated + +NETWORK_CARRIER_ICC: Final = "network.carrier.icc" +""" +The ISO 3166-1 alpha-2 2-character country code associated with the mobile carrier network. +""" + +NETWORK_CARRIER_MCC: Final = "network.carrier.mcc" +""" +The mobile carrier country code. +""" + +NETWORK_CARRIER_MNC: Final = "network.carrier.mnc" +""" +The mobile carrier network code. +""" + +NETWORK_CARRIER_NAME: Final = "network.carrier.name" +""" +The name of the mobile carrier. +""" + +NETWORK_CONNECTION_STATE: Final = "network.connection.state" +""" +The state of network connection. +Note: Connection states are defined as part of the [rfc9293](https://datatracker.ietf.org/doc/html/rfc9293#section-3.3.2). +""" + +NETWORK_CONNECTION_SUBTYPE: Final = "network.connection.subtype" +""" +This describes more details regarding the connection.type. It may be the type of cell technology connection, but it could be used for describing details about a wifi connection. +""" + +NETWORK_CONNECTION_TYPE: Final = "network.connection.type" +""" +The internet connection type. +""" + +NETWORK_INTERFACE_NAME: Final = "network.interface.name" +""" +The network interface name. +""" + +NETWORK_IO_DIRECTION: Final = "network.io.direction" +""" +The network IO operation direction. +""" + +NETWORK_LOCAL_ADDRESS: Final = "network.local.address" +""" +Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.network_attributes.NETWORK_LOCAL_ADDRESS`. 
+""" + +NETWORK_LOCAL_PORT: Final = "network.local.port" +""" +Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.network_attributes.NETWORK_LOCAL_PORT`. +""" + +NETWORK_PEER_ADDRESS: Final = "network.peer.address" +""" +Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.network_attributes.NETWORK_PEER_ADDRESS`. +""" + +NETWORK_PEER_PORT: Final = "network.peer.port" +""" +Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.network_attributes.NETWORK_PEER_PORT`. +""" + +NETWORK_PROTOCOL_NAME: Final = "network.protocol.name" +""" +Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.network_attributes.NETWORK_PROTOCOL_NAME`. +""" + +NETWORK_PROTOCOL_VERSION: Final = "network.protocol.version" +""" +Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.network_attributes.NETWORK_PROTOCOL_VERSION`. +""" + +NETWORK_TRANSPORT: Final = "network.transport" +""" +Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.network_attributes.NETWORK_TRANSPORT`. +""" + +NETWORK_TYPE: Final = "network.type" +""" +Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.network_attributes.NETWORK_TYPE`. +""" + + +class NetworkConnectionStateValues(Enum): + CLOSED = "closed" + """closed.""" + CLOSE_WAIT = "close_wait" + """close_wait.""" + CLOSING = "closing" + """closing.""" + ESTABLISHED = "established" + """established.""" + FIN_WAIT_1 = "fin_wait_1" + """fin_wait_1.""" + FIN_WAIT_2 = "fin_wait_2" + """fin_wait_2.""" + LAST_ACK = "last_ack" + """last_ack.""" + LISTEN = "listen" + """listen.""" + SYN_RECEIVED = "syn_received" + """syn_received.""" + SYN_SENT = "syn_sent" + """syn_sent.""" + TIME_WAIT = "time_wait" + """time_wait.""" + + +class NetworkConnectionSubtypeValues(Enum): + GPRS = "gprs" + """GPRS.""" + EDGE = "edge" + """EDGE.""" + UMTS = "umts" + """UMTS.""" + CDMA = "cdma" + """CDMA.""" + EVDO_0 = "evdo_0" + """EVDO Rel. 
0.""" + EVDO_A = "evdo_a" + """EVDO Rev. A.""" + CDMA2000_1XRTT = "cdma2000_1xrtt" + """CDMA2000 1XRTT.""" + HSDPA = "hsdpa" + """HSDPA.""" + HSUPA = "hsupa" + """HSUPA.""" + HSPA = "hspa" + """HSPA.""" + IDEN = "iden" + """IDEN.""" + EVDO_B = "evdo_b" + """EVDO Rev. B.""" + LTE = "lte" + """LTE.""" + EHRPD = "ehrpd" + """EHRPD.""" + HSPAP = "hspap" + """HSPAP.""" + GSM = "gsm" + """GSM.""" + TD_SCDMA = "td_scdma" + """TD-SCDMA.""" + IWLAN = "iwlan" + """IWLAN.""" + NR = "nr" + """5G NR (New Radio).""" + NRNSA = "nrnsa" + """5G NRNSA (New Radio Non-Standalone).""" + LTE_CA = "lte_ca" + """LTE CA.""" + + +class NetworkConnectionTypeValues(Enum): + WIFI = "wifi" + """wifi.""" + WIRED = "wired" + """wired.""" + CELL = "cell" + """cell.""" + UNAVAILABLE = "unavailable" + """unavailable.""" + UNKNOWN = "unknown" + """unknown.""" + + +class NetworkIoDirectionValues(Enum): + TRANSMIT = "transmit" + """transmit.""" + RECEIVE = "receive" + """receive.""" + + +@deprecated( + reason="Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.network_attributes.NetworkTransportValues`." +) # type: ignore +class NetworkTransportValues(Enum): + TCP = "tcp" + """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.network_attributes.NetworkTransportValues.TCP`.""" + UDP = "udp" + """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.network_attributes.NetworkTransportValues.UDP`.""" + PIPE = "pipe" + """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.network_attributes.NetworkTransportValues.PIPE`.""" + UNIX = "unix" + """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.network_attributes.NetworkTransportValues.UNIX`.""" + QUIC = "quic" + """QUIC.""" + + +@deprecated( + reason="Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.network_attributes.NetworkTypeValues`." 
+) # type: ignore +class NetworkTypeValues(Enum): + IPV4 = "ipv4" + """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.network_attributes.NetworkTypeValues.IPV4`.""" + IPV6 = "ipv6" + """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.network_attributes.NetworkTypeValues.IPV6`.""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/oci_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/oci_attributes.py new file mode 100644 index 00000000..ba721dff --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/oci_attributes.py @@ -0,0 +1,22 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Final + +OCI_MANIFEST_DIGEST: Final = "oci.manifest.digest" +""" +The digest of the OCI image manifest. For container images specifically is the digest by which the container image is known. +Note: Follows [OCI Image Manifest Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md), and specifically the [Digest property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests). +An example can be found in [Example Image Manifest](https://github.com/opencontainers/image-spec/blob/main/manifest.md#example-image-manifest). 
+""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/opentracing_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/opentracing_attributes.py new file mode 100644 index 00000000..0c1ae088 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/opentracing_attributes.py @@ -0,0 +1,29 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum +from typing import Final + +OPENTRACING_REF_TYPE: Final = "opentracing.ref_type" +""" +Parent-child Reference type. +Note: The causal relationship between a child Span and a parent Span. 
+""" + + +class OpentracingRefTypeValues(Enum): + CHILD_OF = "child_of" + """The parent Span depends on the child Span in some capacity.""" + FOLLOWS_FROM = "follows_from" + """The parent Span doesn't depend in any way on the result of the child Span.""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/os_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/os_attributes.py new file mode 100644 index 00000000..0e899fe2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/os_attributes.py @@ -0,0 +1,66 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum +from typing import Final + +OS_BUILD_ID: Final = "os.build_id" +""" +Unique identifier for a particular build or compilation of the operating system. +""" + +OS_DESCRIPTION: Final = "os.description" +""" +Human readable (not intended to be parsed) OS version information, like e.g. reported by `ver` or `lsb_release -a` commands. +""" + +OS_NAME: Final = "os.name" +""" +Human readable operating system name. +""" + +OS_TYPE: Final = "os.type" +""" +The operating system type. +""" + +OS_VERSION: Final = "os.version" +""" +The version string of the operating system as defined in [Version Attributes](/docs/resource/README.md#version-attributes). 
+""" + + +class OsTypeValues(Enum): + WINDOWS = "windows" + """Microsoft Windows.""" + LINUX = "linux" + """Linux.""" + DARWIN = "darwin" + """Apple Darwin.""" + FREEBSD = "freebsd" + """FreeBSD.""" + NETBSD = "netbsd" + """NetBSD.""" + OPENBSD = "openbsd" + """OpenBSD.""" + DRAGONFLYBSD = "dragonflybsd" + """DragonFly BSD.""" + HPUX = "hpux" + """HP-UX (Hewlett Packard Unix).""" + AIX = "aix" + """AIX (Advanced Interactive eXecutive).""" + SOLARIS = "solaris" + """SunOS, Oracle Solaris.""" + Z_OS = "z_os" + """IBM z/OS.""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/otel_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/otel_attributes.py new file mode 100644 index 00000000..d8b5eefe --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/otel_attributes.py @@ -0,0 +1,110 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum +from typing import Final + +from deprecated import deprecated + +OTEL_COMPONENT_NAME: Final = "otel.component.name" +""" +A name uniquely identifying the instance of the OpenTelemetry component within its containing SDK instance. +Note: Implementations SHOULD ensure a low cardinality for this attribute, even across application or SDK restarts. +E.g. implementations MUST NOT use UUIDs as values for this attribute. 
+ +Implementations MAY achieve these goals by following a `<otel.component.type>/<instance-counter>` pattern, e.g. `batching_span_processor/0`. +Hereby `otel.component.type` refers to the corresponding attribute value of the component. + +The value of `instance-counter` MAY be automatically assigned by the component and uniqueness within the enclosing SDK instance MUST be guaranteed. +For example, `<instance-counter>` MAY be implemented by using a monotonically increasing counter (starting with `0`), which is incremented every time an +instance of the given component type is started. + +With this implementation, for example the first Batching Span Processor would have `batching_span_processor/0` +as `otel.component.name`, the second one `batching_span_processor/1` and so on. +These values will therefore be reused in the case of an application restart. +""" + +OTEL_COMPONENT_TYPE: Final = "otel.component.type" +""" +A name identifying the type of the OpenTelemetry component. +Note: If none of the standardized values apply, implementations SHOULD use the language-defined name of the type. +E.g. for Java the fully qualified classname SHOULD be used in this case. +""" + +OTEL_LIBRARY_NAME: Final = "otel.library.name" +""" +Deprecated: Use the `otel.scope.name` attribute. +""" + +OTEL_LIBRARY_VERSION: Final = "otel.library.version" +""" +Deprecated: Use the `otel.scope.version` attribute. +""" + +OTEL_SCOPE_NAME: Final = "otel.scope.name" +""" +Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.otel_attributes.OTEL_SCOPE_NAME`. +""" + +OTEL_SCOPE_VERSION: Final = "otel.scope.version" +""" +Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.otel_attributes.OTEL_SCOPE_VERSION`. +""" + +OTEL_SPAN_SAMPLING_RESULT: Final = "otel.span.sampling_result" +""" +The result value of the sampler for this span. 
+""" + +OTEL_STATUS_CODE: Final = "otel.status_code" +""" +Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.otel_attributes.OTEL_STATUS_CODE`. +""" + +OTEL_STATUS_DESCRIPTION: Final = "otel.status_description" +""" +Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.otel_attributes.OTEL_STATUS_DESCRIPTION`. +""" + + +class OtelComponentTypeValues(Enum): + BATCHING_SPAN_PROCESSOR = "batching_span_processor" + """The builtin SDK Batching Span Processor.""" + SIMPLE_SPAN_PROCESSOR = "simple_span_processor" + """The builtin SDK Simple Span Processor.""" + OTLP_GRPC_SPAN_EXPORTER = "otlp_grpc_span_exporter" + """OTLP span exporter over gRPC with protobuf serialization.""" + OTLP_HTTP_SPAN_EXPORTER = "otlp_http_span_exporter" + """OTLP span exporter over HTTP with protobuf serialization.""" + OTLP_HTTP_JSON_SPAN_EXPORTER = "otlp_http_json_span_exporter" + """OTLP span exporter over HTTP with JSON serialization.""" + + +class OtelSpanSamplingResultValues(Enum): + DROP = "DROP" + """The span is not sampled and not recording.""" + RECORD_ONLY = "RECORD_ONLY" + """The span is not sampled, but recording.""" + RECORD_AND_SAMPLE = "RECORD_AND_SAMPLE" + """The span is sampled and recording.""" + + +@deprecated( + reason="Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.otel_attributes.OtelStatusCodeValues`." 
+) # type: ignore +class OtelStatusCodeValues(Enum): + OK = "OK" + """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.otel_attributes.OtelStatusCodeValues.OK`.""" + ERROR = "ERROR" + """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.otel_attributes.OtelStatusCodeValues.ERROR`.""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/other_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/other_attributes.py new file mode 100644 index 00000000..2f4ae932 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/other_attributes.py @@ -0,0 +1,33 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum +from typing import Final + +from deprecated import deprecated + +STATE: Final = "state" +""" +Deprecated: Replaced by `db.client.connection.state`. 
+""" + + +@deprecated( + reason="The attribute state is deprecated - Replaced by `db.client.connection.state`" +) # type: ignore +class StateValues(Enum): + IDLE = "idle" + """idle.""" + USED = "used" + """used.""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/peer_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/peer_attributes.py new file mode 100644 index 00000000..eac8e77c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/peer_attributes.py @@ -0,0 +1,20 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Final + +PEER_SERVICE: Final = "peer.service" +""" +The [`service.name`](/docs/resource/README.md#service) of the remote service. SHOULD be equal to the actual `service.name` resource attribute of the remote service if any. 
+""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/pool_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/pool_attributes.py new file mode 100644 index 00000000..6e0d70fa --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/pool_attributes.py @@ -0,0 +1,20 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Final + +POOL_NAME: Final = "pool.name" +""" +Deprecated: Replaced by `db.client.connection.pool.name`. +""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/process_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/process_attributes.py new file mode 100644 index 00000000..9011c68f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/process_attributes.py @@ -0,0 +1,227 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum +from typing import Final + +from deprecated import deprecated + +PROCESS_ARGS_COUNT: Final = "process.args_count" +""" +Length of the process.command_args array. +Note: This field can be useful for querying or performing bucket analysis on how many arguments were provided to start a process. More arguments may be an indication of suspicious activity. +""" + +PROCESS_COMMAND: Final = "process.command" +""" +The command used to launch the process (i.e. the command name). On Linux based systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to the first parameter extracted from `GetCommandLineW`. +""" + +PROCESS_COMMAND_ARGS: Final = "process.command_args" +""" +All the command arguments (including the command/executable itself) as received by the process. On Linux-based systems (and some other Unixoid systems supporting procfs), can be set according to the list of null-delimited strings extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be the full argv vector passed to `main`. +""" + +PROCESS_COMMAND_LINE: Final = "process.command_line" +""" +The full command used to launch the process as a single string representing the full command. On Windows, can be set to the result of `GetCommandLineW`. Do not set this if you have to assemble it just for monitoring; use `process.command_args` instead. +""" + +PROCESS_CONTEXT_SWITCH_TYPE: Final = "process.context_switch_type" +""" +Specifies whether the context switches for this data point were voluntary or involuntary. 
+""" + +PROCESS_CPU_STATE: Final = "process.cpu.state" +""" +Deprecated: Replaced by `cpu.mode`. +""" + +PROCESS_CREATION_TIME: Final = "process.creation.time" +""" +The date and time the process was created, in ISO 8601 format. +""" + +PROCESS_EXECUTABLE_BUILD_ID_GNU: Final = "process.executable.build_id.gnu" +""" +The GNU build ID as found in the `.note.gnu.build-id` ELF section (hex string). +""" + +PROCESS_EXECUTABLE_BUILD_ID_GO: Final = "process.executable.build_id.go" +""" +The Go build ID as retrieved by `go tool buildid <go executable>`. +""" + +PROCESS_EXECUTABLE_BUILD_ID_HTLHASH: Final = ( + "process.executable.build_id.htlhash" +) +""" +Profiling specific build ID for executables. See the OTel specification for Profiles for more information. +""" + +PROCESS_EXECUTABLE_BUILD_ID_PROFILING: Final = ( + "process.executable.build_id.profiling" +) +""" +Deprecated: Replaced by `process.executable.build_id.htlhash`. +""" + +PROCESS_EXECUTABLE_NAME: Final = "process.executable.name" +""" +The name of the process executable. On Linux based systems, this SHOULD be set to the base name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to the base name of `GetProcessImageFileNameW`. +""" + +PROCESS_EXECUTABLE_PATH: Final = "process.executable.path" +""" +The full path to the process executable. On Linux based systems, can be set to the target of `proc/[pid]/exe`. On Windows, can be set to the result of `GetProcessImageFileNameW`. +""" + +PROCESS_EXIT_CODE: Final = "process.exit.code" +""" +The exit code of the process. +""" + +PROCESS_EXIT_TIME: Final = "process.exit.time" +""" +The date and time the process exited, in ISO 8601 format. +""" + +PROCESS_GROUP_LEADER_PID: Final = "process.group_leader.pid" +""" +The PID of the process's group leader. This is also the process group ID (PGID) of the process. +""" + +PROCESS_INTERACTIVE: Final = "process.interactive" +""" +Whether the process is connected to an interactive shell. 
+""" + +PROCESS_LINUX_CGROUP: Final = "process.linux.cgroup" +""" +The control group associated with the process. +Note: Control groups (cgroups) are a kernel feature used to organize and manage process resources. This attribute provides the path(s) to the cgroup(s) associated with the process, which should match the contents of the [/proc/\\[PID\\]/cgroup](https://man7.org/linux/man-pages/man7/cgroups.7.html) file. +""" + +PROCESS_OWNER: Final = "process.owner" +""" +The username of the user that owns the process. +""" + +PROCESS_PAGING_FAULT_TYPE: Final = "process.paging.fault_type" +""" +The type of page fault for this data point. Type `major` is for major/hard page faults, and `minor` is for minor/soft page faults. +""" + +PROCESS_PARENT_PID: Final = "process.parent_pid" +""" +Parent Process identifier (PPID). +""" + +PROCESS_PID: Final = "process.pid" +""" +Process identifier (PID). +""" + +PROCESS_REAL_USER_ID: Final = "process.real_user.id" +""" +The real user ID (RUID) of the process. +""" + +PROCESS_REAL_USER_NAME: Final = "process.real_user.name" +""" +The username of the real user of the process. +""" + +PROCESS_RUNTIME_DESCRIPTION: Final = "process.runtime.description" +""" +An additional description about the runtime of the process, for example a specific vendor customization of the runtime environment. +""" + +PROCESS_RUNTIME_NAME: Final = "process.runtime.name" +""" +The name of the runtime of this process. +""" + +PROCESS_RUNTIME_VERSION: Final = "process.runtime.version" +""" +The version of the runtime of this process, as returned by the runtime without modification. +""" + +PROCESS_SAVED_USER_ID: Final = "process.saved_user.id" +""" +The saved user ID (SUID) of the process. +""" + +PROCESS_SAVED_USER_NAME: Final = "process.saved_user.name" +""" +The username of the saved user. +""" + +PROCESS_SESSION_LEADER_PID: Final = "process.session_leader.pid" +""" +The PID of the process's session leader. This is also the session ID (SID) of the process. 
+""" + +PROCESS_TITLE: Final = "process.title" +""" +Process title (proctitle). +Note: In many Unix-like systems, process title (proctitle), is the string that represents the name or command line of a running process, displayed by system monitoring tools like ps, top, and htop. +""" + +PROCESS_USER_ID: Final = "process.user.id" +""" +The effective user ID (EUID) of the process. +""" + +PROCESS_USER_NAME: Final = "process.user.name" +""" +The username of the effective user of the process. +""" + +PROCESS_VPID: Final = "process.vpid" +""" +Virtual process identifier. +Note: The process ID within a PID namespace. This is not necessarily unique across all processes on the host but it is unique within the process namespace that the process exists within. +""" + +PROCESS_WORKING_DIRECTORY: Final = "process.working_directory" +""" +The working directory of the process. +""" + + +class ProcessContextSwitchTypeValues(Enum): + VOLUNTARY = "voluntary" + """voluntary.""" + INVOLUNTARY = "involuntary" + """involuntary.""" + + +@deprecated( + reason="The attribute process.cpu.state is deprecated - Replaced by `cpu.mode`" +) # type: ignore +class ProcessCpuStateValues(Enum): + SYSTEM = "system" + """system.""" + USER = "user" + """user.""" + WAIT = "wait" + """wait.""" + + +class ProcessPagingFaultTypeValues(Enum): + MAJOR = "major" + """major.""" + MINOR = "minor" + """minor.""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/profile_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/profile_attributes.py new file mode 100644 index 00000000..869f2591 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/profile_attributes.py @@ -0,0 +1,44 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum +from typing import Final + +PROFILE_FRAME_TYPE: Final = "profile.frame.type" +""" +Describes the interpreter or compiler of a single frame. +""" + + +class ProfileFrameTypeValues(Enum): + DOTNET = "dotnet" + """[.NET](https://wikipedia.org/wiki/.NET).""" + JVM = "jvm" + """[JVM](https://wikipedia.org/wiki/Java_virtual_machine).""" + KERNEL = "kernel" + """[Kernel](https://wikipedia.org/wiki/Kernel_(operating_system)).""" + NATIVE = "native" + """[C](https://wikipedia.org/wiki/C_(programming_language)), [C++](https://wikipedia.org/wiki/C%2B%2B), [Go](https://wikipedia.org/wiki/Go_(programming_language)), [Rust](https://wikipedia.org/wiki/Rust_(programming_language)).""" + PERL = "perl" + """[Perl](https://wikipedia.org/wiki/Perl).""" + PHP = "php" + """[PHP](https://wikipedia.org/wiki/PHP).""" + CPYTHON = "cpython" + """[Python](https://wikipedia.org/wiki/Python_(programming_language)).""" + RUBY = "ruby" + """[Ruby](https://wikipedia.org/wiki/Ruby_(programming_language)).""" + V8JS = "v8js" + """[V8JS](https://wikipedia.org/wiki/V8_(JavaScript_engine)).""" + BEAM = "beam" + """[Erlang](https://en.wikipedia.org/wiki/BEAM_(Erlang_virtual_machine)).""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/rpc_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/rpc_attributes.py new file mode 100644 index 00000000..ff5b035a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/rpc_attributes.py @@ -0,0 +1,204 
@@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum +from typing import Final + +RPC_CONNECT_RPC_ERROR_CODE: Final = "rpc.connect_rpc.error_code" +""" +The [error codes](https://connectrpc.com//docs/protocol/#error-codes) of the Connect request. Error codes are always string values. +""" + +RPC_CONNECT_RPC_REQUEST_METADATA_TEMPLATE: Final = ( + "rpc.connect_rpc.request.metadata" +) +""" +Connect request metadata, `<key>` being the normalized Connect Metadata key (lowercase), the value being the metadata values. +Note: Instrumentations SHOULD require an explicit configuration of which metadata values are to be captured. Including all request metadata values can be a security risk - explicit configuration helps avoid leaking sensitive information. +""" + +RPC_CONNECT_RPC_RESPONSE_METADATA_TEMPLATE: Final = ( + "rpc.connect_rpc.response.metadata" +) +""" +Connect response metadata, `<key>` being the normalized Connect Metadata key (lowercase), the value being the metadata values. +Note: Instrumentations SHOULD require an explicit configuration of which metadata values are to be captured. Including all response metadata values can be a security risk - explicit configuration helps avoid leaking sensitive information. 
+""" + +RPC_GRPC_REQUEST_METADATA_TEMPLATE: Final = "rpc.grpc.request.metadata" +""" +gRPC request metadata, `<key>` being the normalized gRPC Metadata key (lowercase), the value being the metadata values. +Note: Instrumentations SHOULD require an explicit configuration of which metadata values are to be captured. Including all request metadata values can be a security risk - explicit configuration helps avoid leaking sensitive information. +""" + +RPC_GRPC_RESPONSE_METADATA_TEMPLATE: Final = "rpc.grpc.response.metadata" +""" +gRPC response metadata, `<key>` being the normalized gRPC Metadata key (lowercase), the value being the metadata values. +Note: Instrumentations SHOULD require an explicit configuration of which metadata values are to be captured. Including all response metadata values can be a security risk - explicit configuration helps avoid leaking sensitive information. +""" + +RPC_GRPC_STATUS_CODE: Final = "rpc.grpc.status_code" +""" +The [numeric status code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC request. +""" + +RPC_JSONRPC_ERROR_CODE: Final = "rpc.jsonrpc.error_code" +""" +`error.code` property of response if it is an error response. +""" + +RPC_JSONRPC_ERROR_MESSAGE: Final = "rpc.jsonrpc.error_message" +""" +`error.message` property of response if it is an error response. +""" + +RPC_JSONRPC_REQUEST_ID: Final = "rpc.jsonrpc.request_id" +""" +`id` property of request or response. Since protocol allows id to be int, string, `null` or missing (for notifications), value is expected to be cast to string for simplicity. Use empty string in case of `null` value. Omit entirely if this is a notification. +""" + +RPC_JSONRPC_VERSION: Final = "rpc.jsonrpc.version" +""" +Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 doesn't specify this, the value can be omitted. +""" + +RPC_MESSAGE_COMPRESSED_SIZE: Final = "rpc.message.compressed_size" +""" +Compressed size of the message in bytes. 
+""" + +RPC_MESSAGE_ID: Final = "rpc.message.id" +""" +MUST be calculated as two different counters starting from `1` one for sent messages and one for received message. +Note: This way we guarantee that the values will be consistent between different implementations. +""" + +RPC_MESSAGE_TYPE: Final = "rpc.message.type" +""" +Whether this is a received or sent message. +""" + +RPC_MESSAGE_UNCOMPRESSED_SIZE: Final = "rpc.message.uncompressed_size" +""" +Uncompressed size of the message in bytes. +""" + +RPC_METHOD: Final = "rpc.method" +""" +The name of the (logical) method being called, must be equal to the $method part in the span name. +Note: This is the logical name of the method from the RPC interface perspective, which can be different from the name of any implementing method/function. The `code.function.name` attribute may be used to store the latter (e.g., method actually executing the call on the server side, RPC client stub method on the client side). +""" + +RPC_SERVICE: Final = "rpc.service" +""" +The full (logical) name of the service being called, including its package name, if applicable. +Note: This is the logical name of the service from the RPC interface perspective, which can be different from the name of any implementing class. The `code.namespace` attribute may be used to store the latter (despite the attribute name, it may include a class name; e.g., class with method actually executing the call on the server side, RPC client stub class on the client side). +""" + +RPC_SYSTEM: Final = "rpc.system" +""" +A string identifying the remoting system. See below for a list of well-known identifiers. 
+""" + + +class RpcConnectRpcErrorCodeValues(Enum): + CANCELLED = "cancelled" + """cancelled.""" + UNKNOWN = "unknown" + """unknown.""" + INVALID_ARGUMENT = "invalid_argument" + """invalid_argument.""" + DEADLINE_EXCEEDED = "deadline_exceeded" + """deadline_exceeded.""" + NOT_FOUND = "not_found" + """not_found.""" + ALREADY_EXISTS = "already_exists" + """already_exists.""" + PERMISSION_DENIED = "permission_denied" + """permission_denied.""" + RESOURCE_EXHAUSTED = "resource_exhausted" + """resource_exhausted.""" + FAILED_PRECONDITION = "failed_precondition" + """failed_precondition.""" + ABORTED = "aborted" + """aborted.""" + OUT_OF_RANGE = "out_of_range" + """out_of_range.""" + UNIMPLEMENTED = "unimplemented" + """unimplemented.""" + INTERNAL = "internal" + """internal.""" + UNAVAILABLE = "unavailable" + """unavailable.""" + DATA_LOSS = "data_loss" + """data_loss.""" + UNAUTHENTICATED = "unauthenticated" + """unauthenticated.""" + + +class RpcGrpcStatusCodeValues(Enum): + OK = 0 + """OK.""" + CANCELLED = 1 + """CANCELLED.""" + UNKNOWN = 2 + """UNKNOWN.""" + INVALID_ARGUMENT = 3 + """INVALID_ARGUMENT.""" + DEADLINE_EXCEEDED = 4 + """DEADLINE_EXCEEDED.""" + NOT_FOUND = 5 + """NOT_FOUND.""" + ALREADY_EXISTS = 6 + """ALREADY_EXISTS.""" + PERMISSION_DENIED = 7 + """PERMISSION_DENIED.""" + RESOURCE_EXHAUSTED = 8 + """RESOURCE_EXHAUSTED.""" + FAILED_PRECONDITION = 9 + """FAILED_PRECONDITION.""" + ABORTED = 10 + """ABORTED.""" + OUT_OF_RANGE = 11 + """OUT_OF_RANGE.""" + UNIMPLEMENTED = 12 + """UNIMPLEMENTED.""" + INTERNAL = 13 + """INTERNAL.""" + UNAVAILABLE = 14 + """UNAVAILABLE.""" + DATA_LOSS = 15 + """DATA_LOSS.""" + UNAUTHENTICATED = 16 + """UNAUTHENTICATED.""" + + +class RpcMessageTypeValues(Enum): + SENT = "SENT" + """sent.""" + RECEIVED = "RECEIVED" + """received.""" + + +class RpcSystemValues(Enum): + GRPC = "grpc" + """gRPC.""" + JAVA_RMI = "java_rmi" + """Java RMI.""" + DOTNET_WCF = "dotnet_wcf" + """.NET WCF.""" + APACHE_DUBBO = "apache_dubbo" + """Apache 
Dubbo.""" + CONNECT_RPC = "connect_rpc" + """Connect RPC.""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/security_rule_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/security_rule_attributes.py new file mode 100644 index 00000000..f6fbd0e3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/security_rule_attributes.py @@ -0,0 +1,56 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Final + +SECURITY_RULE_CATEGORY: Final = "security_rule.category" +""" +A categorization value keyword used by the entity using the rule for detection of this event. +""" + +SECURITY_RULE_DESCRIPTION: Final = "security_rule.description" +""" +The description of the rule generating the event. +""" + +SECURITY_RULE_LICENSE: Final = "security_rule.license" +""" +Name of the license under which the rule used to generate this event is made available. +""" + +SECURITY_RULE_NAME: Final = "security_rule.name" +""" +The name of the rule or signature generating the event. +""" + +SECURITY_RULE_REFERENCE: Final = "security_rule.reference" +""" +Reference URL to additional information about the rule used to generate this event. +Note: The URL can point to the vendor’s documentation about the rule. If that’s not available, it can also be a link to a more general page describing this type of alert. 
+""" + +SECURITY_RULE_RULESET_NAME: Final = "security_rule.ruleset.name" +""" +Name of the ruleset, policy, group, or parent category in which the rule used to generate this event is a member. +""" + +SECURITY_RULE_UUID: Final = "security_rule.uuid" +""" +A rule ID that is unique within the scope of a set or group of agents, observers, or other entities using the rule for detection of this event. +""" + +SECURITY_RULE_VERSION: Final = "security_rule.version" +""" +The version / revision of the rule being used for analysis. +""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/server_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/server_attributes.py new file mode 100644 index 00000000..a9e3ab43 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/server_attributes.py @@ -0,0 +1,25 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Final + +SERVER_ADDRESS: Final = "server.address" +""" +Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.server_attributes.SERVER_ADDRESS`. +""" + +SERVER_PORT: Final = "server.port" +""" +Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.server_attributes.SERVER_PORT`. 
+""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/service_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/service_attributes.py new file mode 100644 index 00000000..f50686ff --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/service_attributes.py @@ -0,0 +1,62 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Final + +SERVICE_INSTANCE_ID: Final = "service.instance.id" +""" +The string ID of the service instance. +Note: MUST be unique for each instance of the same `service.namespace,service.name` pair (in other words +`service.namespace,service.name,service.instance.id` triplet MUST be globally unique). The ID helps to +distinguish instances of the same service that exist at the same time (e.g. instances of a horizontally scaled +service). + +Implementations, such as SDKs, are recommended to generate a random Version 1 or Version 4 [RFC +4122](https://www.ietf.org/rfc/rfc4122.txt) UUID, but are free to use an inherent unique ID as the source of +this value if stability is desirable. In that case, the ID SHOULD be used as source of a UUID Version 5 and +SHOULD use the following UUID as the namespace: `4d63009a-8d0f-11ee-aad7-4c796ed8e320`. + +UUIDs are typically recommended, as only an opaque value for the purposes of identifying a service instance is +needed. 
Similar to what can be seen in the man page for the +[`/etc/machine-id`](https://www.freedesktop.org/software/systemd/man/latest/machine-id.html) file, the underlying +data, such as pod name and namespace should be treated as confidential, being the user's choice to expose it +or not via another resource attribute. + +For applications running behind an application server (like unicorn), we do not recommend using one identifier +for all processes participating in the application. Instead, it's recommended each division (e.g. a worker +thread in unicorn) to have its own instance.id. + +It's not recommended for a Collector to set `service.instance.id` if it can't unambiguously determine the +service instance that is generating that telemetry. For instance, creating an UUID based on `pod.name` will +likely be wrong, as the Collector might not know from which container within that pod the telemetry originated. +However, Collectors can set the `service.instance.id` if they can unambiguously determine the service instance +for that telemetry. This is typically the case for scraping receivers, as they know the target address and +port. +""" + +SERVICE_NAME: Final = "service.name" +""" +Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.service_attributes.SERVICE_NAME`. +""" + +SERVICE_NAMESPACE: Final = "service.namespace" +""" +A namespace for `service.name`. +Note: A string value having a meaning that helps to distinguish a group of services, for example the team name that owns a group of services. `service.name` is expected to be unique within the same namespace. If `service.namespace` is not specified in the Resource then `service.name` is expected to be unique for all services that have no explicit namespace defined (so the empty/unspecified namespace is simply one more valid namespace). Zero-length namespace string is assumed equal to unspecified namespace. 
+""" + +SERVICE_VERSION: Final = "service.version" +""" +Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.service_attributes.SERVICE_VERSION`. +""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/session_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/session_attributes.py new file mode 100644 index 00000000..1d5ff340 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/session_attributes.py @@ -0,0 +1,25 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Final + +SESSION_ID: Final = "session.id" +""" +A unique id to identify a session. +""" + +SESSION_PREVIOUS_ID: Final = "session.previous_id" +""" +The previous `session.id` for this user, when known. +""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/source_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/source_attributes.py new file mode 100644 index 00000000..ea49387f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/source_attributes.py @@ -0,0 +1,26 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Final + +SOURCE_ADDRESS: Final = "source.address" +""" +Source address - domain name if available without reverse DNS lookup; otherwise, IP address or Unix domain socket name. +Note: When observed from the destination side, and when communicating through an intermediary, `source.address` SHOULD represent the source address behind any intermediaries, for example proxies, if it's available. +""" + +SOURCE_PORT: Final = "source.port" +""" +Source port number. +""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/system_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/system_attributes.py new file mode 100644 index 00000000..aae23bf9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/system_attributes.py @@ -0,0 +1,221 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from enum import Enum +from typing import Final + +from deprecated import deprecated + +SYSTEM_CPU_LOGICAL_NUMBER: Final = "system.cpu.logical_number" +""" +Deprecated, use `cpu.logical_number` instead. +""" + +SYSTEM_CPU_STATE: Final = "system.cpu.state" +""" +Deprecated: Replaced by `cpu.mode`. +""" + +SYSTEM_DEVICE: Final = "system.device" +""" +The device identifier. +""" + +SYSTEM_FILESYSTEM_MODE: Final = "system.filesystem.mode" +""" +The filesystem mode. +""" + +SYSTEM_FILESYSTEM_MOUNTPOINT: Final = "system.filesystem.mountpoint" +""" +The filesystem mount path. +""" + +SYSTEM_FILESYSTEM_STATE: Final = "system.filesystem.state" +""" +The filesystem state. +""" + +SYSTEM_FILESYSTEM_TYPE: Final = "system.filesystem.type" +""" +The filesystem type. +""" + +SYSTEM_MEMORY_STATE: Final = "system.memory.state" +""" +The memory state. +""" + +SYSTEM_NETWORK_STATE: Final = "system.network.state" +""" +Deprecated: Removed, report network connection state with `network.connection.state` attribute. +""" + +SYSTEM_PAGING_DIRECTION: Final = "system.paging.direction" +""" +The paging access direction. +""" + +SYSTEM_PAGING_STATE: Final = "system.paging.state" +""" +The memory paging state. +""" + +SYSTEM_PAGING_TYPE: Final = "system.paging.type" +""" +The memory paging type. +""" + +SYSTEM_PROCESS_STATUS: Final = "system.process.status" +""" +The process state, e.g., [Linux Process State Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES). +""" + +SYSTEM_PROCESSES_STATUS: Final = "system.processes.status" +""" +Deprecated: Replaced by `system.process.status`. 
+""" + + +@deprecated( + reason="The attribute system.cpu.state is deprecated - Replaced by `cpu.mode`" +) # type: ignore +class SystemCpuStateValues(Enum): + USER = "user" + """user.""" + SYSTEM = "system" + """system.""" + NICE = "nice" + """nice.""" + IDLE = "idle" + """idle.""" + IOWAIT = "iowait" + """iowait.""" + INTERRUPT = "interrupt" + """interrupt.""" + STEAL = "steal" + """steal.""" + + +class SystemFilesystemStateValues(Enum): + USED = "used" + """used.""" + FREE = "free" + """free.""" + RESERVED = "reserved" + """reserved.""" + + +class SystemFilesystemTypeValues(Enum): + FAT32 = "fat32" + """fat32.""" + EXFAT = "exfat" + """exfat.""" + NTFS = "ntfs" + """ntfs.""" + REFS = "refs" + """refs.""" + HFSPLUS = "hfsplus" + """hfsplus.""" + EXT4 = "ext4" + """ext4.""" + + +class SystemMemoryStateValues(Enum): + USED = "used" + """used.""" + FREE = "free" + """free.""" + SHARED = "shared" + """Deprecated: Removed, report shared memory usage with `metric.system.memory.shared` metric.""" + BUFFERS = "buffers" + """buffers.""" + CACHED = "cached" + """cached.""" + + +@deprecated( + reason="The attribute system.network.state is deprecated - Removed, report network connection state with `network.connection.state` attribute" +) # type: ignore +class SystemNetworkStateValues(Enum): + CLOSE = "close" + """close.""" + CLOSE_WAIT = "close_wait" + """close_wait.""" + CLOSING = "closing" + """closing.""" + DELETE = "delete" + """delete.""" + ESTABLISHED = "established" + """established.""" + FIN_WAIT_1 = "fin_wait_1" + """fin_wait_1.""" + FIN_WAIT_2 = "fin_wait_2" + """fin_wait_2.""" + LAST_ACK = "last_ack" + """last_ack.""" + LISTEN = "listen" + """listen.""" + SYN_RECV = "syn_recv" + """syn_recv.""" + SYN_SENT = "syn_sent" + """syn_sent.""" + TIME_WAIT = "time_wait" + """time_wait.""" + + +class SystemPagingDirectionValues(Enum): + IN = "in" + """in.""" + OUT = "out" + """out.""" + + +class SystemPagingStateValues(Enum): + USED = "used" + """used.""" + FREE = "free" + 
"""free.""" + + +class SystemPagingTypeValues(Enum): + MAJOR = "major" + """major.""" + MINOR = "minor" + """minor.""" + + +class SystemProcessStatusValues(Enum): + RUNNING = "running" + """running.""" + SLEEPING = "sleeping" + """sleeping.""" + STOPPED = "stopped" + """stopped.""" + DEFUNCT = "defunct" + """defunct.""" + + +@deprecated( + reason="The attribute system.processes.status is deprecated - Replaced by `system.process.status`" +) # type: ignore +class SystemProcessesStatusValues(Enum): + RUNNING = "running" + """running.""" + SLEEPING = "sleeping" + """sleeping.""" + STOPPED = "stopped" + """stopped.""" + DEFUNCT = "defunct" + """defunct.""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/telemetry_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/telemetry_attributes.py new file mode 100644 index 00000000..3ba6d9f7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/telemetry_attributes.py @@ -0,0 +1,75 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum +from typing import Final + +from deprecated import deprecated + +TELEMETRY_DISTRO_NAME: Final = "telemetry.distro.name" +""" +The name of the auto instrumentation agent or distribution, if used. 
+Note: Official auto instrumentation agents and distributions SHOULD set the `telemetry.distro.name` attribute to +a string starting with `opentelemetry-`, e.g. `opentelemetry-java-instrumentation`. +""" + +TELEMETRY_DISTRO_VERSION: Final = "telemetry.distro.version" +""" +The version string of the auto instrumentation agent or distribution, if used. +""" + +TELEMETRY_SDK_LANGUAGE: Final = "telemetry.sdk.language" +""" +Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.telemetry_attributes.TELEMETRY_SDK_LANGUAGE`. +""" + +TELEMETRY_SDK_NAME: Final = "telemetry.sdk.name" +""" +Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.telemetry_attributes.TELEMETRY_SDK_NAME`. +""" + +TELEMETRY_SDK_VERSION: Final = "telemetry.sdk.version" +""" +Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.telemetry_attributes.TELEMETRY_SDK_VERSION`. +""" + + +@deprecated( + reason="Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.telemetry_attributes.TelemetrySdkLanguageValues`." 
+) # type: ignore +class TelemetrySdkLanguageValues(Enum): + CPP = "cpp" + """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.telemetry_attributes.TelemetrySdkLanguageValues.CPP`.""" + DOTNET = "dotnet" + """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.telemetry_attributes.TelemetrySdkLanguageValues.DOTNET`.""" + ERLANG = "erlang" + """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.telemetry_attributes.TelemetrySdkLanguageValues.ERLANG`.""" + GO = "go" + """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.telemetry_attributes.TelemetrySdkLanguageValues.GO`.""" + JAVA = "java" + """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.telemetry_attributes.TelemetrySdkLanguageValues.JAVA`.""" + NODEJS = "nodejs" + """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.telemetry_attributes.TelemetrySdkLanguageValues.NODEJS`.""" + PHP = "php" + """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.telemetry_attributes.TelemetrySdkLanguageValues.PHP`.""" + PYTHON = "python" + """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.telemetry_attributes.TelemetrySdkLanguageValues.PYTHON`.""" + RUBY = "ruby" + """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.telemetry_attributes.TelemetrySdkLanguageValues.RUBY`.""" + RUST = "rust" + """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.telemetry_attributes.TelemetrySdkLanguageValues.RUST`.""" + SWIFT = "swift" + """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.telemetry_attributes.TelemetrySdkLanguageValues.SWIFT`.""" + WEBJS = "webjs" + """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.telemetry_attributes.TelemetrySdkLanguageValues.WEBJS`.""" diff --git 
a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/test_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/test_attributes.py new file mode 100644 index 00000000..201c9bd8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/test_attributes.py @@ -0,0 +1,58 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum +from typing import Final + +TEST_CASE_NAME: Final = "test.case.name" +""" +The fully qualified human readable name of the [test case](https://wikipedia.org/wiki/Test_case). +""" + +TEST_CASE_RESULT_STATUS: Final = "test.case.result.status" +""" +The status of the actual test case result from test execution. +""" + +TEST_SUITE_NAME: Final = "test.suite.name" +""" +The human readable name of a [test suite](https://wikipedia.org/wiki/Test_suite). +""" + +TEST_SUITE_RUN_STATUS: Final = "test.suite.run.status" +""" +The status of the test suite run. 
+""" + + +class TestCaseResultStatusValues(Enum): + PASS = "pass" + """pass.""" + FAIL = "fail" + """fail.""" + + +class TestSuiteRunStatusValues(Enum): + SUCCESS = "success" + """success.""" + FAILURE = "failure" + """failure.""" + SKIPPED = "skipped" + """skipped.""" + ABORTED = "aborted" + """aborted.""" + TIMED_OUT = "timed_out" + """timed_out.""" + IN_PROGRESS = "in_progress" + """in_progress.""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/thread_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/thread_attributes.py new file mode 100644 index 00000000..a7b4ce82 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/thread_attributes.py @@ -0,0 +1,25 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Final + +THREAD_ID: Final = "thread.id" +""" +Current "managed" thread ID (as opposed to OS thread ID). +""" + +THREAD_NAME: Final = "thread.name" +""" +Current thread name. 
+""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/tls_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/tls_attributes.py new file mode 100644 index 00000000..fa2b9169 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/tls_attributes.py @@ -0,0 +1,169 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum +from typing import Final + +TLS_CIPHER: Final = "tls.cipher" +""" +String indicating the [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used during the current connection. +Note: The values allowed for `tls.cipher` MUST be one of the `Descriptions` of the [registered TLS Cipher Suits](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4). +""" + +TLS_CLIENT_CERTIFICATE: Final = "tls.client.certificate" +""" +PEM-encoded stand-alone certificate offered by the client. This is usually mutually-exclusive of `client.certificate_chain` since this value also exists in that list. +""" + +TLS_CLIENT_CERTIFICATE_CHAIN: Final = "tls.client.certificate_chain" +""" +Array of PEM-encoded certificates that make up the certificate chain offered by the client. This is usually mutually-exclusive of `client.certificate` since that value should be the first certificate in the chain. 
+""" + +TLS_CLIENT_HASH_MD5: Final = "tls.client.hash.md5" +""" +Certificate fingerprint using the MD5 digest of DER-encoded version of certificate offered by the client. For consistency with other hash values, this value should be formatted as an uppercase hash. +""" + +TLS_CLIENT_HASH_SHA1: Final = "tls.client.hash.sha1" +""" +Certificate fingerprint using the SHA1 digest of DER-encoded version of certificate offered by the client. For consistency with other hash values, this value should be formatted as an uppercase hash. +""" + +TLS_CLIENT_HASH_SHA256: Final = "tls.client.hash.sha256" +""" +Certificate fingerprint using the SHA256 digest of DER-encoded version of certificate offered by the client. For consistency with other hash values, this value should be formatted as an uppercase hash. +""" + +TLS_CLIENT_ISSUER: Final = "tls.client.issuer" +""" +Distinguished name of [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of the issuer of the x.509 certificate presented by the client. +""" + +TLS_CLIENT_JA3: Final = "tls.client.ja3" +""" +A hash that identifies clients based on how they perform an SSL/TLS handshake. +""" + +TLS_CLIENT_NOT_AFTER: Final = "tls.client.not_after" +""" +Date/Time indicating when client certificate is no longer considered valid. +""" + +TLS_CLIENT_NOT_BEFORE: Final = "tls.client.not_before" +""" +Date/Time indicating when client certificate is first considered valid. +""" + +TLS_CLIENT_SERVER_NAME: Final = "tls.client.server_name" +""" +Deprecated: Replaced by `server.address`. +""" + +TLS_CLIENT_SUBJECT: Final = "tls.client.subject" +""" +Distinguished name of subject of the x.509 certificate presented by the client. +""" + +TLS_CLIENT_SUPPORTED_CIPHERS: Final = "tls.client.supported_ciphers" +""" +Array of ciphers offered by the client during the client hello. +""" + +TLS_CURVE: Final = "tls.curve" +""" +String indicating the curve used for the given cipher, when applicable. 
+""" + +TLS_ESTABLISHED: Final = "tls.established" +""" +Boolean flag indicating if the TLS negotiation was successful and transitioned to an encrypted tunnel. +""" + +TLS_NEXT_PROTOCOL: Final = "tls.next_protocol" +""" +String indicating the protocol being tunneled. Per the values in the [IANA registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), this string should be lower case. +""" + +TLS_PROTOCOL_NAME: Final = "tls.protocol.name" +""" +Normalized lowercase protocol name parsed from original string of the negotiated [SSL/TLS protocol version](https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values). +""" + +TLS_PROTOCOL_VERSION: Final = "tls.protocol.version" +""" +Numeric part of the version parsed from the original string of the negotiated [SSL/TLS protocol version](https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values). +""" + +TLS_RESUMED: Final = "tls.resumed" +""" +Boolean flag indicating if this TLS connection was resumed from an existing TLS negotiation. +""" + +TLS_SERVER_CERTIFICATE: Final = "tls.server.certificate" +""" +PEM-encoded stand-alone certificate offered by the server. This is usually mutually-exclusive of `server.certificate_chain` since this value also exists in that list. +""" + +TLS_SERVER_CERTIFICATE_CHAIN: Final = "tls.server.certificate_chain" +""" +Array of PEM-encoded certificates that make up the certificate chain offered by the server. This is usually mutually-exclusive of `server.certificate` since that value should be the first certificate in the chain. +""" + +TLS_SERVER_HASH_MD5: Final = "tls.server.hash.md5" +""" +Certificate fingerprint using the MD5 digest of DER-encoded version of certificate offered by the server. For consistency with other hash values, this value should be formatted as an uppercase hash. 
+""" + +TLS_SERVER_HASH_SHA1: Final = "tls.server.hash.sha1" +""" +Certificate fingerprint using the SHA1 digest of DER-encoded version of certificate offered by the server. For consistency with other hash values, this value should be formatted as an uppercase hash. +""" + +TLS_SERVER_HASH_SHA256: Final = "tls.server.hash.sha256" +""" +Certificate fingerprint using the SHA256 digest of DER-encoded version of certificate offered by the server. For consistency with other hash values, this value should be formatted as an uppercase hash. +""" + +TLS_SERVER_ISSUER: Final = "tls.server.issuer" +""" +Distinguished name of [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of the issuer of the x.509 certificate presented by the client. +""" + +TLS_SERVER_JA3S: Final = "tls.server.ja3s" +""" +A hash that identifies servers based on how they perform an SSL/TLS handshake. +""" + +TLS_SERVER_NOT_AFTER: Final = "tls.server.not_after" +""" +Date/Time indicating when server certificate is no longer considered valid. +""" + +TLS_SERVER_NOT_BEFORE: Final = "tls.server.not_before" +""" +Date/Time indicating when server certificate is first considered valid. +""" + +TLS_SERVER_SUBJECT: Final = "tls.server.subject" +""" +Distinguished name of subject of the x.509 certificate presented by the server. +""" + + +class TlsProtocolNameValues(Enum): + SSL = "ssl" + """ssl.""" + TLS = "tls" + """tls.""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/url_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/url_attributes.py new file mode 100644 index 00000000..57d1de86 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/url_attributes.py @@ -0,0 +1,87 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Final + +URL_DOMAIN: Final = "url.domain" +""" +Domain extracted from the `url.full`, such as "opentelemetry.io". +Note: In some cases a URL may refer to an IP and/or port directly, without a domain name. In this case, the IP address would go to the domain field. If the URL contains a [literal IPv6 address](https://www.rfc-editor.org/rfc/rfc2732#section-2) enclosed by `[` and `]`, the `[` and `]` characters should also be captured in the domain field. +""" + +URL_EXTENSION: Final = "url.extension" +""" +The file extension extracted from the `url.full`, excluding the leading dot. +Note: The file extension is only set if it exists, as not every url has a file extension. When the file name has multiple extensions `example.tar.gz`, only the last one should be captured `gz`, not `tar.gz`. +""" + +URL_FRAGMENT: Final = "url.fragment" +""" +Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.url_attributes.URL_FRAGMENT`. +""" + +URL_FULL: Final = "url.full" +""" +Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.url_attributes.URL_FULL`. +""" + +URL_ORIGINAL: Final = "url.original" +""" +Unmodified original URL as seen in the event source. +Note: In network monitoring, the observed URL may be a full URL, whereas in access logs, the URL is often just represented as a path. This field is meant to represent the URL as it was observed, complete or not. +`url.original` might contain credentials passed via URL in form of `https://username:password@www.example.com/`. 
In such case password and username SHOULD NOT be redacted and attribute's value SHOULD remain the same. +""" + +URL_PATH: Final = "url.path" +""" +Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.url_attributes.URL_PATH`. +""" + +URL_PORT: Final = "url.port" +""" +Port extracted from the `url.full`. +""" + +URL_QUERY: Final = "url.query" +""" +Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.url_attributes.URL_QUERY`. +""" + +URL_REGISTERED_DOMAIN: Final = "url.registered_domain" +""" +The highest registered url domain, stripped of the subdomain. +Note: This value can be determined precisely with the [public suffix list](https://publicsuffix.org/). For example, the registered domain for `foo.example.com` is `example.com`. Trying to approximate this by simply taking the last two labels will not work well for TLDs such as `co.uk`. +""" + +URL_SCHEME: Final = "url.scheme" +""" +Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.url_attributes.URL_SCHEME`. +""" + +URL_SUBDOMAIN: Final = "url.subdomain" +""" +The subdomain portion of a fully qualified domain name includes all of the names except the host name under the registered_domain. In a partially qualified domain, or if the qualification level of the full name cannot be determined, subdomain contains all of the names below the registered domain. +Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If the domain has multiple levels of subdomain, such as `sub2.sub1.example.com`, the subdomain field should contain `sub2.sub1`, with no trailing period. +""" + +URL_TEMPLATE: Final = "url.template" +""" +The low-cardinality template of an [absolute path reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2). +""" + +URL_TOP_LEVEL_DOMAIN: Final = "url.top_level_domain" +""" +The effective top level domain (eTLD), also known as the domain suffix, is the last part of the domain name. 
For example, the top level domain for example.com is `com`. +Note: This value can be determined precisely with the [public suffix list](https://publicsuffix.org/). +""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/user_agent_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/user_agent_attributes.py new file mode 100644 index 00000000..6c9e2699 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/user_agent_attributes.py @@ -0,0 +1,58 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum +from typing import Final + +USER_AGENT_NAME: Final = "user_agent.name" +""" +Name of the user-agent extracted from original. Usually refers to the browser's name. +Note: [Example](https://www.whatsmyua.info) of extracting browser's name from original string. In the case of using a user-agent for non-browser products, such as microservices with multiple names/versions inside the `user_agent.original`, the most significant name SHOULD be selected. In such a scenario it should align with `user_agent.version`. +""" + +USER_AGENT_ORIGINAL: Final = "user_agent.original" +""" +Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.user_agent_attributes.USER_AGENT_ORIGINAL`. +""" + +USER_AGENT_OS_NAME: Final = "user_agent.os.name" +""" +Human readable operating system name. 
+Note: For mapping user agent strings to OS names, libraries such as [ua-parser](https://github.com/ua-parser) can be utilized. +""" + +USER_AGENT_OS_VERSION: Final = "user_agent.os.version" +""" +The version string of the operating system as defined in [Version Attributes](/docs/resource/README.md#version-attributes). +Note: For mapping user agent strings to OS versions, libraries such as [ua-parser](https://github.com/ua-parser) can be utilized. +""" + +USER_AGENT_SYNTHETIC_TYPE: Final = "user_agent.synthetic.type" +""" +Specifies the category of synthetic traffic, such as tests or bots. +Note: This attribute MAY be derived from the contents of the `user_agent.original` attribute. Components that populate the attribute are responsible for determining what they consider to be synthetic bot or test traffic. This attribute can either be set for self-identification purposes, or on telemetry detected to be generated as a result of a synthetic request. This attribute is useful for distinguishing between genuine client traffic and synthetic traffic generated by bots or tests. +""" + +USER_AGENT_VERSION: Final = "user_agent.version" +""" +Version of the user-agent extracted from original. Usually refers to the browser's version. +Note: [Example](https://www.whatsmyua.info) of extracting browser's version from original string. In the case of using a user-agent for non-browser products, such as microservices with multiple names/versions inside the `user_agent.original`, the most significant version SHOULD be selected. In such a scenario it should align with `user_agent.name`. 
+""" + + +class UserAgentSyntheticTypeValues(Enum): + BOT = "bot" + """Bot source.""" + TEST = "test" + """Synthetic test source.""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/user_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/user_attributes.py new file mode 100644 index 00000000..4d3e8a28 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/user_attributes.py @@ -0,0 +1,46 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Final + +USER_EMAIL: Final = "user.email" +""" +User email address. +""" + +USER_FULL_NAME: Final = "user.full_name" +""" +User's full name. +""" + +USER_HASH: Final = "user.hash" +""" +Unique user hash to correlate information for a user in anonymized form. +Note: Useful if `user.id` or `user.name` contain confidential information and cannot be used. +""" + +USER_ID: Final = "user.id" +""" +Unique identifier of the user. +""" + +USER_NAME: Final = "user.name" +""" +Short name or login/username of the user. +""" + +USER_ROLES: Final = "user.roles" +""" +Array of user roles at the time of the event. 
+""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/vcs_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/vcs_attributes.py new file mode 100644 index 00000000..7ac3820a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/vcs_attributes.py @@ -0,0 +1,208 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum +from typing import Final + +from deprecated import deprecated + +VCS_CHANGE_ID: Final = "vcs.change.id" +""" +The ID of the change (pull request/merge request/changelist) if applicable. This is usually a unique (within repository) identifier generated by the VCS system. +""" + +VCS_CHANGE_STATE: Final = "vcs.change.state" +""" +The state of the change (pull request/merge request/changelist). +""" + +VCS_CHANGE_TITLE: Final = "vcs.change.title" +""" +The human readable title of the change (pull request/merge request/changelist). This title is often a brief summary of the change and may get merged in to a ref as the commit summary. +""" + +VCS_LINE_CHANGE_TYPE: Final = "vcs.line_change.type" +""" +The type of line change being measured on a branch or change. +""" + +VCS_REF_BASE_NAME: Final = "vcs.ref.base.name" +""" +The name of the [reference](https://git-scm.com/docs/gitglossary#def_ref) such as **branch** or **tag** in the repository. 
+Note: `base` refers to the starting point of a change. For example, `main` +would be the base reference of type branch if you've created a new +reference of type branch from it and created new commits. +""" + +VCS_REF_BASE_REVISION: Final = "vcs.ref.base.revision" +""" +The revision, literally [revised version](https://www.merriam-webster.com/dictionary/revision), The revision most often refers to a commit object in Git, or a revision number in SVN. +Note: `base` refers to the starting point of a change. For example, `main` +would be the base reference of type branch if you've created a new +reference of type branch from it and created new commits. The +revision can be a full [hash value (see +glossary)](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf), +of the recorded change to a ref within a repository pointing to a +commit [commit](https://git-scm.com/docs/git-commit) object. It does +not necessarily have to be a hash; it can simply define a [revision +number](https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html) +which is an integer that is monotonically increasing. In cases where +it is identical to the `ref.base.name`, it SHOULD still be included. +It is up to the implementer to decide which value to set as the +revision based on the VCS system and situational context. +""" + +VCS_REF_BASE_TYPE: Final = "vcs.ref.base.type" +""" +The type of the [reference](https://git-scm.com/docs/gitglossary#def_ref) in the repository. +Note: `base` refers to the starting point of a change. For example, `main` +would be the base reference of type branch if you've created a new +reference of type branch from it and created new commits. +""" + +VCS_REF_HEAD_NAME: Final = "vcs.ref.head.name" +""" +The name of the [reference](https://git-scm.com/docs/gitglossary#def_ref) such as **branch** or **tag** in the repository. +Note: `head` refers to where you are right now; the current reference at a +given time. 
+""" + +VCS_REF_HEAD_REVISION: Final = "vcs.ref.head.revision" +""" +The revision, literally [revised version](https://www.merriam-webster.com/dictionary/revision), The revision most often refers to a commit object in Git, or a revision number in SVN. +Note: `head` refers to where you are right now; the current reference at a +given time.The revision can be a full [hash value (see +glossary)](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf), +of the recorded change to a ref within a repository pointing to a +commit [commit](https://git-scm.com/docs/git-commit) object. It does +not necessarily have to be a hash; it can simply define a [revision +number](https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html) +which is an integer that is monotonically increasing. In cases where +it is identical to the `ref.head.name`, it SHOULD still be included. +It is up to the implementer to decide which value to set as the +revision based on the VCS system and situational context. +""" + +VCS_REF_HEAD_TYPE: Final = "vcs.ref.head.type" +""" +The type of the [reference](https://git-scm.com/docs/gitglossary#def_ref) in the repository. +Note: `head` refers to where you are right now; the current reference at a +given time. +""" + +VCS_REF_TYPE: Final = "vcs.ref.type" +""" +The type of the [reference](https://git-scm.com/docs/gitglossary#def_ref) in the repository. +""" + +VCS_REPOSITORY_CHANGE_ID: Final = "vcs.repository.change.id" +""" +Deprecated: Deprecated, use `vcs.change.id` instead. +""" + +VCS_REPOSITORY_CHANGE_TITLE: Final = "vcs.repository.change.title" +""" +Deprecated: Deprecated, use `vcs.change.title` instead. +""" + +VCS_REPOSITORY_NAME: Final = "vcs.repository.name" +""" +The human readable name of the repository. It SHOULD NOT include any additional identifier like Group/SubGroup in GitLab or organization in GitHub. 
+Note: Due to it only being the name, it can clash with forks of the same +repository if collecting telemetry across multiple orgs or groups in +the same backends. +""" + +VCS_REPOSITORY_REF_NAME: Final = "vcs.repository.ref.name" +""" +Deprecated: Deprecated, use `vcs.ref.head.name` instead. +""" + +VCS_REPOSITORY_REF_REVISION: Final = "vcs.repository.ref.revision" +""" +Deprecated: Deprecated, use `vcs.ref.head.revision` instead. +""" + +VCS_REPOSITORY_REF_TYPE: Final = "vcs.repository.ref.type" +""" +Deprecated: Deprecated, use `vcs.ref.head.type` instead. +""" + +VCS_REPOSITORY_URL_FULL: Final = "vcs.repository.url.full" +""" +The [canonical URL](https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical.) of the repository providing the complete HTTP(S) address in order to locate and identify the repository through a browser. +Note: In Git Version Control Systems, the canonical URL SHOULD NOT include +the `.git` extension. +""" + +VCS_REVISION_DELTA_DIRECTION: Final = "vcs.revision_delta.direction" +""" +The type of revision comparison. +""" + + +class VcsChangeStateValues(Enum): + OPEN = "open" + """Open means the change is currently active and under review. It hasn't been merged into the target branch yet, and it's still possible to make changes or add comments.""" + WIP = "wip" + """WIP (work-in-progress, draft) means the change is still in progress and not yet ready for a full review. It might still undergo significant changes.""" + CLOSED = "closed" + """Closed means the merge request has been closed without merging. 
This can happen for various reasons, such as the changes being deemed unnecessary, the issue being resolved in another way, or the author deciding to withdraw the request.""" + MERGED = "merged" + """Merged indicates that the change has been successfully integrated into the target codebase.""" + + +class VcsLineChangeTypeValues(Enum): + ADDED = "added" + """How many lines were added.""" + REMOVED = "removed" + """How many lines were removed.""" + + +class VcsRefBaseTypeValues(Enum): + BRANCH = "branch" + """[branch](https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch).""" + TAG = "tag" + """[tag](https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag).""" + + +class VcsRefHeadTypeValues(Enum): + BRANCH = "branch" + """[branch](https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch).""" + TAG = "tag" + """[tag](https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag).""" + + +class VcsRefTypeValues(Enum): + BRANCH = "branch" + """[branch](https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch).""" + TAG = "tag" + """[tag](https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag).""" + + +@deprecated( + reason="The attribute vcs.repository.ref.type is deprecated - Deprecated, use `vcs.ref.head.type` instead" +) # type: ignore +class VcsRepositoryRefTypeValues(Enum): + BRANCH = "branch" + """[branch](https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch).""" + TAG = "tag" + """[tag](https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag).""" + + +class VcsRevisionDeltaDirectionValues(Enum): + BEHIND = "behind" + """How many revisions the change is behind the target ref.""" + AHEAD = "ahead" + """How many revisions the change is ahead of the target ref.""" diff --git 
a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/webengine_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/webengine_attributes.py new file mode 100644 index 00000000..15175428 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/attributes/webengine_attributes.py @@ -0,0 +1,30 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Final + +WEBENGINE_DESCRIPTION: Final = "webengine.description" +""" +Additional description of the web engine (e.g. detailed version and edition information). +""" + +WEBENGINE_NAME: Final = "webengine.name" +""" +The name of the web engine. +""" + +WEBENGINE_VERSION: Final = "webengine.version" +""" +The version of the web engine. +""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/azure_metrics.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/azure_metrics.py new file mode 100644 index 00000000..2e45a2ca --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/azure_metrics.py @@ -0,0 +1,59 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from typing import Final + +from opentelemetry.metrics import Histogram, Meter, UpDownCounter + +AZURE_COSMOSDB_CLIENT_ACTIVE_INSTANCE_COUNT: Final = ( + "azure.cosmosdb.client.active_instance.count" +) +""" +Number of active client instances +Instrument: updowncounter +Unit: {instance} +""" + + +def create_azure_cosmosdb_client_active_instance_count( + meter: Meter, +) -> UpDownCounter: + """Number of active client instances""" + return meter.create_up_down_counter( + name=AZURE_COSMOSDB_CLIENT_ACTIVE_INSTANCE_COUNT, + description="Number of active client instances", + unit="{instance}", + ) + + +AZURE_COSMOSDB_CLIENT_OPERATION_REQUEST_CHARGE: Final = ( + "azure.cosmosdb.client.operation.request_charge" +) +""" +[Request units](https://learn.microsoft.com/azure/cosmos-db/request-units) consumed by the operation +Instrument: histogram +Unit: {request_unit} +""" + + +def create_azure_cosmosdb_client_operation_request_charge( + meter: Meter, +) -> Histogram: + """[Request units](https://learn.microsoft.com/azure/cosmos-db/request-units) consumed by the operation""" + return meter.create_histogram( + name=AZURE_COSMOSDB_CLIENT_OPERATION_REQUEST_CHARGE, + description="[Request units](https://learn.microsoft.com/azure/cosmos-db/request-units) consumed by the operation", + unit="{request_unit}", + ) diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/cicd_metrics.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/cicd_metrics.py new file mode 100644 index 00000000..53fbfaca --- /dev/null 
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/cicd_metrics.py @@ -0,0 +1,105 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from typing import Final + +from opentelemetry.metrics import Counter, Histogram, Meter, UpDownCounter + +CICD_PIPELINE_RUN_ACTIVE: Final = "cicd.pipeline.run.active" +""" +The number of pipeline runs currently active in the system by state +Instrument: updowncounter +Unit: {run} +""" + + +def create_cicd_pipeline_run_active(meter: Meter) -> UpDownCounter: + """The number of pipeline runs currently active in the system by state""" + return meter.create_up_down_counter( + name=CICD_PIPELINE_RUN_ACTIVE, + description="The number of pipeline runs currently active in the system by state.", + unit="{run}", + ) + + +CICD_PIPELINE_RUN_DURATION: Final = "cicd.pipeline.run.duration" +""" +Duration of a pipeline run grouped by pipeline, state and result +Instrument: histogram +Unit: s +""" + + +def create_cicd_pipeline_run_duration(meter: Meter) -> Histogram: + """Duration of a pipeline run grouped by pipeline, state and result""" + return meter.create_histogram( + name=CICD_PIPELINE_RUN_DURATION, + description="Duration of a pipeline run grouped by pipeline, state and result.", + unit="s", + ) + + +CICD_PIPELINE_RUN_ERRORS: Final = "cicd.pipeline.run.errors" +""" +The number of errors encountered in pipeline runs (eg. 
compile, test failures) +Instrument: counter +Unit: {error} +Note: There might be errors in a pipeline run that are non fatal (eg. they are suppressed) or in a parallel stage multiple stages could have a fatal error. +This means that this error count might not be the same as the count of metric `cicd.pipeline.run.duration` with run result `failure`. +""" + + +def create_cicd_pipeline_run_errors(meter: Meter) -> Counter: + """The number of errors encountered in pipeline runs (eg. compile, test failures)""" + return meter.create_counter( + name=CICD_PIPELINE_RUN_ERRORS, + description="The number of errors encountered in pipeline runs (eg. compile, test failures).", + unit="{error}", + ) + + +CICD_SYSTEM_ERRORS: Final = "cicd.system.errors" +""" +The number of errors in a component of the CICD system (eg. controller, scheduler, agent) +Instrument: counter +Unit: {error} +Note: Errors in pipeline run execution are explicitly excluded. Ie a test failure is not counted in this metric. +""" + + +def create_cicd_system_errors(meter: Meter) -> Counter: + """The number of errors in a component of the CICD system (eg. controller, scheduler, agent)""" + return meter.create_counter( + name=CICD_SYSTEM_ERRORS, + description="The number of errors in a component of the CICD system (eg. 
controller, scheduler, agent).", + unit="{error}", + ) + + +CICD_WORKER_COUNT: Final = "cicd.worker.count" +""" +The number of workers on the CICD system by state +Instrument: updowncounter +Unit: {count} +""" + + +def create_cicd_worker_count(meter: Meter) -> UpDownCounter: + """The number of workers on the CICD system by state""" + return meter.create_up_down_counter( + name=CICD_WORKER_COUNT, + description="The number of workers on the CICD system by state.", + unit="{count}", + ) diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/container_metrics.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/container_metrics.py new file mode 100644 index 00000000..ca4a9131 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/container_metrics.py @@ -0,0 +1,152 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +from typing import ( + Callable, + Final, + Generator, + Iterable, + Optional, + Sequence, + Union, +) + +from opentelemetry.metrics import ( + CallbackOptions, + Counter, + Meter, + ObservableGauge, + Observation, +) + +# pylint: disable=invalid-name +CallbackT = Union[ + Callable[[CallbackOptions], Iterable[Observation]], + Generator[Iterable[Observation], CallbackOptions, None], +] + +CONTAINER_CPU_TIME: Final = "container.cpu.time" +""" +Total CPU time consumed +Instrument: counter +Unit: s +Note: Total CPU time consumed by the specific container on all available CPU cores. +""" + + +def create_container_cpu_time(meter: Meter) -> Counter: + """Total CPU time consumed""" + return meter.create_counter( + name=CONTAINER_CPU_TIME, + description="Total CPU time consumed", + unit="s", + ) + + +CONTAINER_CPU_USAGE: Final = "container.cpu.usage" +""" +Container's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs +Instrument: gauge +Unit: {cpu} +Note: CPU usage of the specific container on all available CPU cores, averaged over the sample window. +""" + + +def create_container_cpu_usage( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """Container's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs""" + return meter.create_observable_gauge( + name=CONTAINER_CPU_USAGE, + callbacks=callbacks, + description="Container's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs", + unit="{cpu}", + ) + + +CONTAINER_DISK_IO: Final = "container.disk.io" +""" +Disk bytes for the container +Instrument: counter +Unit: By +Note: The total number of bytes read/written successfully (aggregated from all disks). 
+""" + + +def create_container_disk_io(meter: Meter) -> Counter: + """Disk bytes for the container""" + return meter.create_counter( + name=CONTAINER_DISK_IO, + description="Disk bytes for the container.", + unit="By", + ) + + +CONTAINER_MEMORY_USAGE: Final = "container.memory.usage" +""" +Memory usage of the container +Instrument: counter +Unit: By +Note: Memory usage of the container. +""" + + +def create_container_memory_usage(meter: Meter) -> Counter: + """Memory usage of the container""" + return meter.create_counter( + name=CONTAINER_MEMORY_USAGE, + description="Memory usage of the container.", + unit="By", + ) + + +CONTAINER_NETWORK_IO: Final = "container.network.io" +""" +Network bytes for the container +Instrument: counter +Unit: By +Note: The number of bytes sent/received on all network interfaces by the container. +""" + + +def create_container_network_io(meter: Meter) -> Counter: + """Network bytes for the container""" + return meter.create_counter( + name=CONTAINER_NETWORK_IO, + description="Network bytes for the container.", + unit="By", + ) + + +CONTAINER_UPTIME: Final = "container.uptime" +""" +The time the container has been running +Instrument: gauge +Unit: s +Note: Instrumentations SHOULD use a gauge with type `double` and measure uptime in seconds as a floating point number with the highest precision available. +The actual accuracy would depend on the instrumentation and operating system. 
+""" + + +def create_container_uptime( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """The time the container has been running""" + return meter.create_observable_gauge( + name=CONTAINER_UPTIME, + callbacks=callbacks, + description="The time the container has been running", + unit="s", + ) diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/cpu_metrics.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/cpu_metrics.py new file mode 100644 index 00000000..86bc5a67 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/cpu_metrics.py @@ -0,0 +1,94 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +from typing import ( + Callable, + Final, + Generator, + Iterable, + Optional, + Sequence, + Union, +) + +from opentelemetry.metrics import ( + CallbackOptions, + Counter, + Meter, + ObservableGauge, + Observation, +) + +# pylint: disable=invalid-name +CallbackT = Union[ + Callable[[CallbackOptions], Iterable[Observation]], + Generator[Iterable[Observation], CallbackOptions, None], +] + +CPU_FREQUENCY: Final = "cpu.frequency" +""" +Operating frequency of the logical CPU in Hertz +Instrument: gauge +Unit: Hz +""" + + +def create_cpu_frequency( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """Operating frequency of the logical CPU in Hertz""" + return meter.create_observable_gauge( + name=CPU_FREQUENCY, + callbacks=callbacks, + description="Operating frequency of the logical CPU in Hertz.", + unit="Hz", + ) + + +CPU_TIME: Final = "cpu.time" +""" +Seconds each logical CPU spent on each mode +Instrument: counter +Unit: s +""" + + +def create_cpu_time(meter: Meter) -> Counter: + """Seconds each logical CPU spent on each mode""" + return meter.create_counter( + name=CPU_TIME, + description="Seconds each logical CPU spent on each mode", + unit="s", + ) + + +CPU_UTILIZATION: Final = "cpu.utilization" +""" +For each logical CPU, the utilization is calculated as the change in cumulative CPU time (cpu.time) over a measurement interval, divided by the elapsed time +Instrument: gauge +Unit: 1 +""" + + +def create_cpu_utilization( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """For each logical CPU, the utilization is calculated as the change in cumulative CPU time (cpu.time) over a measurement interval, divided by the elapsed time""" + return meter.create_observable_gauge( + name=CPU_UTILIZATION, + callbacks=callbacks, + description="For each logical CPU, the utilization is calculated as the change in cumulative CPU time (cpu.time) over a measurement interval, divided by the elapsed time.", + 
unit="1", + ) diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/db_metrics.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/db_metrics.py new file mode 100644 index 00000000..32c0f55f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/db_metrics.py @@ -0,0 +1,386 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from typing import Final + +from opentelemetry.metrics import Counter, Histogram, Meter, UpDownCounter + +DB_CLIENT_CONNECTION_COUNT: Final = "db.client.connection.count" +""" +The number of connections that are currently in state described by the `state` attribute +Instrument: updowncounter +Unit: {connection} +""" + + +def create_db_client_connection_count(meter: Meter) -> UpDownCounter: + """The number of connections that are currently in state described by the `state` attribute""" + return meter.create_up_down_counter( + name=DB_CLIENT_CONNECTION_COUNT, + description="The number of connections that are currently in state described by the `state` attribute", + unit="{connection}", + ) + + +DB_CLIENT_CONNECTION_CREATE_TIME: Final = "db.client.connection.create_time" +""" +The time it took to create a new connection +Instrument: histogram +Unit: s +""" + + +def create_db_client_connection_create_time(meter: Meter) -> Histogram: + """The time it took to create a new connection""" + return 
meter.create_histogram( + name=DB_CLIENT_CONNECTION_CREATE_TIME, + description="The time it took to create a new connection", + unit="s", + ) + + +DB_CLIENT_CONNECTION_IDLE_MAX: Final = "db.client.connection.idle.max" +""" +The maximum number of idle open connections allowed +Instrument: updowncounter +Unit: {connection} +""" + + +def create_db_client_connection_idle_max(meter: Meter) -> UpDownCounter: + """The maximum number of idle open connections allowed""" + return meter.create_up_down_counter( + name=DB_CLIENT_CONNECTION_IDLE_MAX, + description="The maximum number of idle open connections allowed", + unit="{connection}", + ) + + +DB_CLIENT_CONNECTION_IDLE_MIN: Final = "db.client.connection.idle.min" +""" +The minimum number of idle open connections allowed +Instrument: updowncounter +Unit: {connection} +""" + + +def create_db_client_connection_idle_min(meter: Meter) -> UpDownCounter: + """The minimum number of idle open connections allowed""" + return meter.create_up_down_counter( + name=DB_CLIENT_CONNECTION_IDLE_MIN, + description="The minimum number of idle open connections allowed", + unit="{connection}", + ) + + +DB_CLIENT_CONNECTION_MAX: Final = "db.client.connection.max" +""" +The maximum number of open connections allowed +Instrument: updowncounter +Unit: {connection} +""" + + +def create_db_client_connection_max(meter: Meter) -> UpDownCounter: + """The maximum number of open connections allowed""" + return meter.create_up_down_counter( + name=DB_CLIENT_CONNECTION_MAX, + description="The maximum number of open connections allowed", + unit="{connection}", + ) + + +DB_CLIENT_CONNECTION_PENDING_REQUESTS: Final = ( + "db.client.connection.pending_requests" +) +""" +The number of current pending requests for an open connection +Instrument: updowncounter +Unit: {request} +""" + + +def create_db_client_connection_pending_requests( + meter: Meter, +) -> UpDownCounter: + """The number of current pending requests for an open connection""" + return 
meter.create_up_down_counter( + name=DB_CLIENT_CONNECTION_PENDING_REQUESTS, + description="The number of current pending requests for an open connection", + unit="{request}", + ) + + +DB_CLIENT_CONNECTION_TIMEOUTS: Final = "db.client.connection.timeouts" +""" +The number of connection timeouts that have occurred trying to obtain a connection from the pool +Instrument: counter +Unit: {timeout} +""" + + +def create_db_client_connection_timeouts(meter: Meter) -> Counter: + """The number of connection timeouts that have occurred trying to obtain a connection from the pool""" + return meter.create_counter( + name=DB_CLIENT_CONNECTION_TIMEOUTS, + description="The number of connection timeouts that have occurred trying to obtain a connection from the pool", + unit="{timeout}", + ) + + +DB_CLIENT_CONNECTION_USE_TIME: Final = "db.client.connection.use_time" +""" +The time between borrowing a connection and returning it to the pool +Instrument: histogram +Unit: s +""" + + +def create_db_client_connection_use_time(meter: Meter) -> Histogram: + """The time between borrowing a connection and returning it to the pool""" + return meter.create_histogram( + name=DB_CLIENT_CONNECTION_USE_TIME, + description="The time between borrowing a connection and returning it to the pool", + unit="s", + ) + + +DB_CLIENT_CONNECTION_WAIT_TIME: Final = "db.client.connection.wait_time" +""" +The time it took to obtain an open connection from the pool +Instrument: histogram +Unit: s +""" + + +def create_db_client_connection_wait_time(meter: Meter) -> Histogram: + """The time it took to obtain an open connection from the pool""" + return meter.create_histogram( + name=DB_CLIENT_CONNECTION_WAIT_TIME, + description="The time it took to obtain an open connection from the pool", + unit="s", + ) + + +DB_CLIENT_CONNECTIONS_CREATE_TIME: Final = "db.client.connections.create_time" +""" +Deprecated: Replaced by `db.client.connection.create_time`. Note: the unit also changed from `ms` to `s`. 
+""" + + +def create_db_client_connections_create_time(meter: Meter) -> Histogram: + """Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s`""" + return meter.create_histogram( + name=DB_CLIENT_CONNECTIONS_CREATE_TIME, + description="Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s`.", + unit="ms", + ) + + +DB_CLIENT_CONNECTIONS_IDLE_MAX: Final = "db.client.connections.idle.max" +""" +Deprecated: Replaced by `db.client.connection.idle.max`. +""" + + +def create_db_client_connections_idle_max(meter: Meter) -> UpDownCounter: + """Deprecated, use `db.client.connection.idle.max` instead""" + return meter.create_up_down_counter( + name=DB_CLIENT_CONNECTIONS_IDLE_MAX, + description="Deprecated, use `db.client.connection.idle.max` instead.", + unit="{connection}", + ) + + +DB_CLIENT_CONNECTIONS_IDLE_MIN: Final = "db.client.connections.idle.min" +""" +Deprecated: Replaced by `db.client.connection.idle.min`. +""" + + +def create_db_client_connections_idle_min(meter: Meter) -> UpDownCounter: + """Deprecated, use `db.client.connection.idle.min` instead""" + return meter.create_up_down_counter( + name=DB_CLIENT_CONNECTIONS_IDLE_MIN, + description="Deprecated, use `db.client.connection.idle.min` instead.", + unit="{connection}", + ) + + +DB_CLIENT_CONNECTIONS_MAX: Final = "db.client.connections.max" +""" +Deprecated: Replaced by `db.client.connection.max`. +""" + + +def create_db_client_connections_max(meter: Meter) -> UpDownCounter: + """Deprecated, use `db.client.connection.max` instead""" + return meter.create_up_down_counter( + name=DB_CLIENT_CONNECTIONS_MAX, + description="Deprecated, use `db.client.connection.max` instead.", + unit="{connection}", + ) + + +DB_CLIENT_CONNECTIONS_PENDING_REQUESTS: Final = ( + "db.client.connections.pending_requests" +) +""" +Deprecated: Replaced by `db.client.connection.pending_requests`. 
+""" + + +def create_db_client_connections_pending_requests( + meter: Meter, +) -> UpDownCounter: + """Deprecated, use `db.client.connection.pending_requests` instead""" + return meter.create_up_down_counter( + name=DB_CLIENT_CONNECTIONS_PENDING_REQUESTS, + description="Deprecated, use `db.client.connection.pending_requests` instead.", + unit="{request}", + ) + + +DB_CLIENT_CONNECTIONS_TIMEOUTS: Final = "db.client.connections.timeouts" +""" +Deprecated: Replaced by `db.client.connection.timeouts`. +""" + + +def create_db_client_connections_timeouts(meter: Meter) -> Counter: + """Deprecated, use `db.client.connection.timeouts` instead""" + return meter.create_counter( + name=DB_CLIENT_CONNECTIONS_TIMEOUTS, + description="Deprecated, use `db.client.connection.timeouts` instead.", + unit="{timeout}", + ) + + +DB_CLIENT_CONNECTIONS_USAGE: Final = "db.client.connections.usage" +""" +Deprecated: Replaced by `db.client.connection.count`. +""" + + +def create_db_client_connections_usage(meter: Meter) -> UpDownCounter: + """Deprecated, use `db.client.connection.count` instead""" + return meter.create_up_down_counter( + name=DB_CLIENT_CONNECTIONS_USAGE, + description="Deprecated, use `db.client.connection.count` instead.", + unit="{connection}", + ) + + +DB_CLIENT_CONNECTIONS_USE_TIME: Final = "db.client.connections.use_time" +""" +Deprecated: Replaced by `db.client.connection.use_time`. Note: the unit also changed from `ms` to `s`. +""" + + +def create_db_client_connections_use_time(meter: Meter) -> Histogram: + """Deprecated, use `db.client.connection.use_time` instead. Note: the unit also changed from `ms` to `s`""" + return meter.create_histogram( + name=DB_CLIENT_CONNECTIONS_USE_TIME, + description="Deprecated, use `db.client.connection.use_time` instead. Note: the unit also changed from `ms` to `s`.", + unit="ms", + ) + + +DB_CLIENT_CONNECTIONS_WAIT_TIME: Final = "db.client.connections.wait_time" +""" +Deprecated: Replaced by `db.client.connection.wait_time`. 
Note: the unit also changed from `ms` to `s`. +""" + + +def create_db_client_connections_wait_time(meter: Meter) -> Histogram: + """Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s`""" + return meter.create_histogram( + name=DB_CLIENT_CONNECTIONS_WAIT_TIME, + description="Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s`.", + unit="ms", + ) + + +DB_CLIENT_COSMOSDB_ACTIVE_INSTANCE_COUNT: Final = ( + "db.client.cosmosdb.active_instance.count" +) +""" +Deprecated: Replaced by `azure.cosmosdb.client.active_instance.count`. +""" + + +def create_db_client_cosmosdb_active_instance_count( + meter: Meter, +) -> UpDownCounter: + """Deprecated, use `azure.cosmosdb.client.active_instance.count` instead""" + return meter.create_up_down_counter( + name=DB_CLIENT_COSMOSDB_ACTIVE_INSTANCE_COUNT, + description="Deprecated, use `azure.cosmosdb.client.active_instance.count` instead.", + unit="{instance}", + ) + + +DB_CLIENT_COSMOSDB_OPERATION_REQUEST_CHARGE: Final = ( + "db.client.cosmosdb.operation.request_charge" +) +""" +Deprecated: Replaced by `azure.cosmosdb.client.operation.request_charge`. +""" + + +def create_db_client_cosmosdb_operation_request_charge( + meter: Meter, +) -> Histogram: + """Deprecated, use `azure.cosmosdb.client.operation.request_charge` instead""" + return meter.create_histogram( + name=DB_CLIENT_COSMOSDB_OPERATION_REQUEST_CHARGE, + description="Deprecated, use `azure.cosmosdb.client.operation.request_charge` instead.", + unit="{request_unit}", + ) + + +DB_CLIENT_OPERATION_DURATION: Final = "db.client.operation.duration" +""" +Duration of database client operations +Instrument: histogram +Unit: s +Note: Batch operations SHOULD be recorded as a single operation. 
+""" + + +def create_db_client_operation_duration(meter: Meter) -> Histogram: + """Duration of database client operations""" + return meter.create_histogram( + name=DB_CLIENT_OPERATION_DURATION, + description="Duration of database client operations.", + unit="s", + ) + + +DB_CLIENT_RESPONSE_RETURNED_ROWS: Final = "db.client.response.returned_rows" +""" +The actual number of records returned by the database operation +Instrument: histogram +Unit: {row} +""" + + +def create_db_client_response_returned_rows(meter: Meter) -> Histogram: + """The actual number of records returned by the database operation""" + return meter.create_histogram( + name=DB_CLIENT_RESPONSE_RETURNED_ROWS, + description="The actual number of records returned by the database operation.", + unit="{row}", + ) diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/dns_metrics.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/dns_metrics.py new file mode 100644 index 00000000..53fb3d26 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/dns_metrics.py @@ -0,0 +1,34 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +from typing import Final + +from opentelemetry.metrics import Histogram, Meter + +DNS_LOOKUP_DURATION: Final = "dns.lookup.duration" +""" +Measures the time taken to perform a DNS lookup +Instrument: histogram +Unit: s +""" + + +def create_dns_lookup_duration(meter: Meter) -> Histogram: + """Measures the time taken to perform a DNS lookup""" + return meter.create_histogram( + name=DNS_LOOKUP_DURATION, + description="Measures the time taken to perform a DNS lookup.", + unit="s", + ) diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/faas_metrics.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/faas_metrics.py new file mode 100644 index 00000000..5fd14149 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/faas_metrics.py @@ -0,0 +1,170 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +from typing import Final + +from opentelemetry.metrics import Counter, Histogram, Meter + +FAAS_COLDSTARTS: Final = "faas.coldstarts" +""" +Number of invocation cold starts +Instrument: counter +Unit: {coldstart} +""" + + +def create_faas_coldstarts(meter: Meter) -> Counter: + """Number of invocation cold starts""" + return meter.create_counter( + name=FAAS_COLDSTARTS, + description="Number of invocation cold starts", + unit="{coldstart}", + ) + + +FAAS_CPU_USAGE: Final = "faas.cpu_usage" +""" +Distribution of CPU usage per invocation +Instrument: histogram +Unit: s +""" + + +def create_faas_cpu_usage(meter: Meter) -> Histogram: + """Distribution of CPU usage per invocation""" + return meter.create_histogram( + name=FAAS_CPU_USAGE, + description="Distribution of CPU usage per invocation", + unit="s", + ) + + +FAAS_ERRORS: Final = "faas.errors" +""" +Number of invocation errors +Instrument: counter +Unit: {error} +""" + + +def create_faas_errors(meter: Meter) -> Counter: + """Number of invocation errors""" + return meter.create_counter( + name=FAAS_ERRORS, + description="Number of invocation errors", + unit="{error}", + ) + + +FAAS_INIT_DURATION: Final = "faas.init_duration" +""" +Measures the duration of the function's initialization, such as a cold start +Instrument: histogram +Unit: s +""" + + +def create_faas_init_duration(meter: Meter) -> Histogram: + """Measures the duration of the function's initialization, such as a cold start""" + return meter.create_histogram( + name=FAAS_INIT_DURATION, + description="Measures the duration of the function's initialization, such as a cold start", + unit="s", + ) + + +FAAS_INVOCATIONS: Final = "faas.invocations" +""" +Number of successful invocations +Instrument: counter +Unit: {invocation} +""" + + +def create_faas_invocations(meter: Meter) -> Counter: + """Number of successful invocations""" + return meter.create_counter( + name=FAAS_INVOCATIONS, + description="Number of successful invocations", + unit="{invocation}", 
+ ) + + +FAAS_INVOKE_DURATION: Final = "faas.invoke_duration" +""" +Measures the duration of the function's logic execution +Instrument: histogram +Unit: s +""" + + +def create_faas_invoke_duration(meter: Meter) -> Histogram: + """Measures the duration of the function's logic execution""" + return meter.create_histogram( + name=FAAS_INVOKE_DURATION, + description="Measures the duration of the function's logic execution", + unit="s", + ) + + +FAAS_MEM_USAGE: Final = "faas.mem_usage" +""" +Distribution of max memory usage per invocation +Instrument: histogram +Unit: By +""" + + +def create_faas_mem_usage(meter: Meter) -> Histogram: + """Distribution of max memory usage per invocation""" + return meter.create_histogram( + name=FAAS_MEM_USAGE, + description="Distribution of max memory usage per invocation", + unit="By", + ) + + +FAAS_NET_IO: Final = "faas.net_io" +""" +Distribution of net I/O usage per invocation +Instrument: histogram +Unit: By +""" + + +def create_faas_net_io(meter: Meter) -> Histogram: + """Distribution of net I/O usage per invocation""" + return meter.create_histogram( + name=FAAS_NET_IO, + description="Distribution of net I/O usage per invocation", + unit="By", + ) + + +FAAS_TIMEOUTS: Final = "faas.timeouts" +""" +Number of invocation timeouts +Instrument: counter +Unit: {timeout} +""" + + +def create_faas_timeouts(meter: Meter) -> Counter: + """Number of invocation timeouts""" + return meter.create_counter( + name=FAAS_TIMEOUTS, + description="Number of invocation timeouts", + unit="{timeout}", + ) diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/gen_ai_metrics.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/gen_ai_metrics.py new file mode 100644 index 00000000..97d9dd00 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/gen_ai_metrics.py @@ -0,0 +1,104 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache 
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from typing import Final

from opentelemetry.metrics import Histogram, Meter

# Auto-generated GenAI semantic-convention metrics: metric-name constants
# paired with factories creating the corresponding histograms.

GEN_AI_CLIENT_OPERATION_DURATION: Final = "gen_ai.client.operation.duration"
"""
GenAI operation duration
Instrument: histogram
Unit: s
"""


def create_gen_ai_client_operation_duration(meter: Meter) -> Histogram:
    """Histogram of GenAI client operation duration."""
    return meter.create_histogram(
        GEN_AI_CLIENT_OPERATION_DURATION,
        description="GenAI operation duration",
        unit="s",
    )


GEN_AI_CLIENT_TOKEN_USAGE: Final = "gen_ai.client.token.usage"
"""
Measures number of input and output tokens used
Instrument: histogram
Unit: {token}
"""


def create_gen_ai_client_token_usage(meter: Meter) -> Histogram:
    """Histogram of input/output token usage."""
    return meter.create_histogram(
        GEN_AI_CLIENT_TOKEN_USAGE,
        description="Measures number of input and output tokens used",
        unit="{token}",
    )


GEN_AI_SERVER_REQUEST_DURATION: Final = "gen_ai.server.request.duration"
"""
Generative AI server request duration such as time-to-last byte or last output token
Instrument: histogram
Unit: s
"""


def create_gen_ai_server_request_duration(meter: Meter) -> Histogram:
    """Histogram of GenAI server request duration (time-to-last byte/token)."""
    return meter.create_histogram(
        GEN_AI_SERVER_REQUEST_DURATION,
        description="Generative AI server request duration such as time-to-last byte or last output token",
        unit="s",
    )


GEN_AI_SERVER_TIME_PER_OUTPUT_TOKEN: Final = (
    "gen_ai.server.time_per_output_token"
)
"""
Time per output token generated after the first token for successful responses
Instrument: histogram
Unit: s
"""


def create_gen_ai_server_time_per_output_token(meter: Meter) -> Histogram:
    """Histogram of per-output-token latency after the first token."""
    return meter.create_histogram(
        GEN_AI_SERVER_TIME_PER_OUTPUT_TOKEN,
        description="Time per output token generated after the first token for successful responses",
        unit="s",
    )


GEN_AI_SERVER_TIME_TO_FIRST_TOKEN: Final = "gen_ai.server.time_to_first_token"
"""
Time to generate first token for successful responses
Instrument: histogram
Unit: s
"""


def create_gen_ai_server_time_to_first_token(meter: Meter) -> Histogram:
    """Histogram of time-to-first-token for successful responses."""
    return meter.create_histogram(
        GEN_AI_SERVER_TIME_TO_FIRST_TOKEN,
        description="Time to generate first token for successful responses",
        unit="s",
    )

# --- diff boundary: new file opentelemetry/semconv/_incubating/metrics/http_metrics.py
# --- (Apache License 2.0 header follows in the original patch)
# See the License for the specific language governing permissions and
# limitations under the License.


from typing import Final

from opentelemetry.metrics import Histogram, Meter, UpDownCounter

# Auto-generated incubating HTTP semantic-convention metrics. The two
# *.request.duration entries are deprecated here in favor of the stable
# module; their factories are kept for backward compatibility.

HTTP_CLIENT_ACTIVE_REQUESTS: Final = "http.client.active_requests"
"""
Number of active HTTP requests
Instrument: updowncounter
Unit: {request}
"""


def create_http_client_active_requests(meter: Meter) -> UpDownCounter:
    """UpDownCounter of in-flight HTTP client requests."""
    return meter.create_up_down_counter(
        HTTP_CLIENT_ACTIVE_REQUESTS,
        description="Number of active HTTP requests.",
        unit="{request}",
    )


HTTP_CLIENT_CONNECTION_DURATION: Final = "http.client.connection.duration"
"""
The duration of the successfully established outbound HTTP connections
Instrument: histogram
Unit: s
"""


def create_http_client_connection_duration(meter: Meter) -> Histogram:
    """Histogram of successfully established outbound connection lifetimes."""
    return meter.create_histogram(
        HTTP_CLIENT_CONNECTION_DURATION,
        description="The duration of the successfully established outbound HTTP connections.",
        unit="s",
    )


HTTP_CLIENT_OPEN_CONNECTIONS: Final = "http.client.open_connections"
"""
Number of outbound HTTP connections that are currently active or idle on the client
Instrument: updowncounter
Unit: {connection}
"""


def create_http_client_open_connections(meter: Meter) -> UpDownCounter:
    """UpDownCounter of open (active or idle) outbound HTTP connections."""
    return meter.create_up_down_counter(
        HTTP_CLIENT_OPEN_CONNECTIONS,
        description="Number of outbound HTTP connections that are currently active or idle on the client.",
        unit="{connection}",
    )


HTTP_CLIENT_REQUEST_BODY_SIZE: Final = "http.client.request.body.size"
"""
Size of HTTP client request bodies
Instrument: histogram
Unit: By
Note: Body bytes only (headers excluded); usually the [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) header value; compressed size when transport encoding is used.
"""


def create_http_client_request_body_size(meter: Meter) -> Histogram:
    """Histogram of HTTP client request body sizes."""
    return meter.create_histogram(
        HTTP_CLIENT_REQUEST_BODY_SIZE,
        description="Size of HTTP client request bodies.",
        unit="By",
    )


HTTP_CLIENT_REQUEST_DURATION: Final = "http.client.request.duration"
"""
Deprecated in favor of stable :py:const:`opentelemetry.semconv.metrics.http_metrics.HTTP_CLIENT_REQUEST_DURATION`.
"""


def create_http_client_request_duration(meter: Meter) -> Histogram:
    """Histogram of HTTP client request durations (deprecated; see stable module)."""
    return meter.create_histogram(
        HTTP_CLIENT_REQUEST_DURATION,
        description="Duration of HTTP client requests.",
        unit="s",
    )


HTTP_CLIENT_RESPONSE_BODY_SIZE: Final = "http.client.response.body.size"
"""
Size of HTTP client response bodies
Instrument: histogram
Unit: By
Note: Body bytes only (headers excluded); usually the [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) header value; compressed size when transport encoding is used.
"""


def create_http_client_response_body_size(meter: Meter) -> Histogram:
    """Histogram of HTTP client response body sizes."""
    return meter.create_histogram(
        HTTP_CLIENT_RESPONSE_BODY_SIZE,
        description="Size of HTTP client response bodies.",
        unit="By",
    )


HTTP_SERVER_ACTIVE_REQUESTS: Final = "http.server.active_requests"
"""
Number of active HTTP server requests
Instrument: updowncounter
Unit: {request}
"""


def create_http_server_active_requests(meter: Meter) -> UpDownCounter:
    """UpDownCounter of in-flight HTTP server requests."""
    return meter.create_up_down_counter(
        HTTP_SERVER_ACTIVE_REQUESTS,
        description="Number of active HTTP server requests.",
        unit="{request}",
    )


HTTP_SERVER_REQUEST_BODY_SIZE: Final = "http.server.request.body.size"
"""
Size of HTTP server request bodies
Instrument: histogram
Unit: By
Note: Body bytes only (headers excluded); usually the [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) header value; compressed size when transport encoding is used.
"""


def create_http_server_request_body_size(meter: Meter) -> Histogram:
    """Histogram of HTTP server request body sizes."""
    return meter.create_histogram(
        HTTP_SERVER_REQUEST_BODY_SIZE,
        description="Size of HTTP server request bodies.",
        unit="By",
    )


HTTP_SERVER_REQUEST_DURATION: Final = "http.server.request.duration"
"""
Deprecated in favor of stable :py:const:`opentelemetry.semconv.metrics.http_metrics.HTTP_SERVER_REQUEST_DURATION`.
"""


def create_http_server_request_duration(meter: Meter) -> Histogram:
    """Histogram of HTTP server request durations (deprecated; see stable module)."""
    return meter.create_histogram(
        HTTP_SERVER_REQUEST_DURATION,
        description="Duration of HTTP server requests.",
        unit="s",
    )


HTTP_SERVER_RESPONSE_BODY_SIZE: Final = "http.server.response.body.size"
"""
Size of HTTP server response bodies
Instrument: histogram
Unit: By
Note: Body bytes only (headers excluded); usually the [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) header value; compressed size when transport encoding is used.
"""


def create_http_server_response_body_size(meter: Meter) -> Histogram:
    """Histogram of HTTP server response body sizes."""
    return meter.create_histogram(
        HTTP_SERVER_RESPONSE_BODY_SIZE,
        description="Size of HTTP server response bodies.",
        unit="By",
    )

# --- diff boundary: new file opentelemetry/semconv/_incubating/metrics/hw_metrics.py
# --- (Apache License 2.0 header follows in the original patch)
from typing import (
    Callable,
    Final,
    Generator,
    Iterable,
    Optional,
    Sequence,
    Union,
)

from opentelemetry.metrics import (
    CallbackOptions,
    Counter,
    Meter,
    ObservableGauge,
    Observation,
    UpDownCounter,
)

# Auto-generated hardware semantic-convention metrics.

# pylint: disable=invalid-name
# Accepted callback shapes for observable instruments: a plain callable or a
# generator driven with CallbackOptions.
CallbackT = Union[
    Callable[[CallbackOptions], Iterable[Observation]],
    Generator[Iterable[Observation], CallbackOptions, None],
]

HW_ENERGY: Final = "hw.energy"
"""
Energy consumed by the component
Instrument: counter
Unit: J
"""


def create_hw_energy(meter: Meter) -> Counter:
    """Counter of energy consumed by the component (joules)."""
    return meter.create_counter(
        HW_ENERGY,
        description="Energy consumed by the component",
        unit="J",
    )


HW_ERRORS: Final = "hw.errors"
"""
Number of errors encountered by the component
Instrument: counter
Unit: {error}
"""


def create_hw_errors(meter: Meter) -> Counter:
    """Counter of errors encountered by the component."""
    return meter.create_counter(
        HW_ERRORS,
        description="Number of errors encountered by the component",
        unit="{error}",
    )


HW_HOST_AMBIENT_TEMPERATURE: Final = "hw.host.ambient_temperature"
"""
Ambient (external) temperature of the physical host
Instrument: gauge
Unit: Cel
"""


def create_hw_host_ambient_temperature(
    meter: Meter, callbacks: Optional[Sequence[CallbackT]]
) -> ObservableGauge:
    """Observable gauge of the host's ambient (external) temperature."""
    return meter.create_observable_gauge(
        HW_HOST_AMBIENT_TEMPERATURE,
        callbacks=callbacks,
        description="Ambient (external) temperature of the physical host",
        unit="Cel",
    )


HW_HOST_ENERGY: Final = "hw.host.energy"
"""
Total energy consumed by the entire physical host, in joules
Instrument: counter
Unit: J
Note: Report whole-host energy with `hw.host.energy`/`hw.host.power` only, not the generic `hw.energy`/`hw.power`, to avoid summing overlapping values.
"""


def create_hw_host_energy(meter: Meter) -> Counter:
    """Counter of total energy consumed by the entire physical host (joules)."""
    return meter.create_counter(
        HW_HOST_ENERGY,
        description="Total energy consumed by the entire physical host, in joules",
        unit="J",
    )


HW_HOST_HEATING_MARGIN: Final = "hw.host.heating_margin"
"""
By how many degrees Celsius the temperature of the physical host can be increased, before reaching a warning threshold on one of the internal sensors
Instrument: gauge
Unit: Cel
"""


def create_hw_host_heating_margin(
    meter: Meter, callbacks: Optional[Sequence[CallbackT]]
) -> ObservableGauge:
    """Observable gauge of remaining heating margin before a sensor warning."""
    return meter.create_observable_gauge(
        HW_HOST_HEATING_MARGIN,
        callbacks=callbacks,
        description="By how many degrees Celsius the temperature of the physical host can be increased, before reaching a warning threshold on one of the internal sensors",
        unit="Cel",
    )


HW_HOST_POWER: Final = "hw.host.power"
"""
Instantaneous power consumed by the entire physical host in Watts (`hw.host.energy` is preferred)
Instrument: gauge
Unit: W
Note: Report whole-host power with `hw.host.energy`/`hw.host.power` only, not the generic `hw.energy`/`hw.power`, to avoid summing overlapping values.
"""


def create_hw_host_power(
    meter: Meter, callbacks: Optional[Sequence[CallbackT]]
) -> ObservableGauge:
    """Observable gauge of instantaneous whole-host power draw (watts)."""
    return meter.create_observable_gauge(
        HW_HOST_POWER,
        callbacks=callbacks,
        description="Instantaneous power consumed by the entire physical host in Watts (`hw.host.energy` is preferred)",
        unit="W",
    )


HW_POWER: Final = "hw.power"
"""
Instantaneous power consumed by the component
Instrument: gauge
Unit: W
Note: It is recommended to report `hw.energy` instead of `hw.power` when possible.
"""


def create_hw_power(
    meter: Meter, callbacks: Optional[Sequence[CallbackT]]
) -> ObservableGauge:
    """Observable gauge of instantaneous component power draw (watts)."""
    return meter.create_observable_gauge(
        HW_POWER,
        callbacks=callbacks,
        description="Instantaneous power consumed by the component",
        unit="W",
    )


HW_STATUS: Final = "hw.status"
"""
Operational status: `1` (true) or `0` (false) for each of the possible states
Instrument: updowncounter
Unit: 1
Note: Specified as an UpDownCounter pending OpenMetrics *StateSet* support in OpenTelemetry; queries over `hw.status` are unaffected by the planned change.
"""


def create_hw_status(meter: Meter) -> UpDownCounter:
    """UpDownCounter reporting `1`/`0` per possible operational state."""
    return meter.create_up_down_counter(
        HW_STATUS,
        description="Operational status: `1` (true) or `0` (false) for each of the possible states",
        unit="1",
    )

# --- diff boundary: new file opentelemetry/semconv/_incubating/metrics/k8s_metrics.py
# --- (Apache License 2.0 header follows in the original patch)
+ + +from typing import ( + Callable, + Final, + Generator, + Iterable, + Optional, + Sequence, + Union, +) + +from opentelemetry.metrics import ( + CallbackOptions, + Counter, + Meter, + ObservableGauge, + Observation, + UpDownCounter, +) + +# pylint: disable=invalid-name +CallbackT = Union[ + Callable[[CallbackOptions], Iterable[Observation]], + Generator[Iterable[Observation], CallbackOptions, None], +] + +K8S_CRONJOB_ACTIVE_JOBS: Final = "k8s.cronjob.active_jobs" +""" +The number of actively running jobs for a cronjob +Instrument: updowncounter +Unit: {job} +Note: This metric aligns with the `active` field of the +[K8s CronJobStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#cronjobstatus-v1-batch). + +This metric SHOULD, at a minimum, be reported against a +[`k8s.cronjob`](../resource/k8s.md#cronjob) resource. +""" + + +def create_k8s_cronjob_active_jobs(meter: Meter) -> UpDownCounter: + """The number of actively running jobs for a cronjob""" + return meter.create_up_down_counter( + name=K8S_CRONJOB_ACTIVE_JOBS, + description="The number of actively running jobs for a cronjob", + unit="{job}", + ) + + +K8S_DAEMONSET_CURRENT_SCHEDULED_NODES: Final = ( + "k8s.daemonset.current_scheduled_nodes" +) +""" +Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod +Instrument: updowncounter +Unit: {node} +Note: This metric aligns with the `currentNumberScheduled` field of the +[K8s DaemonSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps). + +This metric SHOULD, at a minimum, be reported against a +[`k8s.daemonset`](../resource/k8s.md#daemonset) resource. 
+""" + + +def create_k8s_daemonset_current_scheduled_nodes( + meter: Meter, +) -> UpDownCounter: + """Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod""" + return meter.create_up_down_counter( + name=K8S_DAEMONSET_CURRENT_SCHEDULED_NODES, + description="Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod", + unit="{node}", + ) + + +K8S_DAEMONSET_DESIRED_SCHEDULED_NODES: Final = ( + "k8s.daemonset.desired_scheduled_nodes" +) +""" +Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod) +Instrument: updowncounter +Unit: {node} +Note: This metric aligns with the `desiredNumberScheduled` field of the +[K8s DaemonSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps). + +This metric SHOULD, at a minimum, be reported against a +[`k8s.daemonset`](../resource/k8s.md#daemonset) resource. +""" + + +def create_k8s_daemonset_desired_scheduled_nodes( + meter: Meter, +) -> UpDownCounter: + """Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod)""" + return meter.create_up_down_counter( + name=K8S_DAEMONSET_DESIRED_SCHEDULED_NODES, + description="Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod)", + unit="{node}", + ) + + +K8S_DAEMONSET_MISSCHEDULED_NODES: Final = "k8s.daemonset.misscheduled_nodes" +""" +Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod +Instrument: updowncounter +Unit: {node} +Note: This metric aligns with the `numberMisscheduled` field of the +[K8s DaemonSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps). + +This metric SHOULD, at a minimum, be reported against a +[`k8s.daemonset`](../resource/k8s.md#daemonset) resource. 
+""" + + +def create_k8s_daemonset_misscheduled_nodes(meter: Meter) -> UpDownCounter: + """Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod""" + return meter.create_up_down_counter( + name=K8S_DAEMONSET_MISSCHEDULED_NODES, + description="Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod", + unit="{node}", + ) + + +K8S_DAEMONSET_READY_NODES: Final = "k8s.daemonset.ready_nodes" +""" +Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready +Instrument: updowncounter +Unit: {node} +Note: This metric aligns with the `numberReady` field of the +[K8s DaemonSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps). + +This metric SHOULD, at a minimum, be reported against a +[`k8s.daemonset`](../resource/k8s.md#daemonset) resource. +""" + + +def create_k8s_daemonset_ready_nodes(meter: Meter) -> UpDownCounter: + """Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready""" + return meter.create_up_down_counter( + name=K8S_DAEMONSET_READY_NODES, + description="Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready", + unit="{node}", + ) + + +K8S_DEPLOYMENT_AVAILABLE_PODS: Final = "k8s.deployment.available_pods" +""" +Total number of available replica pods (ready for at least minReadySeconds) targeted by this deployment +Instrument: updowncounter +Unit: {pod} +Note: This metric aligns with the `availableReplicas` field of the +[K8s DeploymentStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#deploymentstatus-v1-apps). + +This metric SHOULD, at a minimum, be reported against a +[`k8s.deployment`](../resource/k8s.md#deployment) resource. 
+""" + + +def create_k8s_deployment_available_pods(meter: Meter) -> UpDownCounter: + """Total number of available replica pods (ready for at least minReadySeconds) targeted by this deployment""" + return meter.create_up_down_counter( + name=K8S_DEPLOYMENT_AVAILABLE_PODS, + description="Total number of available replica pods (ready for at least minReadySeconds) targeted by this deployment", + unit="{pod}", + ) + + +K8S_DEPLOYMENT_DESIRED_PODS: Final = "k8s.deployment.desired_pods" +""" +Number of desired replica pods in this deployment +Instrument: updowncounter +Unit: {pod} +Note: This metric aligns with the `replicas` field of the +[K8s DeploymentSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#deploymentspec-v1-apps). + +This metric SHOULD, at a minimum, be reported against a +[`k8s.deployment`](../resource/k8s.md#deployment) resource. +""" + + +def create_k8s_deployment_desired_pods(meter: Meter) -> UpDownCounter: + """Number of desired replica pods in this deployment""" + return meter.create_up_down_counter( + name=K8S_DEPLOYMENT_DESIRED_PODS, + description="Number of desired replica pods in this deployment", + unit="{pod}", + ) + + +K8S_HPA_CURRENT_PODS: Final = "k8s.hpa.current_pods" +""" +Current number of replica pods managed by this horizontal pod autoscaler, as last seen by the autoscaler +Instrument: updowncounter +Unit: {pod} +Note: This metric aligns with the `currentReplicas` field of the +[K8s HorizontalPodAutoscalerStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerstatus-v2-autoscaling) + +This metric SHOULD, at a minimum, be reported against a +[`k8s.hpa`](../resource/k8s.md#horizontalpodautoscaler) resource. 
+""" + + +def create_k8s_hpa_current_pods(meter: Meter) -> UpDownCounter: + """Current number of replica pods managed by this horizontal pod autoscaler, as last seen by the autoscaler""" + return meter.create_up_down_counter( + name=K8S_HPA_CURRENT_PODS, + description="Current number of replica pods managed by this horizontal pod autoscaler, as last seen by the autoscaler", + unit="{pod}", + ) + + +K8S_HPA_DESIRED_PODS: Final = "k8s.hpa.desired_pods" +""" +Desired number of replica pods managed by this horizontal pod autoscaler, as last calculated by the autoscaler +Instrument: updowncounter +Unit: {pod} +Note: This metric aligns with the `desiredReplicas` field of the +[K8s HorizontalPodAutoscalerStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerstatus-v2-autoscaling) + +This metric SHOULD, at a minimum, be reported against a +[`k8s.hpa`](../resource/k8s.md#horizontalpodautoscaler) resource. +""" + + +def create_k8s_hpa_desired_pods(meter: Meter) -> UpDownCounter: + """Desired number of replica pods managed by this horizontal pod autoscaler, as last calculated by the autoscaler""" + return meter.create_up_down_counter( + name=K8S_HPA_DESIRED_PODS, + description="Desired number of replica pods managed by this horizontal pod autoscaler, as last calculated by the autoscaler", + unit="{pod}", + ) + + +K8S_HPA_MAX_PODS: Final = "k8s.hpa.max_pods" +""" +The upper limit for the number of replica pods to which the autoscaler can scale up +Instrument: updowncounter +Unit: {pod} +Note: This metric aligns with the `maxReplicas` field of the +[K8s HorizontalPodAutoscalerSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerspec-v2-autoscaling) + +This metric SHOULD, at a minimum, be reported against a +[`k8s.hpa`](../resource/k8s.md#horizontalpodautoscaler) resource. 
+""" + + +def create_k8s_hpa_max_pods(meter: Meter) -> UpDownCounter: + """The upper limit for the number of replica pods to which the autoscaler can scale up""" + return meter.create_up_down_counter( + name=K8S_HPA_MAX_PODS, + description="The upper limit for the number of replica pods to which the autoscaler can scale up", + unit="{pod}", + ) + + +K8S_HPA_MIN_PODS: Final = "k8s.hpa.min_pods" +""" +The lower limit for the number of replica pods to which the autoscaler can scale down +Instrument: updowncounter +Unit: {pod} +Note: This metric aligns with the `minReplicas` field of the +[K8s HorizontalPodAutoscalerSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerspec-v2-autoscaling) + +This metric SHOULD, at a minimum, be reported against a +[`k8s.hpa`](../resource/k8s.md#horizontalpodautoscaler) resource. +""" + + +def create_k8s_hpa_min_pods(meter: Meter) -> UpDownCounter: + """The lower limit for the number of replica pods to which the autoscaler can scale down""" + return meter.create_up_down_counter( + name=K8S_HPA_MIN_PODS, + description="The lower limit for the number of replica pods to which the autoscaler can scale down", + unit="{pod}", + ) + + +K8S_JOB_ACTIVE_PODS: Final = "k8s.job.active_pods" +""" +The number of pending and actively running pods for a job +Instrument: updowncounter +Unit: {pod} +Note: This metric aligns with the `active` field of the +[K8s JobStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobstatus-v1-batch). + +This metric SHOULD, at a minimum, be reported against a +[`k8s.job`](../resource/k8s.md#job) resource. 
+""" + + +def create_k8s_job_active_pods(meter: Meter) -> UpDownCounter: + """The number of pending and actively running pods for a job""" + return meter.create_up_down_counter( + name=K8S_JOB_ACTIVE_PODS, + description="The number of pending and actively running pods for a job", + unit="{pod}", + ) + + +K8S_JOB_DESIRED_SUCCESSFUL_PODS: Final = "k8s.job.desired_successful_pods" +""" +The desired number of successfully finished pods the job should be run with +Instrument: updowncounter +Unit: {pod} +Note: This metric aligns with the `completions` field of the +[K8s JobSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobspec-v1-batch). + +This metric SHOULD, at a minimum, be reported against a +[`k8s.job`](../resource/k8s.md#job) resource. +""" + + +def create_k8s_job_desired_successful_pods(meter: Meter) -> UpDownCounter: + """The desired number of successfully finished pods the job should be run with""" + return meter.create_up_down_counter( + name=K8S_JOB_DESIRED_SUCCESSFUL_PODS, + description="The desired number of successfully finished pods the job should be run with", + unit="{pod}", + ) + + +K8S_JOB_FAILED_PODS: Final = "k8s.job.failed_pods" +""" +The number of pods which reached phase Failed for a job +Instrument: updowncounter +Unit: {pod} +Note: This metric aligns with the `failed` field of the +[K8s JobStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobstatus-v1-batch). + +This metric SHOULD, at a minimum, be reported against a +[`k8s.job`](../resource/k8s.md#job) resource. 
+""" + + +def create_k8s_job_failed_pods(meter: Meter) -> UpDownCounter: + """The number of pods which reached phase Failed for a job""" + return meter.create_up_down_counter( + name=K8S_JOB_FAILED_PODS, + description="The number of pods which reached phase Failed for a job", + unit="{pod}", + ) + + +K8S_JOB_MAX_PARALLEL_PODS: Final = "k8s.job.max_parallel_pods" +""" +The max desired number of pods the job should run at any given time +Instrument: updowncounter +Unit: {pod} +Note: This metric aligns with the `parallelism` field of the +[K8s JobSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobspec-v1-batch). + +This metric SHOULD, at a minimum, be reported against a +[`k8s.job`](../resource/k8s.md#job) resource. +""" + + +def create_k8s_job_max_parallel_pods(meter: Meter) -> UpDownCounter: + """The max desired number of pods the job should run at any given time""" + return meter.create_up_down_counter( + name=K8S_JOB_MAX_PARALLEL_PODS, + description="The max desired number of pods the job should run at any given time", + unit="{pod}", + ) + + +K8S_JOB_SUCCESSFUL_PODS: Final = "k8s.job.successful_pods" +""" +The number of pods which reached phase Succeeded for a job +Instrument: updowncounter +Unit: {pod} +Note: This metric aligns with the `succeeded` field of the +[K8s JobStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobstatus-v1-batch). + +This metric SHOULD, at a minimum, be reported against a +[`k8s.job`](../resource/k8s.md#job) resource. 
+""" + + +def create_k8s_job_successful_pods(meter: Meter) -> UpDownCounter: + """The number of pods which reached phase Succeeded for a job""" + return meter.create_up_down_counter( + name=K8S_JOB_SUCCESSFUL_PODS, + description="The number of pods which reached phase Succeeded for a job", + unit="{pod}", + ) + + +K8S_NAMESPACE_PHASE: Final = "k8s.namespace.phase" +""" +Describes number of K8s namespaces that are currently in a given phase +Instrument: updowncounter +Unit: {namespace} +Note: This metric SHOULD, at a minimum, be reported against a +[`k8s.namespace`](../resource/k8s.md#namespace) resource. +""" + + +def create_k8s_namespace_phase(meter: Meter) -> UpDownCounter: + """Describes number of K8s namespaces that are currently in a given phase""" + return meter.create_up_down_counter( + name=K8S_NAMESPACE_PHASE, + description="Describes number of K8s namespaces that are currently in a given phase.", + unit="{namespace}", + ) + + +K8S_NODE_CPU_TIME: Final = "k8s.node.cpu.time" +""" +Total CPU time consumed +Instrument: counter +Unit: s +Note: Total CPU time consumed by the specific Node on all available CPU cores. +""" + + +def create_k8s_node_cpu_time(meter: Meter) -> Counter: + """Total CPU time consumed""" + return meter.create_counter( + name=K8S_NODE_CPU_TIME, + description="Total CPU time consumed", + unit="s", + ) + + +K8S_NODE_CPU_USAGE: Final = "k8s.node.cpu.usage" +""" +Node's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs +Instrument: gauge +Unit: {cpu} +Note: CPU usage of the specific Node on all available CPU cores, averaged over the sample window. +""" + + +def create_k8s_node_cpu_usage( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """Node's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs""" + return meter.create_observable_gauge( + name=K8S_NODE_CPU_USAGE, + callbacks=callbacks, + description="Node's CPU usage, measured in cpus. 
Range from 0 to the number of allocatable CPUs",
+        unit="{cpu}",
+    )
+
+
+# Metric name constant; the factory below registers an instrument under this name.
+K8S_NODE_MEMORY_USAGE: Final = "k8s.node.memory.usage"
+"""
+Memory usage of the Node
+Instrument: gauge
+Unit: By
+Note: Total memory usage of the Node.
+"""
+
+
+# Asynchronous instrument: measurements are produced by the supplied
+# callbacks rather than recorded directly by the caller. `callbacks` is
+# Optional, so passing None registers a gauge with no observers.
+def create_k8s_node_memory_usage(
+    meter: Meter, callbacks: Optional[Sequence[CallbackT]]
+) -> ObservableGauge:
+    """Memory usage of the Node"""
+    return meter.create_observable_gauge(
+        name=K8S_NODE_MEMORY_USAGE,
+        callbacks=callbacks,
+        description="Memory usage of the Node",
+        unit="By",
+    )
+
+
+K8S_NODE_NETWORK_ERRORS: Final = "k8s.node.network.errors"
+"""
+Node network errors
+Instrument: counter
+Unit: {error}
+"""
+
+
+# Synchronous monotonic counter; callers record deltas themselves.
+def create_k8s_node_network_errors(meter: Meter) -> Counter:
+    """Node network errors"""
+    return meter.create_counter(
+        name=K8S_NODE_NETWORK_ERRORS,
+        description="Node network errors",
+        unit="{error}",
+    )
+
+
+K8S_NODE_NETWORK_IO: Final = "k8s.node.network.io"
+"""
+Network bytes for the Node
+Instrument: counter
+Unit: By
+"""
+
+
+def create_k8s_node_network_io(meter: Meter) -> Counter:
+    """Network bytes for the Node"""
+    return meter.create_counter(
+        name=K8S_NODE_NETWORK_IO,
+        description="Network bytes for the Node",
+        unit="By",
+    )
+
+
+K8S_NODE_UPTIME: Final = "k8s.node.uptime"
+"""
+The time the Node has been running
+Instrument: gauge
+Unit: s
+Note: Instrumentations SHOULD use a gauge with type `double` and measure uptime in seconds as a floating point number with the highest precision available.
+The actual accuracy would depend on the instrumentation and operating system.
+""" + + +def create_k8s_node_uptime( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """The time the Node has been running""" + return meter.create_observable_gauge( + name=K8S_NODE_UPTIME, + callbacks=callbacks, + description="The time the Node has been running", + unit="s", + ) + + +K8S_POD_CPU_TIME: Final = "k8s.pod.cpu.time" +""" +Total CPU time consumed +Instrument: counter +Unit: s +Note: Total CPU time consumed by the specific Pod on all available CPU cores. +""" + + +def create_k8s_pod_cpu_time(meter: Meter) -> Counter: + """Total CPU time consumed""" + return meter.create_counter( + name=K8S_POD_CPU_TIME, + description="Total CPU time consumed", + unit="s", + ) + + +K8S_POD_CPU_USAGE: Final = "k8s.pod.cpu.usage" +""" +Pod's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs +Instrument: gauge +Unit: {cpu} +Note: CPU usage of the specific Pod on all available CPU cores, averaged over the sample window. +""" + + +def create_k8s_pod_cpu_usage( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """Pod's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs""" + return meter.create_observable_gauge( + name=K8S_POD_CPU_USAGE, + callbacks=callbacks, + description="Pod's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs", + unit="{cpu}", + ) + + +K8S_POD_MEMORY_USAGE: Final = "k8s.pod.memory.usage" +""" +Memory usage of the Pod +Instrument: gauge +Unit: By +Note: Total memory usage of the Pod. 
+""" + + +def create_k8s_pod_memory_usage( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """Memory usage of the Pod""" + return meter.create_observable_gauge( + name=K8S_POD_MEMORY_USAGE, + callbacks=callbacks, + description="Memory usage of the Pod", + unit="By", + ) + + +K8S_POD_NETWORK_ERRORS: Final = "k8s.pod.network.errors" +""" +Pod network errors +Instrument: counter +Unit: {error} +""" + + +def create_k8s_pod_network_errors(meter: Meter) -> Counter: + """Pod network errors""" + return meter.create_counter( + name=K8S_POD_NETWORK_ERRORS, + description="Pod network errors", + unit="{error}", + ) + + +K8S_POD_NETWORK_IO: Final = "k8s.pod.network.io" +""" +Network bytes for the Pod +Instrument: counter +Unit: By +""" + + +def create_k8s_pod_network_io(meter: Meter) -> Counter: + """Network bytes for the Pod""" + return meter.create_counter( + name=K8S_POD_NETWORK_IO, + description="Network bytes for the Pod", + unit="By", + ) + + +K8S_POD_UPTIME: Final = "k8s.pod.uptime" +""" +The time the Pod has been running +Instrument: gauge +Unit: s +Note: Instrumentations SHOULD use a gauge with type `double` and measure uptime in seconds as a floating point number with the highest precision available. +The actual accuracy would depend on the instrumentation and operating system. 
+""" + + +def create_k8s_pod_uptime( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """The time the Pod has been running""" + return meter.create_observable_gauge( + name=K8S_POD_UPTIME, + callbacks=callbacks, + description="The time the Pod has been running", + unit="s", + ) + + +K8S_REPLICASET_AVAILABLE_PODS: Final = "k8s.replicaset.available_pods" +""" +Total number of available replica pods (ready for at least minReadySeconds) targeted by this replicaset +Instrument: updowncounter +Unit: {pod} +Note: This metric aligns with the `availableReplicas` field of the +[K8s ReplicaSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicasetstatus-v1-apps). + +This metric SHOULD, at a minimum, be reported against a +[`k8s.replicaset`](../resource/k8s.md#replicaset) resource. +""" + + +def create_k8s_replicaset_available_pods(meter: Meter) -> UpDownCounter: + """Total number of available replica pods (ready for at least minReadySeconds) targeted by this replicaset""" + return meter.create_up_down_counter( + name=K8S_REPLICASET_AVAILABLE_PODS, + description="Total number of available replica pods (ready for at least minReadySeconds) targeted by this replicaset", + unit="{pod}", + ) + + +K8S_REPLICASET_DESIRED_PODS: Final = "k8s.replicaset.desired_pods" +""" +Number of desired replica pods in this replicaset +Instrument: updowncounter +Unit: {pod} +Note: This metric aligns with the `replicas` field of the +[K8s ReplicaSetSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicasetspec-v1-apps). + +This metric SHOULD, at a minimum, be reported against a +[`k8s.replicaset`](../resource/k8s.md#replicaset) resource. 
+""" + + +def create_k8s_replicaset_desired_pods(meter: Meter) -> UpDownCounter: + """Number of desired replica pods in this replicaset""" + return meter.create_up_down_counter( + name=K8S_REPLICASET_DESIRED_PODS, + description="Number of desired replica pods in this replicaset", + unit="{pod}", + ) + + +K8S_REPLICATION_CONTROLLER_AVAILABLE_PODS: Final = ( + "k8s.replication_controller.available_pods" +) +""" +Deprecated: Replaced by `k8s.replicationcontroller.available_pods`. +""" + + +def create_k8s_replication_controller_available_pods( + meter: Meter, +) -> UpDownCounter: + """Deprecated, use `k8s.replicationcontroller.available_pods` instead""" + return meter.create_up_down_counter( + name=K8S_REPLICATION_CONTROLLER_AVAILABLE_PODS, + description="Deprecated, use `k8s.replicationcontroller.available_pods` instead.", + unit="{pod}", + ) + + +K8S_REPLICATION_CONTROLLER_DESIRED_PODS: Final = ( + "k8s.replication_controller.desired_pods" +) +""" +Deprecated: Replaced by `k8s.replicationcontroller.desired_pods`. 
+""" + + +def create_k8s_replication_controller_desired_pods( + meter: Meter, +) -> UpDownCounter: + """Deprecated, use `k8s.replicationcontroller.desired_pods` instead""" + return meter.create_up_down_counter( + name=K8S_REPLICATION_CONTROLLER_DESIRED_PODS, + description="Deprecated, use `k8s.replicationcontroller.desired_pods` instead.", + unit="{pod}", + ) + + +K8S_REPLICATIONCONTROLLER_AVAILABLE_PODS: Final = ( + "k8s.replicationcontroller.available_pods" +) +""" +Total number of available replica pods (ready for at least minReadySeconds) targeted by this replication controller +Instrument: updowncounter +Unit: {pod} +Note: This metric aligns with the `availableReplicas` field of the +[K8s ReplicationControllerStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicationcontrollerstatus-v1-core) + +This metric SHOULD, at a minimum, be reported against a +[`k8s.replicationcontroller`](../resource/k8s.md#replicationcontroller) resource. +""" + + +def create_k8s_replicationcontroller_available_pods( + meter: Meter, +) -> UpDownCounter: + """Total number of available replica pods (ready for at least minReadySeconds) targeted by this replication controller""" + return meter.create_up_down_counter( + name=K8S_REPLICATIONCONTROLLER_AVAILABLE_PODS, + description="Total number of available replica pods (ready for at least minReadySeconds) targeted by this replication controller", + unit="{pod}", + ) + + +K8S_REPLICATIONCONTROLLER_DESIRED_PODS: Final = ( + "k8s.replicationcontroller.desired_pods" +) +""" +Number of desired replica pods in this replication controller +Instrument: updowncounter +Unit: {pod} +Note: This metric aligns with the `replicas` field of the +[K8s ReplicationControllerSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicationcontrollerspec-v1-core) + +This metric SHOULD, at a minimum, be reported against a +[`k8s.replicationcontroller`](../resource/k8s.md#replicationcontroller) resource. 
+""" + + +def create_k8s_replicationcontroller_desired_pods( + meter: Meter, +) -> UpDownCounter: + """Number of desired replica pods in this replication controller""" + return meter.create_up_down_counter( + name=K8S_REPLICATIONCONTROLLER_DESIRED_PODS, + description="Number of desired replica pods in this replication controller", + unit="{pod}", + ) + + +K8S_STATEFULSET_CURRENT_PODS: Final = "k8s.statefulset.current_pods" +""" +The number of replica pods created by the statefulset controller from the statefulset version indicated by currentRevision +Instrument: updowncounter +Unit: {pod} +Note: This metric aligns with the `currentReplicas` field of the +[K8s StatefulSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetstatus-v1-apps). + +This metric SHOULD, at a minimum, be reported against a +[`k8s.statefulset`](../resource/k8s.md#statefulset) resource. +""" + + +def create_k8s_statefulset_current_pods(meter: Meter) -> UpDownCounter: + """The number of replica pods created by the statefulset controller from the statefulset version indicated by currentRevision""" + return meter.create_up_down_counter( + name=K8S_STATEFULSET_CURRENT_PODS, + description="The number of replica pods created by the statefulset controller from the statefulset version indicated by currentRevision", + unit="{pod}", + ) + + +K8S_STATEFULSET_DESIRED_PODS: Final = "k8s.statefulset.desired_pods" +""" +Number of desired replica pods in this statefulset +Instrument: updowncounter +Unit: {pod} +Note: This metric aligns with the `replicas` field of the +[K8s StatefulSetSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetspec-v1-apps). + +This metric SHOULD, at a minimum, be reported against a +[`k8s.statefulset`](../resource/k8s.md#statefulset) resource. 
+""" + + +def create_k8s_statefulset_desired_pods(meter: Meter) -> UpDownCounter: + """Number of desired replica pods in this statefulset""" + return meter.create_up_down_counter( + name=K8S_STATEFULSET_DESIRED_PODS, + description="Number of desired replica pods in this statefulset", + unit="{pod}", + ) + + +K8S_STATEFULSET_READY_PODS: Final = "k8s.statefulset.ready_pods" +""" +The number of replica pods created for this statefulset with a Ready Condition +Instrument: updowncounter +Unit: {pod} +Note: This metric aligns with the `readyReplicas` field of the +[K8s StatefulSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetstatus-v1-apps). + +This metric SHOULD, at a minimum, be reported against a +[`k8s.statefulset`](../resource/k8s.md#statefulset) resource. +""" + + +def create_k8s_statefulset_ready_pods(meter: Meter) -> UpDownCounter: + """The number of replica pods created for this statefulset with a Ready Condition""" + return meter.create_up_down_counter( + name=K8S_STATEFULSET_READY_PODS, + description="The number of replica pods created for this statefulset with a Ready Condition", + unit="{pod}", + ) + + +K8S_STATEFULSET_UPDATED_PODS: Final = "k8s.statefulset.updated_pods" +""" +Number of replica pods created by the statefulset controller from the statefulset version indicated by updateRevision +Instrument: updowncounter +Unit: {pod} +Note: This metric aligns with the `updatedReplicas` field of the +[K8s StatefulSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetstatus-v1-apps). + +This metric SHOULD, at a minimum, be reported against a +[`k8s.statefulset`](../resource/k8s.md#statefulset) resource. 
+""" + + +def create_k8s_statefulset_updated_pods(meter: Meter) -> UpDownCounter: + """Number of replica pods created by the statefulset controller from the statefulset version indicated by updateRevision""" + return meter.create_up_down_counter( + name=K8S_STATEFULSET_UPDATED_PODS, + description="Number of replica pods created by the statefulset controller from the statefulset version indicated by updateRevision", + unit="{pod}", + ) diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/messaging_metrics.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/messaging_metrics.py new file mode 100644 index 00000000..0418743f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/messaging_metrics.py @@ -0,0 +1,186 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from typing import Final + +from opentelemetry.metrics import Counter, Histogram, Meter + +MESSAGING_CLIENT_CONSUMED_MESSAGES: Final = ( + "messaging.client.consumed.messages" +) +""" +Number of messages that were delivered to the application +Instrument: counter +Unit: {message} +Note: Records the number of messages pulled from the broker or number of messages dispatched to the application in push-based scenarios. +The metric SHOULD be reported once per message delivery. 
For example, if receiving and processing operations are both instrumented for a single message delivery, this counter is incremented when the message is received and not reported when it is processed.
+"""
+
+
+# Synchronous monotonic counter for delivered-message counts.
+def create_messaging_client_consumed_messages(meter: Meter) -> Counter:
+    """Number of messages that were delivered to the application"""
+    return meter.create_counter(
+        name=MESSAGING_CLIENT_CONSUMED_MESSAGES,
+        description="Number of messages that were delivered to the application.",
+        unit="{message}",
+    )
+
+
+MESSAGING_CLIENT_OPERATION_DURATION: Final = (
+    "messaging.client.operation.duration"
+)
+"""
+Duration of messaging operation initiated by a producer or consumer client
+Instrument: histogram
+Unit: s
+Note: This metric SHOULD NOT be used to report processing duration - processing duration is reported in `messaging.process.duration` metric.
+"""
+
+
+# Histogram in seconds (not ms); per the docstring above, processing time
+# is reported separately via `messaging.process.duration`.
+def create_messaging_client_operation_duration(meter: Meter) -> Histogram:
+    """Duration of messaging operation initiated by a producer or consumer client"""
+    return meter.create_histogram(
+        name=MESSAGING_CLIENT_OPERATION_DURATION,
+        description="Duration of messaging operation initiated by a producer or consumer client.",
+        unit="s",
+    )
+
+
+MESSAGING_CLIENT_PUBLISHED_MESSAGES: Final = (
+    "messaging.client.published.messages"
+)
+"""
+Deprecated: Replaced by `messaging.client.sent.messages`.
+"""
+
+
+# Deprecated metric name; the replacement is named in the docstring above.
+def create_messaging_client_published_messages(meter: Meter) -> Counter:
+    """Deprecated. Use `messaging.client.sent.messages` instead"""
+    return meter.create_counter(
+        name=MESSAGING_CLIENT_PUBLISHED_MESSAGES,
+        description="Deprecated. Use `messaging.client.sent.messages` instead.",
+        unit="{message}",
+    )
+
+
+MESSAGING_CLIENT_SENT_MESSAGES: Final = "messaging.client.sent.messages"
+"""
+Number of messages producer attempted to send to the broker
+Instrument: counter
+Unit: {message}
+Note: This metric MUST NOT count messages that were created but haven't yet been sent.
+""" + + +def create_messaging_client_sent_messages(meter: Meter) -> Counter: + """Number of messages producer attempted to send to the broker""" + return meter.create_counter( + name=MESSAGING_CLIENT_SENT_MESSAGES, + description="Number of messages producer attempted to send to the broker.", + unit="{message}", + ) + + +MESSAGING_PROCESS_DURATION: Final = "messaging.process.duration" +""" +Duration of processing operation +Instrument: histogram +Unit: s +Note: This metric MUST be reported for operations with `messaging.operation.type` that matches `process`. +""" + + +def create_messaging_process_duration(meter: Meter) -> Histogram: + """Duration of processing operation""" + return meter.create_histogram( + name=MESSAGING_PROCESS_DURATION, + description="Duration of processing operation.", + unit="s", + ) + + +MESSAGING_PROCESS_MESSAGES: Final = "messaging.process.messages" +""" +Deprecated: Replaced by `messaging.client.consumed.messages`. +""" + + +def create_messaging_process_messages(meter: Meter) -> Counter: + """Deprecated. Use `messaging.client.consumed.messages` instead""" + return meter.create_counter( + name=MESSAGING_PROCESS_MESSAGES, + description="Deprecated. Use `messaging.client.consumed.messages` instead.", + unit="{message}", + ) + + +MESSAGING_PUBLISH_DURATION: Final = "messaging.publish.duration" +""" +Deprecated: Replaced by `messaging.client.operation.duration`. +""" + + +def create_messaging_publish_duration(meter: Meter) -> Histogram: + """Deprecated. Use `messaging.client.operation.duration` instead""" + return meter.create_histogram( + name=MESSAGING_PUBLISH_DURATION, + description="Deprecated. Use `messaging.client.operation.duration` instead.", + unit="s", + ) + + +MESSAGING_PUBLISH_MESSAGES: Final = "messaging.publish.messages" +""" +Deprecated: Replaced by `messaging.client.produced.messages`. +""" + + +def create_messaging_publish_messages(meter: Meter) -> Counter: + """Deprecated. 
Use `messaging.client.produced.messages` instead""" + return meter.create_counter( + name=MESSAGING_PUBLISH_MESSAGES, + description="Deprecated. Use `messaging.client.produced.messages` instead.", + unit="{message}", + ) + + +MESSAGING_RECEIVE_DURATION: Final = "messaging.receive.duration" +""" +Deprecated: Replaced by `messaging.client.operation.duration`. +""" + + +def create_messaging_receive_duration(meter: Meter) -> Histogram: + """Deprecated. Use `messaging.client.operation.duration` instead""" + return meter.create_histogram( + name=MESSAGING_RECEIVE_DURATION, + description="Deprecated. Use `messaging.client.operation.duration` instead.", + unit="s", + ) + + +MESSAGING_RECEIVE_MESSAGES: Final = "messaging.receive.messages" +""" +Deprecated: Replaced by `messaging.client.consumed.messages`. +""" + + +def create_messaging_receive_messages(meter: Meter) -> Counter: + """Deprecated. Use `messaging.client.consumed.messages` instead""" + return meter.create_counter( + name=MESSAGING_RECEIVE_MESSAGES, + description="Deprecated. Use `messaging.client.consumed.messages` instead.", + unit="{message}", + ) diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/otel_metrics.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/otel_metrics.py new file mode 100644 index 00000000..ab9a8f1f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/otel_metrics.py @@ -0,0 +1,162 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +from typing import Final + +from opentelemetry.metrics import Counter, Meter, UpDownCounter + +OTEL_SDK_EXPORTER_SPAN_EXPORTED_COUNT: Final = ( + "otel.sdk.exporter.span.exported.count" +) +""" +The number of spans for which the export has finished, either successful or failed +Instrument: counter +Unit: {span} +Note: For successful exports, `error.type` MUST NOT be set. For failed exports, `error.type` must contain the failure cause. +For exporters with partial success semantics (e.g. OTLP with `rejected_spans`), rejected spans must count as failed and only non-rejected spans count as success. +If no rejection reason is available, `rejected` SHOULD be used as value for `error.type`. +""" + + +def create_otel_sdk_exporter_span_exported_count(meter: Meter) -> Counter: + """The number of spans for which the export has finished, either successful or failed""" + return meter.create_counter( + name=OTEL_SDK_EXPORTER_SPAN_EXPORTED_COUNT, + description="The number of spans for which the export has finished, either successful or failed", + unit="{span}", + ) + + +OTEL_SDK_EXPORTER_SPAN_INFLIGHT_COUNT: Final = ( + "otel.sdk.exporter.span.inflight.count" +) +""" +The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed) +Instrument: updowncounter +Unit: {span} +Note: For successful exports, `error.type` MUST NOT be set. For failed exports, `error.type` must contain the failure cause. 
+""" + + +def create_otel_sdk_exporter_span_inflight_count( + meter: Meter, +) -> UpDownCounter: + """The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)""" + return meter.create_up_down_counter( + name=OTEL_SDK_EXPORTER_SPAN_INFLIGHT_COUNT, + description="The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)", + unit="{span}", + ) + + +OTEL_SDK_PROCESSOR_SPAN_PROCESSED_COUNT: Final = ( + "otel.sdk.processor.span.processed.count" +) +""" +The number of spans for which the processing has finished, either successful or failed +Instrument: counter +Unit: {span} +Note: For successful processing, `error.type` MUST NOT be set. For failed processing, `error.type` must contain the failure cause. +For the SDK Simple and Batching Span Processor a span is considered to be processed already when it has been submitted to the exporter, not when the corresponding export call has finished. +""" + + +def create_otel_sdk_processor_span_processed_count(meter: Meter) -> Counter: + """The number of spans for which the processing has finished, either successful or failed""" + return meter.create_counter( + name=OTEL_SDK_PROCESSOR_SPAN_PROCESSED_COUNT, + description="The number of spans for which the processing has finished, either successful or failed", + unit="{span}", + ) + + +OTEL_SDK_PROCESSOR_SPAN_QUEUE_CAPACITY: Final = ( + "otel.sdk.processor.span.queue.capacity" +) +""" +The maximum number of spans the queue of a given instance of an SDK span processor can hold +Instrument: updowncounter +Unit: {span} +Note: Only applies to span processors which use a queue, e.g. the SDK Batching Span Processor. 
+""" + + +def create_otel_sdk_processor_span_queue_capacity( + meter: Meter, +) -> UpDownCounter: + """The maximum number of spans the queue of a given instance of an SDK span processor can hold""" + return meter.create_up_down_counter( + name=OTEL_SDK_PROCESSOR_SPAN_QUEUE_CAPACITY, + description="The maximum number of spans the queue of a given instance of an SDK span processor can hold", + unit="{span}", + ) + + +OTEL_SDK_PROCESSOR_SPAN_QUEUE_SIZE: Final = ( + "otel.sdk.processor.span.queue.size" +) +""" +The number of spans in the queue of a given instance of an SDK span processor +Instrument: updowncounter +Unit: {span} +Note: Only applies to span processors which use a queue, e.g. the SDK Batching Span Processor. +""" + + +def create_otel_sdk_processor_span_queue_size(meter: Meter) -> UpDownCounter: + """The number of spans in the queue of a given instance of an SDK span processor""" + return meter.create_up_down_counter( + name=OTEL_SDK_PROCESSOR_SPAN_QUEUE_SIZE, + description="The number of spans in the queue of a given instance of an SDK span processor", + unit="{span}", + ) + + +OTEL_SDK_SPAN_ENDED_COUNT: Final = "otel.sdk.span.ended.count" +""" +The number of created spans for which the end operation was called +Instrument: counter +Unit: {span} +Note: For spans with `recording=true`: Implementations MUST record both `otel.sdk.span.live.count` and `otel.sdk.span.ended.count`. +For spans with `recording=false`: If implementations decide to record this metric, they MUST also record `otel.sdk.span.live.count`. 
+""" + + +def create_otel_sdk_span_ended_count(meter: Meter) -> Counter: + """The number of created spans for which the end operation was called""" + return meter.create_counter( + name=OTEL_SDK_SPAN_ENDED_COUNT, + description="The number of created spans for which the end operation was called", + unit="{span}", + ) + + +OTEL_SDK_SPAN_LIVE_COUNT: Final = "otel.sdk.span.live.count" +""" +The number of created spans for which the end operation has not been called yet +Instrument: updowncounter +Unit: {span} +Note: For spans with `recording=true`: Implementations MUST record both `otel.sdk.span.live.count` and `otel.sdk.span.ended.count`. +For spans with `recording=false`: If implementations decide to record this metric, they MUST also record `otel.sdk.span.ended.count`. +""" + + +def create_otel_sdk_span_live_count(meter: Meter) -> UpDownCounter: + """The number of created spans for which the end operation has not been called yet""" + return meter.create_up_down_counter( + name=OTEL_SDK_SPAN_LIVE_COUNT, + description="The number of created spans for which the end operation has not been called yet", + unit="{span}", + ) diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/process_metrics.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/process_metrics.py new file mode 100644 index 00000000..902d79de --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/process_metrics.py @@ -0,0 +1,235 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from typing import ( + Callable, + Final, + Generator, + Iterable, + Optional, + Sequence, + Union, +) + +from opentelemetry.metrics import ( + CallbackOptions, + Counter, + Meter, + ObservableGauge, + Observation, + UpDownCounter, +) + +# pylint: disable=invalid-name +CallbackT = Union[ + Callable[[CallbackOptions], Iterable[Observation]], + Generator[Iterable[Observation], CallbackOptions, None], +] + +PROCESS_CONTEXT_SWITCHES: Final = "process.context_switches" +""" +Number of times the process has been context switched +Instrument: counter +Unit: {context_switch} +""" + + +def create_process_context_switches(meter: Meter) -> Counter: + """Number of times the process has been context switched""" + return meter.create_counter( + name=PROCESS_CONTEXT_SWITCHES, + description="Number of times the process has been context switched.", + unit="{context_switch}", + ) + + +PROCESS_CPU_TIME: Final = "process.cpu.time" +""" +Total CPU seconds broken down by different states +Instrument: counter +Unit: s +""" + + +def create_process_cpu_time(meter: Meter) -> Counter: + """Total CPU seconds broken down by different states""" + return meter.create_counter( + name=PROCESS_CPU_TIME, + description="Total CPU seconds broken down by different states.", + unit="s", + ) + + +PROCESS_CPU_UTILIZATION: Final = "process.cpu.utilization" +""" +Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process +Instrument: gauge +Unit: 1 +""" + + +def create_process_cpu_utilization( + meter: Meter, callbacks: 
Optional[Sequence[CallbackT]]
+) -> ObservableGauge:
+    """Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process"""
+    return meter.create_observable_gauge(
+        name=PROCESS_CPU_UTILIZATION,
+        callbacks=callbacks,
+        description="Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process.",
+        unit="1",
+    )
+
+
+PROCESS_DISK_IO: Final = "process.disk.io"
+"""
+Disk bytes transferred
+Instrument: counter
+Unit: By
+"""
+
+
+# Synchronous monotonic counter; callers record byte deltas themselves.
+def create_process_disk_io(meter: Meter) -> Counter:
+    """Disk bytes transferred"""
+    return meter.create_counter(
+        name=PROCESS_DISK_IO,
+        description="Disk bytes transferred.",
+        unit="By",
+    )
+
+
+PROCESS_MEMORY_USAGE: Final = "process.memory.usage"
+"""
+The amount of physical memory in use
+Instrument: updowncounter
+Unit: By
+"""
+
+
+# UpDownCounter: memory in use can shrink, so the value must be decrementable.
+def create_process_memory_usage(meter: Meter) -> UpDownCounter:
+    """The amount of physical memory in use"""
+    return meter.create_up_down_counter(
+        name=PROCESS_MEMORY_USAGE,
+        description="The amount of physical memory in use.",
+        unit="By",
+    )
+
+
+PROCESS_MEMORY_VIRTUAL: Final = "process.memory.virtual"
+"""
+The amount of committed virtual memory
+Instrument: updowncounter
+Unit: By
+"""
+
+
+def create_process_memory_virtual(meter: Meter) -> UpDownCounter:
+    """The amount of committed virtual memory"""
+    return meter.create_up_down_counter(
+        name=PROCESS_MEMORY_VIRTUAL,
+        description="The amount of committed virtual memory.",
+        unit="By",
+    )
+
+
+PROCESS_NETWORK_IO: Final = "process.network.io"
+"""
+Network bytes transferred
+Instrument: counter
+Unit: By
+"""
+
+
+def create_process_network_io(meter: Meter) -> Counter:
+    """Network bytes transferred"""
+    return meter.create_counter(
+        name=PROCESS_NETWORK_IO,
+        description="Network bytes transferred.",
+        unit="By",
+    )
+
+
+PROCESS_OPEN_FILE_DESCRIPTOR_COUNT: Final = (
"process.open_file_descriptor.count" +) +""" +Number of file descriptors in use by the process +Instrument: updowncounter +Unit: {file_descriptor} +""" + + +def create_process_open_file_descriptor_count(meter: Meter) -> UpDownCounter: + """Number of file descriptors in use by the process""" + return meter.create_up_down_counter( + name=PROCESS_OPEN_FILE_DESCRIPTOR_COUNT, + description="Number of file descriptors in use by the process.", + unit="{file_descriptor}", + ) + + +PROCESS_PAGING_FAULTS: Final = "process.paging.faults" +""" +Number of page faults the process has made +Instrument: counter +Unit: {fault} +""" + + +def create_process_paging_faults(meter: Meter) -> Counter: + """Number of page faults the process has made""" + return meter.create_counter( + name=PROCESS_PAGING_FAULTS, + description="Number of page faults the process has made.", + unit="{fault}", + ) + + +PROCESS_THREAD_COUNT: Final = "process.thread.count" +""" +Process threads count +Instrument: updowncounter +Unit: {thread} +""" + + +def create_process_thread_count(meter: Meter) -> UpDownCounter: + """Process threads count""" + return meter.create_up_down_counter( + name=PROCESS_THREAD_COUNT, + description="Process threads count.", + unit="{thread}", + ) + + +PROCESS_UPTIME: Final = "process.uptime" +""" +The time the process has been running +Instrument: gauge +Unit: s +Note: Instrumentations SHOULD use a gauge with type `double` and measure uptime in seconds as a floating point number with the highest precision available. +The actual accuracy would depend on the instrumentation and operating system. 
+""" + + +def create_process_uptime( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """The time the process has been running""" + return meter.create_observable_gauge( + name=PROCESS_UPTIME, + callbacks=callbacks, + description="The time the process has been running.", + unit="s", + ) diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/rpc_metrics.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/rpc_metrics.py new file mode 100644 index 00000000..e3f4ad6e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/rpc_metrics.py @@ -0,0 +1,211 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from typing import Final + +from opentelemetry.metrics import Histogram, Meter + +RPC_CLIENT_DURATION: Final = "rpc.client.duration" +""" +Measures the duration of outbound RPC +Instrument: histogram +Unit: ms +Note: While streaming RPCs may record this metric as start-of-batch +to end-of-batch, it's hard to interpret in practice. + +**Streaming**: N/A. 
+""" + + +def create_rpc_client_duration(meter: Meter) -> Histogram: + """Measures the duration of outbound RPC""" + return meter.create_histogram( + name=RPC_CLIENT_DURATION, + description="Measures the duration of outbound RPC.", + unit="ms", + ) + + +RPC_CLIENT_REQUEST_SIZE: Final = "rpc.client.request.size" +""" +Measures the size of RPC request messages (uncompressed) +Instrument: histogram +Unit: By +Note: **Streaming**: Recorded per message in a streaming batch. +""" + + +def create_rpc_client_request_size(meter: Meter) -> Histogram: + """Measures the size of RPC request messages (uncompressed)""" + return meter.create_histogram( + name=RPC_CLIENT_REQUEST_SIZE, + description="Measures the size of RPC request messages (uncompressed).", + unit="By", + ) + + +RPC_CLIENT_REQUESTS_PER_RPC: Final = "rpc.client.requests_per_rpc" +""" +Measures the number of messages received per RPC +Instrument: histogram +Unit: {count} +Note: Should be 1 for all non-streaming RPCs. + +**Streaming**: This metric is required for server and client streaming RPCs. +""" + + +def create_rpc_client_requests_per_rpc(meter: Meter) -> Histogram: + """Measures the number of messages received per RPC""" + return meter.create_histogram( + name=RPC_CLIENT_REQUESTS_PER_RPC, + description="Measures the number of messages received per RPC.", + unit="{count}", + ) + + +RPC_CLIENT_RESPONSE_SIZE: Final = "rpc.client.response.size" +""" +Measures the size of RPC response messages (uncompressed) +Instrument: histogram +Unit: By +Note: **Streaming**: Recorded per response in a streaming batch. 
+""" + + +def create_rpc_client_response_size(meter: Meter) -> Histogram: + """Measures the size of RPC response messages (uncompressed)""" + return meter.create_histogram( + name=RPC_CLIENT_RESPONSE_SIZE, + description="Measures the size of RPC response messages (uncompressed).", + unit="By", + ) + + +RPC_CLIENT_RESPONSES_PER_RPC: Final = "rpc.client.responses_per_rpc" +""" +Measures the number of messages sent per RPC +Instrument: histogram +Unit: {count} +Note: Should be 1 for all non-streaming RPCs. + +**Streaming**: This metric is required for server and client streaming RPCs. +""" + + +def create_rpc_client_responses_per_rpc(meter: Meter) -> Histogram: + """Measures the number of messages sent per RPC""" + return meter.create_histogram( + name=RPC_CLIENT_RESPONSES_PER_RPC, + description="Measures the number of messages sent per RPC.", + unit="{count}", + ) + + +RPC_SERVER_DURATION: Final = "rpc.server.duration" +""" +Measures the duration of inbound RPC +Instrument: histogram +Unit: ms +Note: While streaming RPCs may record this metric as start-of-batch +to end-of-batch, it's hard to interpret in practice. + +**Streaming**: N/A. +""" + + +def create_rpc_server_duration(meter: Meter) -> Histogram: + """Measures the duration of inbound RPC""" + return meter.create_histogram( + name=RPC_SERVER_DURATION, + description="Measures the duration of inbound RPC.", + unit="ms", + ) + + +RPC_SERVER_REQUEST_SIZE: Final = "rpc.server.request.size" +""" +Measures the size of RPC request messages (uncompressed) +Instrument: histogram +Unit: By +Note: **Streaming**: Recorded per message in a streaming batch. 
+""" + + +def create_rpc_server_request_size(meter: Meter) -> Histogram: + """Measures the size of RPC request messages (uncompressed)""" + return meter.create_histogram( + name=RPC_SERVER_REQUEST_SIZE, + description="Measures the size of RPC request messages (uncompressed).", + unit="By", + ) + + +RPC_SERVER_REQUESTS_PER_RPC: Final = "rpc.server.requests_per_rpc" +""" +Measures the number of messages received per RPC +Instrument: histogram +Unit: {count} +Note: Should be 1 for all non-streaming RPCs. + +**Streaming** : This metric is required for server and client streaming RPCs. +""" + + +def create_rpc_server_requests_per_rpc(meter: Meter) -> Histogram: + """Measures the number of messages received per RPC""" + return meter.create_histogram( + name=RPC_SERVER_REQUESTS_PER_RPC, + description="Measures the number of messages received per RPC.", + unit="{count}", + ) + + +RPC_SERVER_RESPONSE_SIZE: Final = "rpc.server.response.size" +""" +Measures the size of RPC response messages (uncompressed) +Instrument: histogram +Unit: By +Note: **Streaming**: Recorded per response in a streaming batch. +""" + + +def create_rpc_server_response_size(meter: Meter) -> Histogram: + """Measures the size of RPC response messages (uncompressed)""" + return meter.create_histogram( + name=RPC_SERVER_RESPONSE_SIZE, + description="Measures the size of RPC response messages (uncompressed).", + unit="By", + ) + + +RPC_SERVER_RESPONSES_PER_RPC: Final = "rpc.server.responses_per_rpc" +""" +Measures the number of messages sent per RPC +Instrument: histogram +Unit: {count} +Note: Should be 1 for all non-streaming RPCs. + +**Streaming**: This metric is required for server and client streaming RPCs. 
+""" + + +def create_rpc_server_responses_per_rpc(meter: Meter) -> Histogram: + """Measures the number of messages sent per RPC""" + return meter.create_histogram( + name=RPC_SERVER_RESPONSES_PER_RPC, + description="Measures the number of messages sent per RPC.", + unit="{count}", + ) diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/system_metrics.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/system_metrics.py new file mode 100644 index 00000000..df2a6571 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/system_metrics.py @@ -0,0 +1,611 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from typing import ( + Callable, + Final, + Generator, + Iterable, + Optional, + Sequence, + Union, +) + +from opentelemetry.metrics import ( + CallbackOptions, + Counter, + Meter, + ObservableGauge, + Observation, + UpDownCounter, +) + +# pylint: disable=invalid-name +CallbackT = Union[ + Callable[[CallbackOptions], Iterable[Observation]], + Generator[Iterable[Observation], CallbackOptions, None], +] + +SYSTEM_CPU_FREQUENCY: Final = "system.cpu.frequency" +""" +Deprecated: Replaced by `cpu.frequency`. +""" + + +def create_system_cpu_frequency( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """Deprecated. 
Use `cpu.frequency` instead""" + return meter.create_observable_gauge( + name=SYSTEM_CPU_FREQUENCY, + callbacks=callbacks, + description="Deprecated. Use `cpu.frequency` instead.", + unit="{Hz}", + ) + + +SYSTEM_CPU_LOGICAL_COUNT: Final = "system.cpu.logical.count" +""" +Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking +Instrument: updowncounter +Unit: {cpu} +Note: Calculated by multiplying the number of sockets by the number of cores per socket, and then by the number of threads per core. +""" + + +def create_system_cpu_logical_count(meter: Meter) -> UpDownCounter: + """Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking""" + return meter.create_up_down_counter( + name=SYSTEM_CPU_LOGICAL_COUNT, + description="Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking", + unit="{cpu}", + ) + + +SYSTEM_CPU_PHYSICAL_COUNT: Final = "system.cpu.physical.count" +""" +Reports the number of actual physical processor cores on the hardware +Instrument: updowncounter +Unit: {cpu} +Note: Calculated by multiplying the number of sockets by the number of cores per socket. +""" + + +def create_system_cpu_physical_count(meter: Meter) -> UpDownCounter: + """Reports the number of actual physical processor cores on the hardware""" + return meter.create_up_down_counter( + name=SYSTEM_CPU_PHYSICAL_COUNT, + description="Reports the number of actual physical processor cores on the hardware", + unit="{cpu}", + ) + + +SYSTEM_CPU_TIME: Final = "system.cpu.time" +""" +Deprecated: Replaced by `cpu.time`. +""" + + +def create_system_cpu_time(meter: Meter) -> Counter: + """Deprecated. Use `cpu.time` instead""" + return meter.create_counter( + name=SYSTEM_CPU_TIME, + description="Deprecated. 
Use `cpu.time` instead.", + unit="s", + ) + + +SYSTEM_CPU_UTILIZATION: Final = "system.cpu.utilization" +""" +Deprecated: Replaced by `cpu.utilization`. +""" + + +def create_system_cpu_utilization( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """Deprecated. Use `cpu.utilization` instead""" + return meter.create_observable_gauge( + name=SYSTEM_CPU_UTILIZATION, + callbacks=callbacks, + description="Deprecated. Use `cpu.utilization` instead.", + unit="1", + ) + + +SYSTEM_DISK_IO: Final = "system.disk.io" +""" +Instrument: counter +Unit: By +""" + + +def create_system_disk_io(meter: Meter) -> Counter: + return meter.create_counter( + name=SYSTEM_DISK_IO, + description="", + unit="By", + ) + + +SYSTEM_DISK_IO_TIME: Final = "system.disk.io_time" +""" +Time disk spent activated +Instrument: counter +Unit: s +Note: The real elapsed time ("wall clock") used in the I/O path (time from operations running in parallel are not counted). Measured as: + +- Linux: Field 13 from [procfs-diskstats](https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats) +- Windows: The complement of + ["Disk\\% Idle Time"](https://learn.microsoft.com/archive/blogs/askcore/windows-performance-monitor-disk-counters-explained#windows-performance-monitor-disk-counters-explained) + performance counter: `uptime * (100 - "Disk\\% Idle Time") / 100`. 
+""" + + +def create_system_disk_io_time(meter: Meter) -> Counter: + """Time disk spent activated""" + return meter.create_counter( + name=SYSTEM_DISK_IO_TIME, + description="Time disk spent activated", + unit="s", + ) + + +SYSTEM_DISK_LIMIT: Final = "system.disk.limit" +""" +The total storage capacity of the disk +Instrument: updowncounter +Unit: By +""" + + +def create_system_disk_limit(meter: Meter) -> UpDownCounter: + """The total storage capacity of the disk""" + return meter.create_up_down_counter( + name=SYSTEM_DISK_LIMIT, + description="The total storage capacity of the disk", + unit="By", + ) + + +SYSTEM_DISK_MERGED: Final = "system.disk.merged" +""" +Instrument: counter +Unit: {operation} +""" + + +def create_system_disk_merged(meter: Meter) -> Counter: + return meter.create_counter( + name=SYSTEM_DISK_MERGED, + description="", + unit="{operation}", + ) + + +SYSTEM_DISK_OPERATION_TIME: Final = "system.disk.operation_time" +""" +Sum of the time each operation took to complete +Instrument: counter +Unit: s +Note: Because it is the sum of time each request took, parallel-issued requests each contribute to make the count grow. Measured as: + +- Linux: Fields 7 & 11 from [procfs-diskstats](https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats) +- Windows: "Avg. Disk sec/Read" perf counter multiplied by "Disk Reads/sec" perf counter (similar for Writes). 
+""" + + +def create_system_disk_operation_time(meter: Meter) -> Counter: + """Sum of the time each operation took to complete""" + return meter.create_counter( + name=SYSTEM_DISK_OPERATION_TIME, + description="Sum of the time each operation took to complete", + unit="s", + ) + + +SYSTEM_DISK_OPERATIONS: Final = "system.disk.operations" +""" +Instrument: counter +Unit: {operation} +""" + + +def create_system_disk_operations(meter: Meter) -> Counter: + return meter.create_counter( + name=SYSTEM_DISK_OPERATIONS, + description="", + unit="{operation}", + ) + + +SYSTEM_FILESYSTEM_LIMIT: Final = "system.filesystem.limit" +""" +The total storage capacity of the filesystem +Instrument: updowncounter +Unit: By +""" + + +def create_system_filesystem_limit(meter: Meter) -> UpDownCounter: + """The total storage capacity of the filesystem""" + return meter.create_up_down_counter( + name=SYSTEM_FILESYSTEM_LIMIT, + description="The total storage capacity of the filesystem", + unit="By", + ) + + +SYSTEM_FILESYSTEM_USAGE: Final = "system.filesystem.usage" +""" +Reports a filesystem's space usage across different states +Instrument: updowncounter +Unit: By +Note: The sum of all `system.filesystem.usage` values over the different `system.filesystem.state` attributes +SHOULD equal the total storage capacity of the filesystem, that is `system.filesystem.limit`. 
+""" + + +def create_system_filesystem_usage(meter: Meter) -> UpDownCounter: + """Reports a filesystem's space usage across different states""" + return meter.create_up_down_counter( + name=SYSTEM_FILESYSTEM_USAGE, + description="Reports a filesystem's space usage across different states.", + unit="By", + ) + + +SYSTEM_FILESYSTEM_UTILIZATION: Final = "system.filesystem.utilization" +""" +Instrument: gauge +Unit: 1 +""" + + +def create_system_filesystem_utilization( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + return meter.create_observable_gauge( + name=SYSTEM_FILESYSTEM_UTILIZATION, + callbacks=callbacks, + description="", + unit="1", + ) + + +SYSTEM_LINUX_MEMORY_AVAILABLE: Final = "system.linux.memory.available" +""" +An estimate of how much memory is available for starting new applications, without causing swapping +Instrument: updowncounter +Unit: By +Note: This is an alternative to `system.memory.usage` metric with `state=free`. +Linux starting from 3.14 exports "available" memory. It takes "free" memory as a baseline, and then factors in kernel-specific values. +This is supposed to be more accurate than just "free" memory. +For reference, see the calculations [here](https://superuser.com/a/980821). +See also `MemAvailable` in [/proc/meminfo](https://man7.org/linux/man-pages/man5/proc.5.html). 
+""" + + +def create_system_linux_memory_available(meter: Meter) -> UpDownCounter: + """An estimate of how much memory is available for starting new applications, without causing swapping""" + return meter.create_up_down_counter( + name=SYSTEM_LINUX_MEMORY_AVAILABLE, + description="An estimate of how much memory is available for starting new applications, without causing swapping", + unit="By", + ) + + +SYSTEM_LINUX_MEMORY_SLAB_USAGE: Final = "system.linux.memory.slab.usage" +""" +Reports the memory used by the Linux kernel for managing caches of frequently used objects +Instrument: updowncounter +Unit: By +Note: The sum over the `reclaimable` and `unreclaimable` state values in `linux.memory.slab.usage` SHOULD be equal to the total slab memory available on the system. +Note that the total slab memory is not constant and may vary over time. +See also the [Slab allocator](https://blogs.oracle.com/linux/post/understanding-linux-kernel-memory-statistics) and `Slab` in [/proc/meminfo](https://man7.org/linux/man-pages/man5/proc.5.html). +""" + + +def create_system_linux_memory_slab_usage(meter: Meter) -> UpDownCounter: + """Reports the memory used by the Linux kernel for managing caches of frequently used objects""" + return meter.create_up_down_counter( + name=SYSTEM_LINUX_MEMORY_SLAB_USAGE, + description="Reports the memory used by the Linux kernel for managing caches of frequently used objects.", + unit="By", + ) + + +SYSTEM_MEMORY_LIMIT: Final = "system.memory.limit" +""" +Total memory available in the system +Instrument: updowncounter +Unit: By +Note: Its value SHOULD equal the sum of `system.memory.state` over all states. 
+""" + + +def create_system_memory_limit(meter: Meter) -> UpDownCounter: + """Total memory available in the system""" + return meter.create_up_down_counter( + name=SYSTEM_MEMORY_LIMIT, + description="Total memory available in the system.", + unit="By", + ) + + +SYSTEM_MEMORY_SHARED: Final = "system.memory.shared" +""" +Shared memory used (mostly by tmpfs) +Instrument: updowncounter +Unit: By +Note: Equivalent of `shared` from [`free` command](https://man7.org/linux/man-pages/man1/free.1.html) or +`Shmem` from [`/proc/meminfo`](https://man7.org/linux/man-pages/man5/proc.5.html)". +""" + + +def create_system_memory_shared(meter: Meter) -> UpDownCounter: + """Shared memory used (mostly by tmpfs)""" + return meter.create_up_down_counter( + name=SYSTEM_MEMORY_SHARED, + description="Shared memory used (mostly by tmpfs).", + unit="By", + ) + + +SYSTEM_MEMORY_USAGE: Final = "system.memory.usage" +""" +Reports memory in use by state +Instrument: updowncounter +Unit: By +Note: The sum over all `system.memory.state` values SHOULD equal the total memory +available on the system, that is `system.memory.limit`. 
+""" + + +def create_system_memory_usage(meter: Meter) -> UpDownCounter: + """Reports memory in use by state""" + return meter.create_up_down_counter( + name=SYSTEM_MEMORY_USAGE, + description="Reports memory in use by state.", + unit="By", + ) + + +SYSTEM_MEMORY_UTILIZATION: Final = "system.memory.utilization" +""" +Instrument: gauge +Unit: 1 +""" + + +def create_system_memory_utilization( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + return meter.create_observable_gauge( + name=SYSTEM_MEMORY_UTILIZATION, + callbacks=callbacks, + description="", + unit="1", + ) + + +SYSTEM_NETWORK_CONNECTIONS: Final = "system.network.connections" +""" +Instrument: updowncounter +Unit: {connection} +""" + + +def create_system_network_connections(meter: Meter) -> UpDownCounter: + return meter.create_up_down_counter( + name=SYSTEM_NETWORK_CONNECTIONS, + description="", + unit="{connection}", + ) + + +SYSTEM_NETWORK_DROPPED: Final = "system.network.dropped" +""" +Count of packets that are dropped or discarded even though there was no error +Instrument: counter +Unit: {packet} +Note: Measured as: + +- Linux: the `drop` column in `/proc/dev/net` ([source](https://web.archive.org/web/20180321091318/http://www.onlamp.com/pub/a/linux/2000/11/16/LinuxAdmin.html)) +- Windows: [`InDiscards`/`OutDiscards`](https://docs.microsoft.com/windows/win32/api/netioapi/ns-netioapi-mib_if_row2) + from [`GetIfEntry2`](https://docs.microsoft.com/windows/win32/api/netioapi/nf-netioapi-getifentry2). 
+""" + + +def create_system_network_dropped(meter: Meter) -> Counter: + """Count of packets that are dropped or discarded even though there was no error""" + return meter.create_counter( + name=SYSTEM_NETWORK_DROPPED, + description="Count of packets that are dropped or discarded even though there was no error", + unit="{packet}", + ) + + +SYSTEM_NETWORK_ERRORS: Final = "system.network.errors" +""" +Count of network errors detected +Instrument: counter +Unit: {error} +Note: Measured as: + +- Linux: the `errs` column in `/proc/dev/net` ([source](https://web.archive.org/web/20180321091318/http://www.onlamp.com/pub/a/linux/2000/11/16/LinuxAdmin.html)). +- Windows: [`InErrors`/`OutErrors`](https://docs.microsoft.com/windows/win32/api/netioapi/ns-netioapi-mib_if_row2) + from [`GetIfEntry2`](https://docs.microsoft.com/windows/win32/api/netioapi/nf-netioapi-getifentry2). +""" + + +def create_system_network_errors(meter: Meter) -> Counter: + """Count of network errors detected""" + return meter.create_counter( + name=SYSTEM_NETWORK_ERRORS, + description="Count of network errors detected", + unit="{error}", + ) + + +SYSTEM_NETWORK_IO: Final = "system.network.io" +""" +Instrument: counter +Unit: By +""" + + +def create_system_network_io(meter: Meter) -> Counter: + return meter.create_counter( + name=SYSTEM_NETWORK_IO, + description="", + unit="By", + ) + + +SYSTEM_NETWORK_PACKETS: Final = "system.network.packets" +""" +Instrument: counter +Unit: {packet} +""" + + +def create_system_network_packets(meter: Meter) -> Counter: + return meter.create_counter( + name=SYSTEM_NETWORK_PACKETS, + description="", + unit="{packet}", + ) + + +SYSTEM_PAGING_FAULTS: Final = "system.paging.faults" +""" +Instrument: counter +Unit: {fault} +""" + + +def create_system_paging_faults(meter: Meter) -> Counter: + return meter.create_counter( + name=SYSTEM_PAGING_FAULTS, + description="", + unit="{fault}", + ) + + +SYSTEM_PAGING_OPERATIONS: Final = "system.paging.operations" +""" +Instrument: counter 
+Unit: {operation} +""" + + +def create_system_paging_operations(meter: Meter) -> Counter: + return meter.create_counter( + name=SYSTEM_PAGING_OPERATIONS, + description="", + unit="{operation}", + ) + + +SYSTEM_PAGING_USAGE: Final = "system.paging.usage" +""" +Unix swap or windows pagefile usage +Instrument: updowncounter +Unit: By +""" + + +def create_system_paging_usage(meter: Meter) -> UpDownCounter: + """Unix swap or windows pagefile usage""" + return meter.create_up_down_counter( + name=SYSTEM_PAGING_USAGE, + description="Unix swap or windows pagefile usage", + unit="By", + ) + + +SYSTEM_PAGING_UTILIZATION: Final = "system.paging.utilization" +""" +Instrument: gauge +Unit: 1 +""" + + +def create_system_paging_utilization( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + return meter.create_observable_gauge( + name=SYSTEM_PAGING_UTILIZATION, + callbacks=callbacks, + description="", + unit="1", + ) + + +SYSTEM_PROCESS_COUNT: Final = "system.process.count" +""" +Total number of processes in each state +Instrument: updowncounter +Unit: {process} +""" + + +def create_system_process_count(meter: Meter) -> UpDownCounter: + """Total number of processes in each state""" + return meter.create_up_down_counter( + name=SYSTEM_PROCESS_COUNT, + description="Total number of processes in each state", + unit="{process}", + ) + + +SYSTEM_PROCESS_CREATED: Final = "system.process.created" +""" +Total number of processes created over uptime of the host +Instrument: counter +Unit: {process} +""" + + +def create_system_process_created(meter: Meter) -> Counter: + """Total number of processes created over uptime of the host""" + return meter.create_counter( + name=SYSTEM_PROCESS_CREATED, + description="Total number of processes created over uptime of the host", + unit="{process}", + ) + + +SYSTEM_UPTIME: Final = "system.uptime" +""" +The time the system has been running +Instrument: gauge +Unit: s +Note: Instrumentations SHOULD use a gauge with type 
`double` and measure uptime in seconds as a floating point number with the highest precision available. +The actual accuracy would depend on the instrumentation and operating system. +""" + + +def create_system_uptime( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """The time the system has been running""" + return meter.create_observable_gauge( + name=SYSTEM_UPTIME, + callbacks=callbacks, + description="The time the system has been running", + unit="s", + ) diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/vcs_metrics.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/vcs_metrics.py new file mode 100644 index 00000000..c232751c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/vcs_metrics.py @@ -0,0 +1,233 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +from typing import ( + Callable, + Final, + Generator, + Iterable, + Optional, + Sequence, + Union, +) + +from opentelemetry.metrics import ( + CallbackOptions, + Meter, + ObservableGauge, + Observation, + UpDownCounter, +) + +# pylint: disable=invalid-name +CallbackT = Union[ + Callable[[CallbackOptions], Iterable[Observation]], + Generator[Iterable[Observation], CallbackOptions, None], +] + +VCS_CHANGE_COUNT: Final = "vcs.change.count" +""" +The number of changes (pull requests/merge requests/changelists) in a repository, categorized by their state (e.g. open or merged) +Instrument: updowncounter +Unit: {change} +""" + + +def create_vcs_change_count(meter: Meter) -> UpDownCounter: + """The number of changes (pull requests/merge requests/changelists) in a repository, categorized by their state (e.g. open or merged)""" + return meter.create_up_down_counter( + name=VCS_CHANGE_COUNT, + description="The number of changes (pull requests/merge requests/changelists) in a repository, categorized by their state (e.g. 
open or merged)", + unit="{change}", + ) + + +VCS_CHANGE_DURATION: Final = "vcs.change.duration" +""" +The time duration a change (pull request/merge request/changelist) has been in a given state +Instrument: gauge +Unit: s +""" + + +def create_vcs_change_duration( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """The time duration a change (pull request/merge request/changelist) has been in a given state""" + return meter.create_observable_gauge( + name=VCS_CHANGE_DURATION, + callbacks=callbacks, + description="The time duration a change (pull request/merge request/changelist) has been in a given state.", + unit="s", + ) + + +VCS_CHANGE_TIME_TO_APPROVAL: Final = "vcs.change.time_to_approval" +""" +The amount of time since its creation it took a change (pull request/merge request/changelist) to get the first approval +Instrument: gauge +Unit: s +""" + + +def create_vcs_change_time_to_approval( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """The amount of time since its creation it took a change (pull request/merge request/changelist) to get the first approval""" + return meter.create_observable_gauge( + name=VCS_CHANGE_TIME_TO_APPROVAL, + callbacks=callbacks, + description="The amount of time since its creation it took a change (pull request/merge request/changelist) to get the first approval.", + unit="s", + ) + + +VCS_CHANGE_TIME_TO_MERGE: Final = "vcs.change.time_to_merge" +""" +The amount of time since its creation it took a change (pull request/merge request/changelist) to get merged into the target(base) ref +Instrument: gauge +Unit: s +""" + + +def create_vcs_change_time_to_merge( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """The amount of time since its creation it took a change (pull request/merge request/changelist) to get merged into the target(base) ref""" + return meter.create_observable_gauge( + name=VCS_CHANGE_TIME_TO_MERGE, + 
callbacks=callbacks, + description="The amount of time since its creation it took a change (pull request/merge request/changelist) to get merged into the target(base) ref.", + unit="s", + ) + + +VCS_CONTRIBUTOR_COUNT: Final = "vcs.contributor.count" +""" +The number of unique contributors to a repository +Instrument: gauge +Unit: {contributor} +""" + + +def create_vcs_contributor_count( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """The number of unique contributors to a repository""" + return meter.create_observable_gauge( + name=VCS_CONTRIBUTOR_COUNT, + callbacks=callbacks, + description="The number of unique contributors to a repository", + unit="{contributor}", + ) + + +VCS_REF_COUNT: Final = "vcs.ref.count" +""" +The number of refs of type branch or tag in a repository +Instrument: updowncounter +Unit: {ref} +""" + + +def create_vcs_ref_count(meter: Meter) -> UpDownCounter: + """The number of refs of type branch or tag in a repository""" + return meter.create_up_down_counter( + name=VCS_REF_COUNT, + description="The number of refs of type branch or tag in a repository.", + unit="{ref}", + ) + + +VCS_REF_LINES_DELTA: Final = "vcs.ref.lines_delta" +""" +The number of lines added/removed in a ref (branch) relative to the ref from the `vcs.ref.base.name` attribute +Instrument: gauge +Unit: {line} +Note: This metric should be reported for each `vcs.line_change.type` value. For example if a ref added 3 lines and removed 2 lines, +instrumentation SHOULD report two measurements: 3 and 2 (both positive numbers). +If number of lines added/removed should be calculated from the start of time, then `vcs.ref.base.name` SHOULD be set to an empty string. 
+""" + + +def create_vcs_ref_lines_delta( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """The number of lines added/removed in a ref (branch) relative to the ref from the `vcs.ref.base.name` attribute""" + return meter.create_observable_gauge( + name=VCS_REF_LINES_DELTA, + callbacks=callbacks, + description="The number of lines added/removed in a ref (branch) relative to the ref from the `vcs.ref.base.name` attribute.", + unit="{line}", + ) + + +VCS_REF_REVISIONS_DELTA: Final = "vcs.ref.revisions_delta" +""" +The number of revisions (commits) a ref (branch) is ahead/behind the branch from the `vcs.ref.base.name` attribute +Instrument: gauge +Unit: {revision} +Note: This metric should be reported for each `vcs.revision_delta.direction` value. For example if branch `a` is 3 commits behind and 2 commits ahead of `trunk`, +instrumentation SHOULD report two measurements: 3 and 2 (both positive numbers) and `vcs.ref.base.name` is set to `trunk`. +""" + + +def create_vcs_ref_revisions_delta( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """The number of revisions (commits) a ref (branch) is ahead/behind the branch from the `vcs.ref.base.name` attribute""" + return meter.create_observable_gauge( + name=VCS_REF_REVISIONS_DELTA, + callbacks=callbacks, + description="The number of revisions (commits) a ref (branch) is ahead/behind the branch from the `vcs.ref.base.name` attribute", + unit="{revision}", + ) + + +VCS_REF_TIME: Final = "vcs.ref.time" +""" +Time a ref (branch) created from the default branch (trunk) has existed. The `ref.type` attribute will always be `branch` +Instrument: gauge +Unit: s +""" + + +def create_vcs_ref_time( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """Time a ref (branch) created from the default branch (trunk) has existed. 
The `ref.type` attribute will always be `branch`""" + return meter.create_observable_gauge( + name=VCS_REF_TIME, + callbacks=callbacks, + description="Time a ref (branch) created from the default branch (trunk) has existed. The `ref.type` attribute will always be `branch`", + unit="s", + ) + + +VCS_REPOSITORY_COUNT: Final = "vcs.repository.count" +""" +The number of repositories in an organization +Instrument: updowncounter +Unit: {repository} +""" + + +def create_vcs_repository_count(meter: Meter) -> UpDownCounter: + """The number of repositories in an organization""" + return meter.create_up_down_counter( + name=VCS_REPOSITORY_COUNT, + description="The number of repositories in an organization.", + unit="{repository}", + ) diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/attributes/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/attributes/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/attributes/__init__.py diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/attributes/client_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/attributes/client_attributes.py new file mode 100644 index 00000000..d6dd88bf --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/attributes/client_attributes.py @@ -0,0 +1,27 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Final + +CLIENT_ADDRESS: Final = "client.address" +""" +Client address - domain name if available without reverse DNS lookup; otherwise, IP address or Unix domain socket name. +Note: When observed from the server side, and when communicating through an intermediary, `client.address` SHOULD represent the client address behind any intermediaries, for example proxies, if it's available. +""" + +CLIENT_PORT: Final = "client.port" +""" +Client port number. +Note: When observed from the server side, and when communicating through an intermediary, `client.port` SHOULD represent the client port behind any intermediaries, for example proxies, if it's available. +""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/attributes/error_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/attributes/error_attributes.py new file mode 100644 index 00000000..6ffd2b9b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/attributes/error_attributes.py @@ -0,0 +1,45 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum +from typing import Final + +ERROR_TYPE: Final = "error.type" +""" +Describes a class of error the operation ended with. +Note: The `error.type` SHOULD be predictable, and SHOULD have low cardinality. 
+ +When `error.type` is set to a type (e.g., an exception type), its +canonical class name identifying the type within the artifact SHOULD be used. + +Instrumentations SHOULD document the list of errors they report. + +The cardinality of `error.type` within one instrumentation library SHOULD be low. +Telemetry consumers that aggregate data from multiple instrumentation libraries and applications +should be prepared for `error.type` to have high cardinality at query time when no +additional filters are applied. + +If the operation has completed successfully, instrumentations SHOULD NOT set `error.type`. + +If a specific domain defines its own set of error identifiers (such as HTTP or gRPC status codes), +it's RECOMMENDED to: + +- Use a domain-specific attribute +- Set `error.type` to capture all errors, regardless of whether they are defined within the domain-specific set or not. +""" + + +class ErrorTypeValues(Enum): + OTHER = "_OTHER" + """A fallback error value to be used when the instrumentation doesn't define a custom value.""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/attributes/exception_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/attributes/exception_attributes.py new file mode 100644 index 00000000..7f396abe --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/attributes/exception_attributes.py @@ -0,0 +1,35 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Final + +EXCEPTION_ESCAPED: Final = "exception.escaped" +""" +Deprecated: It's no longer recommended to record exceptions that are handled and do not escape the scope of a span. +""" + +EXCEPTION_MESSAGE: Final = "exception.message" +""" +The exception message. +""" + +EXCEPTION_STACKTRACE: Final = "exception.stacktrace" +""" +A stacktrace as a string in the natural representation for the language runtime. The representation is to be determined and documented by each language SIG. +""" + +EXCEPTION_TYPE: Final = "exception.type" +""" +The type of the exception (its fully-qualified class name, if applicable). The dynamic type of the exception should be preferred over the static type in languages that support it. +""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/attributes/http_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/attributes/http_attributes.py new file mode 100644 index 00000000..bec7c0c7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/attributes/http_attributes.py @@ -0,0 +1,97 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from enum import Enum +from typing import Final + +HTTP_REQUEST_HEADER_TEMPLATE: Final = "http.request.header" +""" +HTTP request headers, `<key>` being the normalized HTTP Header name (lowercase), the value being the header values. +Note: Instrumentations SHOULD require an explicit configuration of which headers are to be captured. Including all request headers can be a security risk - explicit configuration helps avoid leaking sensitive information. +The `User-Agent` header is already captured in the `user_agent.original` attribute. Users MAY explicitly configure instrumentations to capture them even though it is not recommended. +The attribute value MUST consist of either multiple header values as an array of strings or a single-item array containing a possibly comma-concatenated string, depending on the way the HTTP library provides access to headers. +""" + +HTTP_REQUEST_METHOD: Final = "http.request.method" +""" +HTTP request method. +Note: HTTP request method value SHOULD be "known" to the instrumentation. +By default, this convention defines "known" methods as the ones listed in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods) +and the PATCH method defined in [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html). + +If the HTTP request method is not known to instrumentation, it MUST set the `http.request.method` attribute to `_OTHER`. + +If the HTTP instrumentation could end up converting valid HTTP request methods to `_OTHER`, then it MUST provide a way to override +the list of known HTTP methods. If this override is done via environment variable, then the environment variable MUST be named +OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated list of case-sensitive known HTTP methods +(this list MUST be a full override of the default known method, it is not a list of known methods in addition to the defaults). 
+ +HTTP method names are case-sensitive and `http.request.method` attribute value MUST match a known HTTP method name exactly. +Instrumentations for specific web frameworks that consider HTTP methods to be case insensitive, SHOULD populate a canonical equivalent. +Tracing instrumentations that do so, MUST also set `http.request.method_original` to the original value. +""" + +HTTP_REQUEST_METHOD_ORIGINAL: Final = "http.request.method_original" +""" +Original HTTP method sent by the client in the request line. +""" + +HTTP_REQUEST_RESEND_COUNT: Final = "http.request.resend_count" +""" +The ordinal number of request resending attempt (for any reason, including redirects). +Note: The resend count SHOULD be updated each time an HTTP request gets resent by the client, regardless of what was the cause of the resending (e.g. redirection, authorization failure, 503 Server Unavailable, network issues, or any other). +""" + +HTTP_RESPONSE_HEADER_TEMPLATE: Final = "http.response.header" +""" +HTTP response headers, `<key>` being the normalized HTTP Header name (lowercase), the value being the header values. +Note: Instrumentations SHOULD require an explicit configuration of which headers are to be captured. Including all response headers can be a security risk - explicit configuration helps avoid leaking sensitive information. +Users MAY explicitly configure instrumentations to capture them even though it is not recommended. +The attribute value MUST consist of either multiple header values as an array of strings or a single-item array containing a possibly comma-concatenated string, depending on the way the HTTP library provides access to headers. +""" + +HTTP_RESPONSE_STATUS_CODE: Final = "http.response.status_code" +""" +[HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6). +""" + +HTTP_ROUTE: Final = "http.route" +""" +The matched route, that is, the path template in the format used by the respective server framework. 
+Note: MUST NOT be populated when this is not supported by the HTTP server framework as the route attribute should have low-cardinality and the URI path can NOT substitute it. +SHOULD include the [application root](/docs/http/http-spans.md#http-server-definitions) if there is one. +""" + + +class HttpRequestMethodValues(Enum): + CONNECT = "CONNECT" + """CONNECT method.""" + DELETE = "DELETE" + """DELETE method.""" + GET = "GET" + """GET method.""" + HEAD = "HEAD" + """HEAD method.""" + OPTIONS = "OPTIONS" + """OPTIONS method.""" + PATCH = "PATCH" + """PATCH method.""" + POST = "POST" + """POST method.""" + PUT = "PUT" + """PUT method.""" + TRACE = "TRACE" + """TRACE method.""" + OTHER = "_OTHER" + """Any HTTP method that the instrumentation has no prior knowledge of.""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/attributes/network_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/attributes/network_attributes.py new file mode 100644 index 00000000..c09fe2e0 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/attributes/network_attributes.py @@ -0,0 +1,84 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum +from typing import Final + +NETWORK_LOCAL_ADDRESS: Final = "network.local.address" +""" +Local address of the network connection - IP address or Unix domain socket name. 
+""" + +NETWORK_LOCAL_PORT: Final = "network.local.port" +""" +Local port number of the network connection. +""" + +NETWORK_PEER_ADDRESS: Final = "network.peer.address" +""" +Peer address of the network connection - IP address or Unix domain socket name. +""" + +NETWORK_PEER_PORT: Final = "network.peer.port" +""" +Peer port number of the network connection. +""" + +NETWORK_PROTOCOL_NAME: Final = "network.protocol.name" +""" +[OSI application layer](https://wikipedia.org/wiki/Application_layer) or non-OSI equivalent. +Note: The value SHOULD be normalized to lowercase. +""" + +NETWORK_PROTOCOL_VERSION: Final = "network.protocol.version" +""" +The actual version of the protocol used for network communication. +Note: If protocol version is subject to negotiation (for example using [ALPN](https://www.rfc-editor.org/rfc/rfc7301.html)), this attribute SHOULD be set to the negotiated version. If the actual protocol version is not known, this attribute SHOULD NOT be set. +""" + +NETWORK_TRANSPORT: Final = "network.transport" +""" +[OSI transport layer](https://wikipedia.org/wiki/Transport_layer) or [inter-process communication method](https://wikipedia.org/wiki/Inter-process_communication). +Note: The value SHOULD be normalized to lowercase. + +Consider always setting the transport when setting a port number, since +a port number is ambiguous without knowing the transport. For example +different processes could be listening on TCP port 12345 and UDP port 12345. +""" + +NETWORK_TYPE: Final = "network.type" +""" +[OSI network layer](https://wikipedia.org/wiki/Network_layer) or non-OSI equivalent. +Note: The value SHOULD be normalized to lowercase. 
+""" + + +class NetworkTransportValues(Enum): + TCP = "tcp" + """TCP.""" + UDP = "udp" + """UDP.""" + PIPE = "pipe" + """Named or anonymous pipe.""" + UNIX = "unix" + """Unix domain socket.""" + QUIC = "quic" + """QUIC.""" + + +class NetworkTypeValues(Enum): + IPV4 = "ipv4" + """IPv4.""" + IPV6 = "ipv6" + """IPv6.""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/attributes/otel_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/attributes/otel_attributes.py new file mode 100644 index 00000000..134e246e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/attributes/otel_attributes.py @@ -0,0 +1,43 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum +from typing import Final + +OTEL_SCOPE_NAME: Final = "otel.scope.name" +""" +The name of the instrumentation scope - (`InstrumentationScope.Name` in OTLP). +""" + +OTEL_SCOPE_VERSION: Final = "otel.scope.version" +""" +The version of the instrumentation scope - (`InstrumentationScope.Version` in OTLP). +""" + +OTEL_STATUS_CODE: Final = "otel.status_code" +""" +Name of the code, either "OK" or "ERROR". MUST NOT be set if the status code is UNSET. +""" + +OTEL_STATUS_DESCRIPTION: Final = "otel.status_description" +""" +Description of the Status if it has a value, otherwise not set. 
+""" + + +class OtelStatusCodeValues(Enum): + OK = "OK" + """The operation has been validated by an Application developer or Operator to have completed successfully.""" + ERROR = "ERROR" + """The operation contains an error.""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/attributes/server_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/attributes/server_attributes.py new file mode 100644 index 00000000..6b2658da --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/attributes/server_attributes.py @@ -0,0 +1,27 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Final + +SERVER_ADDRESS: Final = "server.address" +""" +Server domain name if available without reverse DNS lookup; otherwise, IP address or Unix domain socket name. +Note: When observed from the client side, and when communicating through an intermediary, `server.address` SHOULD represent the server address behind any intermediaries, for example proxies, if it's available. +""" + +SERVER_PORT: Final = "server.port" +""" +Server port number. +Note: When observed from the client side, and when communicating through an intermediary, `server.port` SHOULD represent the server port behind any intermediaries, for example proxies, if it's available. 
+""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/attributes/service_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/attributes/service_attributes.py new file mode 100644 index 00000000..7ad038e9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/attributes/service_attributes.py @@ -0,0 +1,26 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Final + +SERVICE_NAME: Final = "service.name" +""" +Logical name of the service. +Note: MUST be the same for all instances of horizontally scaled services. If the value was not specified, SDKs MUST fallback to `unknown_service:` concatenated with [`process.executable.name`](process.md), e.g. `unknown_service:bash`. If `process.executable.name` is not available, the value MUST be set to `unknown_service`. +""" + +SERVICE_VERSION: Final = "service.version" +""" +The version string of the service API or implementation. The format is not defined by these conventions. 
+""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/attributes/telemetry_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/attributes/telemetry_attributes.py new file mode 100644 index 00000000..29aadeb7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/attributes/telemetry_attributes.py @@ -0,0 +1,64 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum +from typing import Final + +TELEMETRY_SDK_LANGUAGE: Final = "telemetry.sdk.language" +""" +The language of the telemetry SDK. +""" + +TELEMETRY_SDK_NAME: Final = "telemetry.sdk.name" +""" +The name of the telemetry SDK as defined above. +Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute to `opentelemetry`. +If another SDK, like a fork or a vendor-provided implementation, is used, this SDK MUST set the +`telemetry.sdk.name` attribute to the fully-qualified class or module name of this SDK's main entry point +or another suitable identifier depending on the language. +The identifier `opentelemetry` is reserved and MUST NOT be used in this case. +All custom identifiers SHOULD be stable across different versions of an implementation. +""" + +TELEMETRY_SDK_VERSION: Final = "telemetry.sdk.version" +""" +The version string of the telemetry SDK. 
+""" + + +class TelemetrySdkLanguageValues(Enum): + CPP = "cpp" + """cpp.""" + DOTNET = "dotnet" + """dotnet.""" + ERLANG = "erlang" + """erlang.""" + GO = "go" + """go.""" + JAVA = "java" + """java.""" + NODEJS = "nodejs" + """nodejs.""" + PHP = "php" + """php.""" + PYTHON = "python" + """python.""" + RUBY = "ruby" + """ruby.""" + RUST = "rust" + """rust.""" + SWIFT = "swift" + """swift.""" + WEBJS = "webjs" + """webjs.""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/attributes/url_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/attributes/url_attributes.py new file mode 100644 index 00000000..404eef1b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/attributes/url_attributes.py @@ -0,0 +1,78 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Final + +URL_FRAGMENT: Final = "url.fragment" +""" +The [URI fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component. +""" + +URL_FULL: Final = "url.full" +""" +Absolute URL describing a network resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986). +Note: For network calls, URL usually has `scheme://host[:port][path][?query][#fragment]` format, where the fragment +is not transmitted over HTTP, but if it is known, it SHOULD be included nevertheless. 
+ +`url.full` MUST NOT contain credentials passed via URL in form of `https://username:password@www.example.com/`. +In such case username and password SHOULD be redacted and attribute's value SHOULD be `https://REDACTED:REDACTED@www.example.com/`. + +`url.full` SHOULD capture the absolute URL when it is available (or can be reconstructed). + +Sensitive content provided in `url.full` SHOULD be scrubbed when instrumentations can identify it. + + +Query string values for the following keys SHOULD be redacted by default and replaced by the +value `REDACTED`: + +* [`AWSAccessKeyId`](https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth) +* [`Signature`](https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth) +* [`sig`](https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token) +* [`X-Goog-Signature`](https://cloud.google.com/storage/docs/access-control/signed-urls) + +This list is subject to change over time. + +When a query string value is redacted, the query string key SHOULD still be preserved, e.g. +`https://www.example.com/path?color=blue&sig=REDACTED`. +""" + +URL_PATH: Final = "url.path" +""" +The [URI path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component. +Note: Sensitive content provided in `url.path` SHOULD be scrubbed when instrumentations can identify it. +""" + +URL_QUERY: Final = "url.query" +""" +The [URI query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component. +Note: Sensitive content provided in `url.query` SHOULD be scrubbed when instrumentations can identify it. 
+ + +Query string values for the following keys SHOULD be redacted by default and replaced by the value `REDACTED`: + +* [`AWSAccessKeyId`](https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth) +* [`Signature`](https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth) +* [`sig`](https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token) +* [`X-Goog-Signature`](https://cloud.google.com/storage/docs/access-control/signed-urls) + +This list is subject to change over time. + +When a query string value is redacted, the query string key SHOULD still be preserved, e.g. +`q=OpenTelemetry&sig=REDACTED`. +""" + +URL_SCHEME: Final = "url.scheme" +""" +The [URI scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component identifying the used protocol. +""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/attributes/user_agent_attributes.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/attributes/user_agent_attributes.py new file mode 100644 index 00000000..af5002ef --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/attributes/user_agent_attributes.py @@ -0,0 +1,20 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import Final + +USER_AGENT_ORIGINAL: Final = "user_agent.original" +""" +Value of the [HTTP User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) header sent by the client. +""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/metrics/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/metrics/__init__.py new file mode 100644 index 00000000..bad7b745 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/metrics/__init__.py @@ -0,0 +1,217 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from deprecated import deprecated + + +@deprecated( + version="1.25.0", + reason="Use metrics defined in the :py:const:`opentelemetry.semconv.metrics` and :py:const:`opentelemetry.semconv._incubating.metrics` modules instead.", +) # type: ignore +class MetricInstruments: + SCHEMA_URL = "https://opentelemetry.io/schemas/1.21.0" + """ + The URL of the OpenTelemetry schema for these keys and values. 
+ """ + + HTTP_SERVER_DURATION = "http.server.duration" + """ + Measures the duration of inbound HTTP requests + Instrument: histogram + Unit: s + """ + + HTTP_SERVER_ACTIVE_REQUESTS = "http.server.active_requests" + """ + Measures the number of concurrent HTTP requests that are currently in-flight + Instrument: updowncounter + Unit: {request} + """ + + HTTP_SERVER_REQUEST_SIZE = "http.server.request.size" + """ + Measures the size of HTTP request messages (compressed) + Instrument: histogram + Unit: By + """ + + HTTP_SERVER_RESPONSE_SIZE = "http.server.response.size" + """ + Measures the size of HTTP response messages (compressed) + Instrument: histogram + Unit: By + """ + + HTTP_CLIENT_DURATION = "http.client.duration" + """ + Measures the duration of outbound HTTP requests + Instrument: histogram + Unit: s + """ + + HTTP_CLIENT_REQUEST_SIZE = "http.client.request.size" + """ + Measures the size of HTTP request messages (compressed) + Instrument: histogram + Unit: By + """ + + HTTP_CLIENT_RESPONSE_SIZE = "http.client.response.size" + """ + Measures the size of HTTP response messages (compressed) + Instrument: histogram + Unit: By + """ + + PROCESS_RUNTIME_JVM_MEMORY_INIT = "process.runtime.jvm.memory.init" + """ + Measure of initial memory requested + Instrument: updowncounter + Unit: By + """ + + PROCESS_RUNTIME_JVM_SYSTEM_CPU_UTILIZATION = ( + "process.runtime.jvm.system.cpu.utilization" + ) + """ + Recent CPU utilization for the whole system as reported by the JVM + Instrument: gauge + Unit: 1 + """ + + PROCESS_RUNTIME_JVM_SYSTEM_CPU_LOAD_1M = ( + "process.runtime.jvm.system.cpu.load_1m" + ) + """ + Average CPU load of the whole system for the last minute as reported by the JVM + Instrument: gauge + Unit: 1 + """ + + PROCESS_RUNTIME_JVM_BUFFER_USAGE = "process.runtime.jvm.buffer.usage" + """ + Measure of memory used by buffers + Instrument: updowncounter + Unit: By + """ + + PROCESS_RUNTIME_JVM_BUFFER_LIMIT = "process.runtime.jvm.buffer.limit" + """ + Measure 
of total memory capacity of buffers + Instrument: updowncounter + Unit: By + """ + + PROCESS_RUNTIME_JVM_BUFFER_COUNT = "process.runtime.jvm.buffer.count" + """ + Number of buffers in the pool + Instrument: updowncounter + Unit: {buffer} + """ + + PROCESS_RUNTIME_JVM_MEMORY_USAGE = "process.runtime.jvm.memory.usage" + """ + Measure of memory used + Instrument: updowncounter + Unit: By + """ + + PROCESS_RUNTIME_JVM_MEMORY_COMMITTED = ( + "process.runtime.jvm.memory.committed" + ) + """ + Measure of memory committed + Instrument: updowncounter + Unit: By + """ + + PROCESS_RUNTIME_JVM_MEMORY_LIMIT = "process.runtime.jvm.memory.limit" + """ + Measure of max obtainable memory + Instrument: updowncounter + Unit: By + """ + + PROCESS_RUNTIME_JVM_MEMORY_USAGE_AFTER_LAST_GC = ( + "process.runtime.jvm.memory.usage_after_last_gc" + ) + """ + Measure of memory used, as measured after the most recent garbage collection event on this pool + Instrument: updowncounter + Unit: By + """ + + PROCESS_RUNTIME_JVM_GC_DURATION = "process.runtime.jvm.gc.duration" + """ + Duration of JVM garbage collection actions + Instrument: histogram + Unit: s + """ + + PROCESS_RUNTIME_JVM_THREADS_COUNT = "process.runtime.jvm.threads.count" + """ + Number of executing platform threads + Instrument: updowncounter + Unit: {thread} + """ + + PROCESS_RUNTIME_JVM_CLASSES_LOADED = "process.runtime.jvm.classes.loaded" + """ + Number of classes loaded since JVM start + Instrument: counter + Unit: {class} + """ + + PROCESS_RUNTIME_JVM_CLASSES_UNLOADED = ( + "process.runtime.jvm.classes.unloaded" + ) + """ + Number of classes unloaded since JVM start + Instrument: counter + Unit: {class} + """ + + PROCESS_RUNTIME_JVM_CLASSES_CURRENT_LOADED = ( + "process.runtime.jvm.classes.current_loaded" + ) + """ + Number of classes currently loaded + Instrument: updowncounter + Unit: {class} + """ + + PROCESS_RUNTIME_JVM_CPU_TIME = "process.runtime.jvm.cpu.time" + """ + CPU time used by the process as reported by the JVM + 
Instrument: counter + Unit: s + """ + + PROCESS_RUNTIME_JVM_CPU_RECENT_UTILIZATION = ( + "process.runtime.jvm.cpu.recent_utilization" + ) + """ + Recent CPU utilization for the process as reported by the JVM + Instrument: gauge + Unit: 1 + """ + + # Manually defined metrics + + DB_CLIENT_CONNECTIONS_USAGE = "db.client.connections.usage" + """ + The number of connections that are currently in state described by the `state` attribute + Instrument: UpDownCounter + Unit: {connection} + """ diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/metrics/http_metrics.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/metrics/http_metrics.py new file mode 100644 index 00000000..d0e0db65 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/metrics/http_metrics.py @@ -0,0 +1,31 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
from typing import Final

# Metric names for the stable HTTP semantic conventions.

HTTP_CLIENT_REQUEST_DURATION: Final = "http.client.request.duration"
# Duration of HTTP client requests (instrument: histogram, unit: s).


HTTP_SERVER_REQUEST_DURATION: Final = "http.server.request.duration"
# Duration of HTTP server requests (instrument: histogram, unit: s).
# pylint: disable=too-many-lines

from enum import Enum

from deprecated import deprecated


@deprecated(
    version="1.25.0",
    reason="Use attributes defined in the :py:const:`opentelemetry.semconv.attributes` and :py:const:`opentelemetry.semconv._incubating.attributes` modules instead.",
)  # type: ignore
class ResourceAttributes:
    # Deprecated, generated resource attribute names (semconv schema 1.21.0).

    SCHEMA_URL = "https://opentelemetry.io/schemas/1.21.0"  # URL of the OpenTelemetry schema for these keys and values.

    # -- browser / user agent --
    BROWSER_BRANDS = "browser.brands"  # brand name + version pairs from the UA client hints API.
    BROWSER_PLATFORM = "browser.platform"  # platform from UA client hints; unset if unavailable.
    BROWSER_MOBILE = "browser.mobile"  # true when the browser runs on a mobile device.
    BROWSER_LANGUAGE = "browser.language"  # preferred language (navigator.language).
    USER_AGENT_ORIGINAL = "user_agent.original"  # full legacy user-agent string.

    # -- cloud --
    CLOUD_PROVIDER = "cloud.provider"  # name of the cloud provider.
    CLOUD_ACCOUNT_ID = "cloud.account.id"  # cloud account ID the resource is assigned to.
    CLOUD_REGION = "cloud.region"  # geographical region the resource is running in.
    CLOUD_RESOURCE_ID = "cloud.resource_id"  # provider-native ID (AWS ARN, Azure resource ID, GCP full resource name).
    CLOUD_AVAILABILITY_ZONE = "cloud.availability_zone"  # isolated zone within the region.
    CLOUD_PLATFORM = "cloud.platform"  # cloud platform in use; prefix matches cloud.provider.

    # -- AWS --
    AWS_ECS_CONTAINER_ARN = "aws.ecs.container.arn"  # ARN of an ECS container instance.
    AWS_ECS_CLUSTER_ARN = "aws.ecs.cluster.arn"  # ARN of an ECS cluster.
    AWS_ECS_LAUNCHTYPE = "aws.ecs.launchtype"  # launch type for an ECS task.
    AWS_ECS_TASK_ARN = "aws.ecs.task.arn"  # ARN of an ECS task definition.
    AWS_ECS_TASK_FAMILY = "aws.ecs.task.family"  # task definition family.
    AWS_ECS_TASK_REVISION = "aws.ecs.task.revision"  # revision of the task definition.
    AWS_EKS_CLUSTER_ARN = "aws.eks.cluster.arn"  # ARN of an EKS cluster.
    AWS_LOG_GROUP_NAMES = "aws.log.group.names"  # AWS log group name(s) written to (multiple for sidecars).
    AWS_LOG_GROUP_ARNS = "aws.log.group.arns"  # ARN(s) of the AWS log group(s).
    AWS_LOG_STREAM_NAMES = "aws.log.stream.names"  # AWS log stream name(s) written to.
    AWS_LOG_STREAM_ARNS = "aws.log.stream.arns"  # ARN(s) identifying both log group and log stream.

    # -- GCP --
    GCP_CLOUD_RUN_JOB_EXECUTION = "gcp.cloud_run.job.execution"  # Cloud Run job execution name (CLOUD_RUN_EXECUTION).
    GCP_CLOUD_RUN_JOB_TASK_INDEX = "gcp.cloud_run.job.task_index"  # task index in the execution (CLOUD_RUN_TASK_INDEX).
    GCP_GCE_INSTANCE_NAME = "gcp.gce.instance.name"  # GCE instance name (host.name / default DNS prefix).
    GCP_GCE_INSTANCE_HOSTNAME = "gcp.gce.instance.hostname"  # full default or custom GCE hostname.

    # -- Heroku --
    HEROKU_RELEASE_CREATION_TIMESTAMP = "heroku.release.creation_timestamp"  # time/date the release was created.
    HEROKU_RELEASE_COMMIT = "heroku.release.commit"  # commit hash for the current release.
    HEROKU_APP_ID = "heroku.app.id"  # unique identifier for the application.

    # -- container --
    CONTAINER_NAME = "container.name"  # container name used by the container runtime.
    CONTAINER_ID = "container.id"  # container ID, usually a (possibly abbreviated) UUID.
    CONTAINER_RUNTIME = "container.runtime"  # runtime managing this container.
    CONTAINER_IMAGE_NAME = "container.image.name"  # name of the image the container was built on.
    CONTAINER_IMAGE_TAG = "container.image.tag"  # container image tag.
    CONTAINER_IMAGE_ID = "container.image.id"  # runtime-specific image identifier (e.g. sha256 digest).
    CONTAINER_COMMAND = "container.command"  # command used to run the container (name only).
    CONTAINER_COMMAND_LINE = "container.command_line"  # full command as a single string.
    CONTAINER_COMMAND_ARGS = "container.command_args"  # all command arguments, executable included.

    DEPLOYMENT_ENVIRONMENT = "deployment.environment"  # deployment environment (tier) name.

    # -- device --
    DEVICE_ID = "device.id"  # unique device ID (iOS vendor ID / Android Firebase installation ID); not an ad ID.
    DEVICE_MODEL_IDENTIFIER = "device.model.identifier"  # machine-readable model identifier.
    DEVICE_MODEL_NAME = "device.model.name"  # human-readable marketing model name.
    DEVICE_MANUFACTURER = "device.manufacturer"  # device manufacturer name.

    # -- FaaS --
    FAAS_NAME = "faas.name"  # function name as configured/deployed on the FaaS platform.
    FAAS_VERSION = "faas.version"  # immutable version of the executing function.
    FAAS_INSTANCE = "faas.instance"  # execution environment ID, potentially reused across invocations.
    FAAS_MAX_MEMORY = "faas.max_memory"  # memory available to the function, in bytes.

    # -- host --
    HOST_ID = "host.id"  # unique host ID (cloud instance_id, else machine-id).
    HOST_NAME = "host.name"  # hostname / FQDN / user-specified name.
    HOST_TYPE = "host.type"  # type of host (machine type on cloud).
    HOST_ARCH = "host.arch"  # CPU architecture the host runs on.
    HOST_IMAGE_NAME = "host.image.name"  # VM image or OS install name.
    HOST_IMAGE_ID = "host.image.id"  # VM image ID or host OS image ID.
    HOST_IMAGE_VERSION = "host.image.version"  # version string of the image/OS.

    # -- Kubernetes --
    K8S_CLUSTER_NAME = "k8s.cluster.name"  # cluster name.
    K8S_CLUSTER_UID = "k8s.cluster.uid"  # pseudo cluster ID: UID of the kube-system namespace.
    K8S_NODE_NAME = "k8s.node.name"  # node name.
    K8S_NODE_UID = "k8s.node.uid"  # node UID.
    K8S_NAMESPACE_NAME = "k8s.namespace.name"  # namespace the pod runs in.
    K8S_POD_UID = "k8s.pod.uid"  # pod UID.
    K8S_POD_NAME = "k8s.pod.name"  # pod name.
    K8S_CONTAINER_NAME = "k8s.container.name"  # container name from the pod spec (unique within a pod).
    K8S_CONTAINER_RESTART_COUNT = "k8s.container.restart_count"  # restart count of the container.
    K8S_REPLICASET_UID = "k8s.replicaset.uid"  # ReplicaSet UID.
    K8S_REPLICASET_NAME = "k8s.replicaset.name"  # ReplicaSet name.
    K8S_DEPLOYMENT_UID = "k8s.deployment.uid"  # Deployment UID.
    K8S_DEPLOYMENT_NAME = "k8s.deployment.name"  # Deployment name.
    K8S_STATEFULSET_UID = "k8s.statefulset.uid"  # StatefulSet UID.
    K8S_STATEFULSET_NAME = "k8s.statefulset.name"  # StatefulSet name.
    K8S_DAEMONSET_UID = "k8s.daemonset.uid"  # DaemonSet UID.
    K8S_DAEMONSET_NAME = "k8s.daemonset.name"  # DaemonSet name.
    K8S_JOB_UID = "k8s.job.uid"  # Job UID.
    K8S_JOB_NAME = "k8s.job.name"  # Job name.
    K8S_CRONJOB_UID = "k8s.cronjob.uid"  # CronJob UID.
    K8S_CRONJOB_NAME = "k8s.cronjob.name"  # CronJob name.

    # -- OS --
    OS_TYPE = "os.type"  # operating system type.
    OS_DESCRIPTION = "os.description"  # human-readable OS version info (e.g. `ver`, `lsb_release -a`).
    OS_NAME = "os.name"  # human-readable OS name.
    OS_VERSION = "os.version"  # OS version string.

    # -- process --
    PROCESS_PID = "process.pid"  # process identifier (PID).
    PROCESS_PARENT_PID = "process.parent_pid"  # parent process identifier (PID).
    PROCESS_EXECUTABLE_NAME = "process.executable.name"  # executable name.
    PROCESS_EXECUTABLE_PATH = "process.executable.path"  # full path to the executable.
    PROCESS_COMMAND = "process.command"  # command used to launch the process (name only).
    PROCESS_COMMAND_LINE = "process.command_line"  # full launch command as a single string.
    PROCESS_COMMAND_ARGS = "process.command_args"  # all arguments as received by the process.
    PROCESS_OWNER = "process.owner"  # username of the user that owns the process.
    PROCESS_RUNTIME_NAME = "process.runtime.name"  # runtime (or compiler, for native binaries) name.
    PROCESS_RUNTIME_VERSION = "process.runtime.version"  # runtime version, unmodified.
    PROCESS_RUNTIME_DESCRIPTION = "process.runtime.description"  # extra runtime description (vendor customization).

    # -- service --
    SERVICE_NAME = "service.name"  # logical service name; SDKs fall back to "unknown_service[:<exe>]".
    SERVICE_VERSION = "service.version"  # service version string (format not defined).
    SERVICE_NAMESPACE = "service.namespace"  # namespace for service.name; unspecified == empty namespace.
    SERVICE_INSTANCE_ID = "service.instance.id"  # instance ID, unique per namespace/name pair (e.g. a UUID).

    # -- telemetry SDK --
    TELEMETRY_SDK_NAME = "telemetry.sdk.name"  # "opentelemetry" for the official SDK; forks use their own id.
    TELEMETRY_SDK_LANGUAGE = "telemetry.sdk.language"  # language of the telemetry SDK.
    TELEMETRY_SDK_VERSION = "telemetry.sdk.version"  # SDK version string.
    TELEMETRY_AUTO_VERSION = "telemetry.auto.version"  # auto-instrumentation agent version, if used.

    # -- webengine --
    WEBENGINE_NAME = "webengine.name"  # name of the web engine.
    WEBENGINE_VERSION = "webengine.version"  # version of the web engine.
    WEBENGINE_DESCRIPTION = "webengine.description"  # extra web engine description (version/edition details).

    # -- otel scope --
    OTEL_SCOPE_NAME = "otel.scope.name"  # instrumentation scope name (InstrumentationScope.Name in OTLP).
    OTEL_SCOPE_VERSION = "otel.scope.version"  # instrumentation scope version (InstrumentationScope.Version in OTLP).
    OTEL_LIBRARY_NAME = "otel.library.name"  # deprecated; use otel.scope.name.
    OTEL_LIBRARY_VERSION = "otel.library.version"  # deprecated; use otel.scope.version.

    # Manually defined deprecated attributes
    FAAS_ID = "faas.id"  # deprecated; use cloud.resource_id.


@deprecated(
    version="1.25.0",
    reason="Use :py:const:`opentelemetry.semconv._incubating.attributes.CloudProviderValues` instead.",
)  # type: ignore
class CloudProviderValues(Enum):
    ALIBABA_CLOUD = "alibaba_cloud"  # Alibaba Cloud.
    AWS = "aws"  # Amazon Web Services.
    AZURE = "azure"  # Microsoft Azure.
    GCP = "gcp"  # Google Cloud Platform.
    HEROKU = "heroku"  # Heroku Platform as a Service.
    IBM_CLOUD = "ibm_cloud"  # IBM Cloud.
    TENCENT_CLOUD = "tencent_cloud"  # Tencent Cloud.


@deprecated(
    version="1.25.0",
    reason="Use :py:const:`opentelemetry.semconv._incubating.attributes.CloudPlatformValues` instead.",
)  # type: ignore
class CloudPlatformValues(Enum):
    ALIBABA_CLOUD_ECS = "alibaba_cloud_ecs"  # Alibaba Cloud Elastic Compute Service.
    ALIBABA_CLOUD_FC = "alibaba_cloud_fc"  # Alibaba Cloud Function Compute.
    ALIBABA_CLOUD_OPENSHIFT = "alibaba_cloud_openshift"  # Red Hat OpenShift on Alibaba Cloud.
    AWS_EC2 = "aws_ec2"  # AWS Elastic Compute Cloud.
    AWS_ECS = "aws_ecs"  # AWS Elastic Container Service.
    AWS_EKS = "aws_eks"  # AWS Elastic Kubernetes Service.
    AWS_LAMBDA = "aws_lambda"  # AWS Lambda.
    AWS_ELASTIC_BEANSTALK = "aws_elastic_beanstalk"  # AWS Elastic Beanstalk.
    AWS_APP_RUNNER = "aws_app_runner"  # AWS App Runner.
    AWS_OPENSHIFT = "aws_openshift"  # Red Hat OpenShift on AWS (ROSA).
    AZURE_VM = "azure_vm"  # Azure Virtual Machines.
    AZURE_CONTAINER_INSTANCES = "azure_container_instances"  # Azure Container Instances.
    AZURE_AKS = "azure_aks"  # Azure Kubernetes Service.
    AZURE_FUNCTIONS = "azure_functions"  # Azure Functions.
    AZURE_APP_SERVICE = "azure_app_service"  # Azure App Service.
    AZURE_OPENSHIFT = "azure_openshift"  # Azure Red Hat OpenShift.
    GCP_BARE_METAL_SOLUTION = "gcp_bare_metal_solution"  # Google Bare Metal Solution (BMS).
    GCP_COMPUTE_ENGINE = "gcp_compute_engine"  # Google Cloud Compute Engine (GCE).
    GCP_CLOUD_RUN = "gcp_cloud_run"  # Google Cloud Run.
    GCP_KUBERNETES_ENGINE = "gcp_kubernetes_engine"  # Google Cloud Kubernetes Engine (GKE).
    GCP_CLOUD_FUNCTIONS = "gcp_cloud_functions"  # Google Cloud Functions (GCF).
    GCP_APP_ENGINE = "gcp_app_engine"  # Google Cloud App Engine (GAE).
    GCP_OPENSHIFT = "gcp_openshift"  # Red Hat OpenShift on Google Cloud.
    IBM_CLOUD_OPENSHIFT = "ibm_cloud_openshift"  # Red Hat OpenShift on IBM Cloud.
    TENCENT_CLOUD_CVM = "tencent_cloud_cvm"  # Tencent Cloud Cloud Virtual Machine (CVM).
    TENCENT_CLOUD_EKS = "tencent_cloud_eks"  # Tencent Cloud Elastic Kubernetes Service (EKS).
    TENCENT_CLOUD_SCF = "tencent_cloud_scf"  # Tencent Cloud Serverless Cloud Function (SCF).


@deprecated(
    version="1.25.0",
    reason="Use :py:const:`opentelemetry.semconv._incubating.attributes.AwsEcsLaunchtypeValues` instead.",
)  # type: ignore
class AwsEcsLaunchtypeValues(Enum):
    EC2 = "ec2"  # ec2.
    FARGATE = "fargate"  # fargate.


@deprecated(
    version="1.25.0",
    reason="Use :py:const:`opentelemetry.semconv._incubating.attributes.HostArchValues` instead.",
)  # type: ignore
class HostArchValues(Enum):
    AMD64 = "amd64"  # AMD64.
    ARM32 = "arm32"  # ARM32.
    ARM64 = "arm64"  # ARM64.
    IA64 = "ia64"  # Itanium.
    PPC32 = "ppc32"  # 32-bit PowerPC.
    PPC64 = "ppc64"  # 64-bit PowerPC.
    S390X = "s390x"  # IBM z/Architecture.
    X86 = "x86"  # 32-bit x86.


@deprecated(
    version="1.25.0",
    reason="Use :py:const:`opentelemetry.semconv._incubating.attributes.OsTypeValues` instead.",
)  # type: ignore
class OsTypeValues(Enum):
    WINDOWS = "windows"  # Microsoft Windows.
    LINUX = "linux"  # Linux.
    DARWIN = "darwin"  # Apple Darwin.
    FREEBSD = "freebsd"  # FreeBSD.
    NETBSD = "netbsd"  # NetBSD.
    OPENBSD = "openbsd"  # OpenBSD.
    DRAGONFLYBSD = "dragonflybsd"  # DragonFly BSD.
    HPUX = "hpux"  # HP-UX (Hewlett Packard Unix).
    AIX = "aix"  # AIX (Advanced Interactive eXecutive).
    SOLARIS = "solaris"  # SunOS, Oracle Solaris.
    Z_OS = "z_os"  # IBM z/OS.


@deprecated(
    version="1.25.0",
    reason="Use :py:const:`opentelemetry.semconv.attributes.TelemetrySdkLanguageValues` instead.",
)  # type: ignore
class TelemetrySdkLanguageValues(Enum):
    CPP = "cpp"  # cpp.
    DOTNET = "dotnet"  # dotnet.
    ERLANG = "erlang"  # erlang.
    GO = "go"  # go.
    JAVA = "java"  # java.
    NODEJS = "nodejs"  # nodejs.
    PHP = "php"  # php.
    PYTHON = "python"  # python.
    RUBY = "ruby"  # ruby.
    RUST = "rust"  # rust.
    SWIFT = "swift"  # swift.
    WEBJS = "webjs"  # webjs.


class Schemas(Enum):
    # OpenTelemetry schema URLs, one member per published schema version.

    V1_23_1 = "https://opentelemetry.io/schemas/1.23.1"  # schema version 1.23.1.
    V1_25_0 = "https://opentelemetry.io/schemas/1.25.0"  # schema version 1.25.0.
    V1_26_0 = "https://opentelemetry.io/schemas/1.26.0"  # schema version 1.26.0.
    V1_27_0 = "https://opentelemetry.io/schemas/1.27.0"  # schema version 1.27.0.
    V1_28_0 = "https://opentelemetry.io/schemas/1.28.0"  # schema version 1.28.0.
    V1_29_0 = "https://opentelemetry.io/schemas/1.29.0"  # schema version 1.29.0.
    V1_30_0 = "https://opentelemetry.io/schemas/1.30.0"  # schema version 1.30.0.
    V1_31_0 = "https://opentelemetry.io/schemas/1.31.0"  # schema version 1.31.0.
    # When generating new semantic conventions, make sure to add the new
    # schema versions here.
+ +# pylint: disable=too-many-lines + +from enum import Enum + +from deprecated import deprecated + + +@deprecated( + version="1.25.0", + reason="Use attributes defined in the :py:const:`opentelemetry.semconv.attributes` and :py:const:`opentelemetry.semconv._incubating.attributes` modules instead.", +) # type: ignore +class SpanAttributes: + SCHEMA_URL = "https://opentelemetry.io/schemas/1.21.0" + """ + The URL of the OpenTelemetry schema for these keys and values. + """ + CLIENT_ADDRESS = "client.address" + """ + Client address - unix domain socket name, IPv4 or IPv6 address. + Note: When observed from the server side, and when communicating through an intermediary, `client.address` SHOULD represent client address behind any intermediaries (e.g. proxies) if it's available. + """ + + CLIENT_PORT = "client.port" + """ + Client port number. + Note: When observed from the server side, and when communicating through an intermediary, `client.port` SHOULD represent client port behind any intermediaries (e.g. proxies) if it's available. + """ + + CLIENT_SOCKET_ADDRESS = "client.socket.address" + """ + Immediate client peer address - unix domain socket name, IPv4 or IPv6 address. + """ + + CLIENT_SOCKET_PORT = "client.socket.port" + """ + Immediate client peer port number. + """ + + HTTP_METHOD = "http.method" + """ + Deprecated, use `http.request.method` instead. + """ + + HTTP_STATUS_CODE = "http.status_code" + """ + Deprecated, use `http.response.status_code` instead. + """ + + HTTP_SCHEME = "http.scheme" + """ + Deprecated, use `url.scheme` instead. + """ + + HTTP_URL = "http.url" + """ + Deprecated, use `url.full` instead. + """ + + HTTP_TARGET = "http.target" + """ + Deprecated, use `url.path` and `url.query` instead. + """ + + HTTP_REQUEST_CONTENT_LENGTH = "http.request_content_length" + """ + Deprecated, use `http.request.body.size` instead. 
+ """ + + HTTP_RESPONSE_CONTENT_LENGTH = "http.response_content_length" + """ + Deprecated, use `http.response.body.size` instead. + """ + + NET_SOCK_PEER_NAME = "net.sock.peer.name" + """ + Deprecated, use `server.socket.domain` on client spans. + """ + + NET_SOCK_PEER_ADDR = "net.sock.peer.addr" + """ + Deprecated, use `server.socket.address` on client spans and `client.socket.address` on server spans. + """ + + NET_SOCK_PEER_PORT = "net.sock.peer.port" + """ + Deprecated, use `server.socket.port` on client spans and `client.socket.port` on server spans. + """ + + NET_PEER_NAME = "net.peer.name" + """ + Deprecated, use `server.address` on client spans and `client.address` on server spans. + """ + + NET_PEER_PORT = "net.peer.port" + """ + Deprecated, use `server.port` on client spans and `client.port` on server spans. + """ + + NET_HOST_NAME = "net.host.name" + """ + Deprecated, use `server.address`. + """ + + NET_HOST_PORT = "net.host.port" + """ + Deprecated, use `server.port`. + """ + + NET_SOCK_HOST_ADDR = "net.sock.host.addr" + """ + Deprecated, use `server.socket.address`. + """ + + NET_SOCK_HOST_PORT = "net.sock.host.port" + """ + Deprecated, use `server.socket.port`. + """ + + NET_TRANSPORT = "net.transport" + """ + Deprecated, use `network.transport`. + """ + + NET_PROTOCOL_NAME = "net.protocol.name" + """ + Deprecated, use `network.protocol.name`. + """ + + NET_PROTOCOL_VERSION = "net.protocol.version" + """ + Deprecated, use `network.protocol.version`. + """ + + NET_SOCK_FAMILY = "net.sock.family" + """ + Deprecated, use `network.transport` and `network.type`. + """ + + DESTINATION_DOMAIN = "destination.domain" + """ + The domain name of the destination system. + Note: This value may be a host name, a fully qualified domain name, or another host naming format. + """ + + DESTINATION_ADDRESS = "destination.address" + """ + Peer address, for example IP address or UNIX socket name. + """ + + DESTINATION_PORT = "destination.port" + """ + Peer port number. 
+ """ + + EXCEPTION_TYPE = "exception.type" + """ + The type of the exception (its fully-qualified class name, if applicable). The dynamic type of the exception should be preferred over the static type in languages that support it. + """ + + EXCEPTION_MESSAGE = "exception.message" + """ + The exception message. + """ + + EXCEPTION_STACKTRACE = "exception.stacktrace" + """ + A stacktrace as a string in the natural representation for the language runtime. The representation is to be determined and documented by each language SIG. + """ + + HTTP_REQUEST_METHOD = "http.request.method" + """ + HTTP request method. + Note: HTTP request method value SHOULD be "known" to the instrumentation. + By default, this convention defines "known" methods as the ones listed in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods) + and the PATCH method defined in [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html). + + If the HTTP request method is not known to instrumentation, it MUST set the `http.request.method` attribute to `_OTHER` and, except if reporting a metric, MUST + set the exact method received in the request line as value of the `http.request.method_original` attribute. + + If the HTTP instrumentation could end up converting valid HTTP request methods to `_OTHER`, then it MUST provide a way to override + the list of known HTTP methods. If this override is done via environment variable, then the environment variable MUST be named + OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated list of case-sensitive known HTTP methods + (this list MUST be a full override of the default known method, it is not a list of known methods in addition to the defaults). + + HTTP method names are case-sensitive and `http.request.method` attribute value MUST match a known HTTP method name exactly. + Instrumentations for specific web frameworks that consider HTTP methods to be case insensitive, SHOULD populate a canonical equivalent. 
+ Tracing instrumentations that do so, MUST also set `http.request.method_original` to the original value. + """ + + HTTP_RESPONSE_STATUS_CODE = "http.response.status_code" + """ + [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6). + """ + + NETWORK_PROTOCOL_NAME = "network.protocol.name" + """ + [OSI Application Layer](https://osi-model.com/application-layer/) or non-OSI equivalent. The value SHOULD be normalized to lowercase. + """ + + NETWORK_PROTOCOL_VERSION = "network.protocol.version" + """ + Version of the application layer protocol used. See note below. + Note: `network.protocol.version` refers to the version of the protocol used and might be different from the protocol client's version. If the HTTP client used has a version of `0.27.2`, but sends HTTP version `1.1`, this attribute should be set to `1.1`. + """ + + SERVER_ADDRESS = "server.address" + """ + Host identifier of the ["URI origin"](https://www.rfc-editor.org/rfc/rfc9110.html#name-uri-origin) HTTP request is sent to. + Note: Determined by using the first of the following that applies + + - Host identifier of the [request target](https://www.rfc-editor.org/rfc/rfc9110.html#target.resource) + if it's sent in absolute-form + - Host identifier of the `Host` header + + SHOULD NOT be set if capturing it would require an extra DNS lookup. + """ + + SERVER_PORT = "server.port" + """ + Port identifier of the ["URI origin"](https://www.rfc-editor.org/rfc/rfc9110.html#name-uri-origin) HTTP request is sent to. + Note: When [request target](https://www.rfc-editor.org/rfc/rfc9110.html#target.resource) is absolute URI, `server.port` MUST match URI port identifier, otherwise it MUST match `Host` header port identifier. + """ + + HTTP_ROUTE = "http.route" + """ + The matched route (path template in the format used by the respective server framework). See note below. 
+ Note: MUST NOT be populated when this is not supported by the HTTP server framework as the route attribute should have low-cardinality and the URI path can NOT substitute it. + SHOULD include the [application root](/docs/http/http-spans.md#http-server-definitions) if there is one. + """ + + URL_SCHEME = "url.scheme" + """ + The [URI scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component identifying the used protocol. + """ + + EVENT_NAME = "event.name" + """ + The name identifies the event. + """ + + EVENT_DOMAIN = "event.domain" + """ + The domain identifies the business context for the events. + Note: Events across different domains may have same `event.name`, yet be + unrelated events. + """ + + LOG_RECORD_UID = "log.record.uid" + """ + A unique identifier for the Log Record. + Note: If an id is provided, other log records with the same id will be considered duplicates and can be removed safely. This means, that two distinguishable log records MUST have different values. + The id MAY be an [Universally Unique Lexicographically Sortable Identifier (ULID)](https://github.com/ulid/spec), but other identifiers (e.g. UUID) may be used as needed. + """ + + FEATURE_FLAG_KEY = "feature_flag.key" + """ + The unique identifier of the feature flag. + """ + + FEATURE_FLAG_PROVIDER_NAME = "feature_flag.provider_name" + """ + The name of the service provider that performs the flag evaluation. + """ + + FEATURE_FLAG_VARIANT = "feature_flag.variant" + """ + SHOULD be a semantic identifier for a value. If one is unavailable, a stringified version of the value can be used. + Note: A semantic identifier, commonly referred to as a variant, provides a means + for referring to a value without including the value itself. This can + provide additional context for understanding the meaning behind a value. + For example, the variant `red` maybe be used for the value `#c05543`. 
+ + A stringified version of the value can be used in situations where a + semantic identifier is unavailable. String representation of the value + should be determined by the implementer. + """ + + LOG_IOSTREAM = "log.iostream" + """ + The stream associated with the log. See below for a list of well-known values. + """ + + LOG_FILE_NAME = "log.file.name" + """ + The basename of the file. + """ + + LOG_FILE_PATH = "log.file.path" + """ + The full path to the file. + """ + + LOG_FILE_NAME_RESOLVED = "log.file.name_resolved" + """ + The basename of the file, with symlinks resolved. + """ + + LOG_FILE_PATH_RESOLVED = "log.file.path_resolved" + """ + The full path to the file, with symlinks resolved. + """ + + SERVER_SOCKET_ADDRESS = "server.socket.address" + """ + Physical server IP address or Unix socket address. If set from the client, should simply use the socket's peer address, and not attempt to find any actual server IP (i.e., if set from client, this may represent some proxy server instead of the logical server). + """ + + POOL = "pool" + """ + Name of the buffer pool. + Note: Pool names are generally obtained via [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()). + """ + + TYPE = "type" + """ + The type of memory. + """ + + SERVER_SOCKET_DOMAIN = "server.socket.domain" + """ + The domain name of an immediate peer. + Note: Typically observed from the client side, and represents a proxy or other intermediary domain name. + """ + + SERVER_SOCKET_PORT = "server.socket.port" + """ + Physical server port. + """ + + SOURCE_DOMAIN = "source.domain" + """ + The domain name of the source system. + Note: This value may be a host name, a fully qualified domain name, or another host naming format. + """ + + SOURCE_ADDRESS = "source.address" + """ + Source address, for example IP address or Unix socket name. + """ + + SOURCE_PORT = "source.port" + """ + Source port number. 
+ """ + + AWS_LAMBDA_INVOKED_ARN = "aws.lambda.invoked_arn" + """ + The full invoked ARN as provided on the `Context` passed to the function (`Lambda-Runtime-Invoked-Function-Arn` header on the `/runtime/invocation/next` applicable). + Note: This may be different from `cloud.resource_id` if an alias is involved. + """ + + CLOUDEVENTS_EVENT_ID = "cloudevents.event_id" + """ + The [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) uniquely identifies the event. + """ + + CLOUDEVENTS_EVENT_SOURCE = "cloudevents.event_source" + """ + The [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) identifies the context in which an event happened. + """ + + CLOUDEVENTS_EVENT_SPEC_VERSION = "cloudevents.event_spec_version" + """ + The [version of the CloudEvents specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) which the event uses. + """ + + CLOUDEVENTS_EVENT_TYPE = "cloudevents.event_type" + """ + The [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) contains a value describing the type of event related to the originating occurrence. + """ + + CLOUDEVENTS_EVENT_SUBJECT = "cloudevents.event_subject" + """ + The [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) of the event in the context of the event producer (identified by source). + """ + + OPENTRACING_REF_TYPE = "opentracing.ref_type" + """ + Parent-child Reference type. + Note: The causal relationship between a child Span and a parent Span. + """ + + DB_SYSTEM = "db.system" + """ + An identifier for the database management system (DBMS) product being used. See below for a list of well-known identifiers. + """ + + DB_CONNECTION_STRING = "db.connection_string" + """ + The connection string used to connect to the database. It is recommended to remove embedded credentials. 
+ """ + + DB_USER = "db.user" + """ + Username for accessing the database. + """ + + DB_JDBC_DRIVER_CLASSNAME = "db.jdbc.driver_classname" + """ + The fully-qualified class name of the [Java Database Connectivity (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver used to connect. + """ + + DB_NAME = "db.name" + """ + This attribute is used to report the name of the database being accessed. For commands that switch the database, this should be set to the target database (even if the command fails). + Note: In some SQL databases, the database name to be used is called "schema name". In case there are multiple layers that could be considered for database name (e.g. Oracle instance name and schema name), the database name to be used is the more specific layer (e.g. Oracle schema name). + """ + + DB_STATEMENT = "db.statement" + """ + The database statement being executed. + """ + + DB_OPERATION = "db.operation" + """ + The name of the operation being executed, e.g. the [MongoDB command name](https://docs.mongodb.com/manual/reference/command/#database-operations) such as `findAndModify`, or the SQL keyword. + Note: When setting this to an SQL keyword, it is not recommended to attempt any client-side parsing of `db.statement` just to get this property, but it should be set if the operation name is provided by the library being instrumented. If the SQL statement has an ambiguous operation, or performs more than one operation, this value may be omitted. + """ + + NETWORK_TRANSPORT = "network.transport" + """ + [OSI Transport Layer](https://osi-model.com/transport-layer/) or [Inter-process Communication method](https://en.wikipedia.org/wiki/Inter-process_communication). The value SHOULD be normalized to lowercase. + """ + + NETWORK_TYPE = "network.type" + """ + [OSI Network Layer](https://osi-model.com/network-layer/) or non-OSI equivalent. The value SHOULD be normalized to lowercase. 
+ """ + + DB_MSSQL_INSTANCE_NAME = "db.mssql.instance_name" + """ + The Microsoft SQL Server [instance name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) connecting to. This name is used to determine the port of a named instance. + Note: If setting a `db.mssql.instance_name`, `server.port` is no longer required (but still recommended if non-standard). + """ + + DB_CASSANDRA_PAGE_SIZE = "db.cassandra.page_size" + """ + The fetch size used for paging, i.e. how many rows will be returned at once. + """ + + DB_CASSANDRA_CONSISTENCY_LEVEL = "db.cassandra.consistency_level" + """ + The consistency level of the query. Based on consistency values from [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). + """ + + DB_CASSANDRA_TABLE = "db.cassandra.table" + """ + The name of the primary table that the operation is acting upon, including the keyspace name (if applicable). + Note: This mirrors the db.sql.table attribute but references cassandra rather than sql. It is not recommended to attempt any client-side parsing of `db.statement` just to get this property, but it should be set if it is provided by the library being instrumented. If the operation is acting upon an anonymous table, or more than one table, this value MUST NOT be set. + """ + + DB_CASSANDRA_IDEMPOTENCE = "db.cassandra.idempotence" + """ + Whether or not the query is idempotent. + """ + + DB_CASSANDRA_SPECULATIVE_EXECUTION_COUNT = ( + "db.cassandra.speculative_execution_count" + ) + """ + The number of times a query was speculatively executed. Not set or `0` if the query was not executed speculatively. + """ + + DB_CASSANDRA_COORDINATOR_ID = "db.cassandra.coordinator.id" + """ + The ID of the coordinating node for a query. + """ + + DB_CASSANDRA_COORDINATOR_DC = "db.cassandra.coordinator.dc" + """ + The data center of the coordinating node for a query. 
+ """ + + DB_REDIS_DATABASE_INDEX = "db.redis.database_index" + """ + The index of the database being accessed as used in the [`SELECT` command](https://redis.io/commands/select), provided as an integer. To be used instead of the generic `db.name` attribute. + """ + + DB_MONGODB_COLLECTION = "db.mongodb.collection" + """ + The collection being accessed within the database stated in `db.name`. + """ + + URL_FULL = "url.full" + """ + Absolute URL describing a network resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986). + Note: For network calls, URL usually has `scheme://host[:port][path][?query][#fragment]` format, where the fragment is not transmitted over HTTP, but if it is known, it should be included nevertheless. + `url.full` MUST NOT contain credentials passed via URL in form of `https://username:password@www.example.com/`. In such case username and password should be redacted and attribute's value should be `https://REDACTED:REDACTED@www.example.com/`. + `url.full` SHOULD capture the absolute URL when it is available (or can be reconstructed) and SHOULD NOT be validated or modified except for sanitizing purposes. + """ + + DB_SQL_TABLE = "db.sql.table" + """ + The name of the primary table that the operation is acting upon, including the database name (if applicable). + Note: It is not recommended to attempt any client-side parsing of `db.statement` just to get this property, but it should be set if it is provided by the library being instrumented. If the operation is acting upon an anonymous table, or more than one table, this value MUST NOT be set. + """ + + DB_COSMOSDB_CLIENT_ID = "db.cosmosdb.client_id" + """ + Unique Cosmos client instance id. + """ + + DB_COSMOSDB_OPERATION_TYPE = "db.cosmosdb.operation_type" + """ + CosmosDB Operation Type. + """ + + USER_AGENT_ORIGINAL = "user_agent.original" + """ + Full user-agent string is generated by Cosmos DB SDK. 
+ Note: The user-agent value is generated by SDK which is a combination of<br> `sdk_version` : Current version of SDK. e.g. 'cosmos-netstandard-sdk/3.23.0'<br> `direct_pkg_version` : Direct package version used by Cosmos DB SDK. e.g. '3.23.1'<br> `number_of_client_instances` : Number of cosmos client instances created by the application. e.g. '1'<br> `type_of_machine_architecture` : Machine architecture. e.g. 'X64'<br> `operating_system` : Operating System. e.g. 'Linux 5.4.0-1098-azure 104 18'<br> `runtime_framework` : Runtime Framework. e.g. '.NET Core 3.1.32'<br> `failover_information` : Generated key to determine if region failover enabled. + Format Reg-{D (Disabled discovery)}-S(application region)|L(List of preferred regions)|N(None, user did not configure it). + Default value is "NS". + """ + + DB_COSMOSDB_CONNECTION_MODE = "db.cosmosdb.connection_mode" + """ + Cosmos client connection mode. + """ + + DB_COSMOSDB_CONTAINER = "db.cosmosdb.container" + """ + Cosmos DB container name. + """ + + DB_COSMOSDB_REQUEST_CONTENT_LENGTH = "db.cosmosdb.request_content_length" + """ + Request payload size in bytes. + """ + + DB_COSMOSDB_STATUS_CODE = "db.cosmosdb.status_code" + """ + Cosmos DB status code. + """ + + DB_COSMOSDB_SUB_STATUS_CODE = "db.cosmosdb.sub_status_code" + """ + Cosmos DB sub status code. + """ + + DB_COSMOSDB_REQUEST_CHARGE = "db.cosmosdb.request_charge" + """ + RU consumed for that operation. + """ + + OTEL_STATUS_CODE = "otel.status_code" + """ + Name of the code, either "OK" or "ERROR". MUST NOT be set if the status code is UNSET. + """ + + OTEL_STATUS_DESCRIPTION = "otel.status_description" + """ + Description of the Status if it has a value, otherwise not set. + """ + + FAAS_TRIGGER = "faas.trigger" + """ + Type of the trigger which caused this function invocation. + Note: For the server/consumer span on the incoming side, + `faas.trigger` MUST be set. 
+ + Clients invoking FaaS instances usually cannot set `faas.trigger`, + since they would typically need to look in the payload to determine + the event type. If clients set it, it should be the same as the + trigger that corresponding incoming would have (i.e., this has + nothing to do with the underlying transport used to make the API + call to invoke the lambda, which is often HTTP). + """ + + FAAS_INVOCATION_ID = "faas.invocation_id" + """ + The invocation ID of the current function invocation. + """ + + CLOUD_RESOURCE_ID = "cloud.resource_id" + """ + Cloud provider-specific native identifier of the monitored cloud resource (e.g. an [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) on AWS, a [fully qualified resource ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id) on Azure, a [full resource name](https://cloud.google.com/apis/design/resource_names#full_resource_name) on GCP). + Note: On some cloud providers, it may not be possible to determine the full ID at startup, + so it may be necessary to set `cloud.resource_id` as a span attribute instead. + + The exact value to use for `cloud.resource_id` depends on the cloud provider. + The following well-known definitions MUST be used if you set this attribute and they apply: + + * **AWS Lambda:** The function [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + Take care not to use the "invoked ARN" directly but replace any + [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) + with the resolved function version, as the same runtime instance may be invokable with + multiple different aliases. 
+ * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full-resource-names) + * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id) of the invoked function, + *not* the function app, having the form + `/subscriptions/<SUBSCIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`. + This means that a span attribute MUST be used, as an Azure function app can host multiple functions that would usually share + a TracerProvider. + """ + + FAAS_DOCUMENT_COLLECTION = "faas.document.collection" + """ + The name of the source on which the triggering operation was performed. For example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the database name. + """ + + FAAS_DOCUMENT_OPERATION = "faas.document.operation" + """ + Describes the type of the operation that was performed on the data. + """ + + FAAS_DOCUMENT_TIME = "faas.document.time" + """ + A string containing the time when the data was accessed in the [ISO 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + """ + + FAAS_DOCUMENT_NAME = "faas.document.name" + """ + The document name/table subjected to the operation. For example, in Cloud Storage or S3 is the name of the file, and in Cosmos DB the table name. + """ + + URL_PATH = "url.path" + """ + The [URI path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component. + Note: When missing, the value is assumed to be `/`. + """ + + URL_QUERY = "url.query" + """ + The [URI query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component. + Note: Sensitive content provided in query string SHOULD be scrubbed when instrumentations can identify it. + """ + + MESSAGING_SYSTEM = "messaging.system" + """ + A string identifying the messaging system. 
+ """ + + MESSAGING_OPERATION = "messaging.operation" + """ + A string identifying the kind of messaging operation as defined in the [Operation names](#operation-names) section above. + Note: If a custom value is used, it MUST be of low cardinality. + """ + + MESSAGING_BATCH_MESSAGE_COUNT = "messaging.batch.message_count" + """ + The number of messages sent, received, or processed in the scope of the batching operation. + Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on spans that operate with a single message. When a messaging client library supports both batch and single-message API for the same operation, instrumentations SHOULD use `messaging.batch.message_count` for batching APIs and SHOULD NOT use it for single-message APIs. + """ + + MESSAGING_CLIENT_ID = "messaging.client_id" + """ + A unique identifier for the client that consumes or produces a message. + """ + + MESSAGING_DESTINATION_NAME = "messaging.destination.name" + """ + The message destination name. + Note: Destination name SHOULD uniquely identify a specific queue, topic or other entity within the broker. If + the broker does not have such notion, the destination name SHOULD uniquely identify the broker. + """ + + MESSAGING_DESTINATION_TEMPLATE = "messaging.destination.template" + """ + Low cardinality representation of the messaging destination name. + Note: Destination names could be constructed from templates. An example would be a destination name involving a user name or product id. Although the destination name in this case is of high cardinality, the underlying template is of low cardinality and can be effectively used for grouping and aggregation. + """ + + MESSAGING_DESTINATION_TEMPORARY = "messaging.destination.temporary" + """ + A boolean that is true if the message destination is temporary and might not exist anymore after messages are processed. 
+ """ + + MESSAGING_DESTINATION_ANONYMOUS = "messaging.destination.anonymous" + """ + A boolean that is true if the message destination is anonymous (could be unnamed or have auto-generated name). + """ + + MESSAGING_MESSAGE_ID = "messaging.message.id" + """ + A value used by the messaging system as an identifier for the message, represented as a string. + """ + + MESSAGING_MESSAGE_CONVERSATION_ID = "messaging.message.conversation_id" + """ + The [conversation ID](#conversations) identifying the conversation to which the message belongs, represented as a string. Sometimes called "Correlation ID". + """ + + MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES = ( + "messaging.message.payload_size_bytes" + ) + """ + The (uncompressed) size of the message payload in bytes. Also use this attribute if it is unknown whether the compressed or uncompressed payload size is reported. + """ + + MESSAGING_MESSAGE_PAYLOAD_COMPRESSED_SIZE_BYTES = ( + "messaging.message.payload_compressed_size_bytes" + ) + """ + The compressed size of the message payload in bytes. + """ + + FAAS_TIME = "faas.time" + """ + A string containing the function invocation time in the [ISO 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + """ + + FAAS_CRON = "faas.cron" + """ + A string containing the schedule period as [Cron Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + """ + + FAAS_COLDSTART = "faas.coldstart" + """ + A boolean that is true if the serverless function is executed for the first time (aka cold-start). + """ + + FAAS_INVOKED_NAME = "faas.invoked_name" + """ + The name of the invoked function. + Note: SHOULD be equal to the `faas.name` resource attribute of the invoked function. + """ + + FAAS_INVOKED_PROVIDER = "faas.invoked_provider" + """ + The cloud provider of the invoked function. + Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked function. 
+ """ + + FAAS_INVOKED_REGION = "faas.invoked_region" + """ + The cloud region of the invoked function. + Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked function. + """ + + NETWORK_CONNECTION_TYPE = "network.connection.type" + """ + The internet connection type. + """ + + NETWORK_CONNECTION_SUBTYPE = "network.connection.subtype" + """ + This describes more details regarding the connection.type. It may be the type of cell technology connection, but it could be used for describing details about a wifi connection. + """ + + NETWORK_CARRIER_NAME = "network.carrier.name" + """ + The name of the mobile carrier. + """ + + NETWORK_CARRIER_MCC = "network.carrier.mcc" + """ + The mobile carrier country code. + """ + + NETWORK_CARRIER_MNC = "network.carrier.mnc" + """ + The mobile carrier network code. + """ + + NETWORK_CARRIER_ICC = "network.carrier.icc" + """ + The ISO 3166-1 alpha-2 2-character country code associated with the mobile carrier network. + """ + + PEER_SERVICE = "peer.service" + """ + The [`service.name`](/docs/resource/README.md#service) of the remote service. SHOULD be equal to the actual `service.name` resource attribute of the remote service if any. + """ + + ENDUSER_ID = "enduser.id" + """ + Username or client_id extracted from the access token or [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the inbound request from outside the system. + """ + + ENDUSER_ROLE = "enduser.role" + """ + Actual/assumed role the client is making the request under extracted from token or application security context. + """ + + ENDUSER_SCOPE = "enduser.scope" + """ + Scopes or granted authorities the client currently possesses extracted from token or application security context. 
The value would come from the scope associated with an [OAuth 2.0 Access Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value in a [SAML 2.0 Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + """ + + THREAD_ID = "thread.id" + """ + Current "managed" thread ID (as opposed to OS thread ID). + """ + + THREAD_NAME = "thread.name" + """ + Current thread name. + """ + + CODE_FUNCTION = "code.function" + """ + The method or function name, or equivalent (usually rightmost part of the code unit's name). + """ + + CODE_NAMESPACE = "code.namespace" + """ + The "namespace" within which `code.function` is defined. Usually the qualified class or module name, such that `code.namespace` + some separator + `code.function` form a unique identifier for the code unit. + """ + + CODE_FILEPATH = "code.filepath" + """ + The source code file name that identifies the code unit as uniquely as possible (preferably an absolute file path). + """ + + CODE_LINENO = "code.lineno" + """ + The line number in `code.filepath` best representing the operation. It SHOULD point within the code unit named in `code.function`. + """ + + CODE_COLUMN = "code.column" + """ + The column number in `code.filepath` best representing the operation. It SHOULD point within the code unit named in `code.function`. + """ + + HTTP_REQUEST_METHOD_ORIGINAL = "http.request.method_original" + """ + Original HTTP method sent by the client in the request line. + """ + + HTTP_REQUEST_BODY_SIZE = "http.request.body.size" + """ + The size of the request payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) header. For requests using transport encoding, this should be the compressed size. + """ + + HTTP_RESPONSE_BODY_SIZE = "http.response.body.size" + """ + The size of the response payload body in bytes. 
This is the number of bytes transferred excluding headers and is often, but not always, present as the [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) header. For requests using transport encoding, this should be the compressed size. + """ + + HTTP_RESEND_COUNT = "http.resend_count" + """ + The ordinal number of request resending attempt (for any reason, including redirects). + Note: The resend count SHOULD be updated each time an HTTP request gets resent by the client, regardless of what was the cause of the resending (e.g. redirection, authorization failure, 503 Server Unavailable, network issues, or any other). + """ + + RPC_SYSTEM = "rpc.system" + """ + The value `aws-api`. + """ + + RPC_SERVICE = "rpc.service" + """ + The name of the service to which a request is made, as returned by the AWS SDK. + Note: This is the logical name of the service from the RPC interface perspective, which can be different from the name of any implementing class. The `code.namespace` attribute may be used to store the latter (despite the attribute name, it may include a class name; e.g., class with method actually executing the call on the server side, RPC client stub class on the client side). + """ + + RPC_METHOD = "rpc.method" + """ + The name of the operation corresponding to the request, as returned by the AWS SDK. + Note: This is the logical name of the method from the RPC interface perspective, which can be different from the name of any implementing method/function. The `code.function` attribute may be used to store the latter (e.g., method actually executing the call on the server side, RPC client stub method on the client side). + """ + + AWS_REQUEST_ID = "aws.request_id" + """ + The AWS request ID as returned in the response headers `x-amz-request-id` or `x-amz-requestid`. + """ + + AWS_DYNAMODB_TABLE_NAMES = "aws.dynamodb.table_names" + """ + The keys in the `RequestItems` object field. 
+ """ + + AWS_DYNAMODB_CONSUMED_CAPACITY = "aws.dynamodb.consumed_capacity" + """ + The JSON-serialized value of each item in the `ConsumedCapacity` response field. + """ + + AWS_DYNAMODB_ITEM_COLLECTION_METRICS = ( + "aws.dynamodb.item_collection_metrics" + ) + """ + The JSON-serialized value of the `ItemCollectionMetrics` response field. + """ + + AWS_DYNAMODB_PROVISIONED_READ_CAPACITY = ( + "aws.dynamodb.provisioned_read_capacity" + ) + """ + The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. + """ + + AWS_DYNAMODB_PROVISIONED_WRITE_CAPACITY = ( + "aws.dynamodb.provisioned_write_capacity" + ) + """ + The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter. + """ + + AWS_DYNAMODB_CONSISTENT_READ = "aws.dynamodb.consistent_read" + """ + The value of the `ConsistentRead` request parameter. + """ + + AWS_DYNAMODB_PROJECTION = "aws.dynamodb.projection" + """ + The value of the `ProjectionExpression` request parameter. + """ + + AWS_DYNAMODB_LIMIT = "aws.dynamodb.limit" + """ + The value of the `Limit` request parameter. + """ + + AWS_DYNAMODB_ATTRIBUTES_TO_GET = "aws.dynamodb.attributes_to_get" + """ + The value of the `AttributesToGet` request parameter. + """ + + AWS_DYNAMODB_INDEX_NAME = "aws.dynamodb.index_name" + """ + The value of the `IndexName` request parameter. + """ + + AWS_DYNAMODB_SELECT = "aws.dynamodb.select" + """ + The value of the `Select` request parameter. + """ + + AWS_DYNAMODB_GLOBAL_SECONDARY_INDEXES = ( + "aws.dynamodb.global_secondary_indexes" + ) + """ + The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request field. + """ + + AWS_DYNAMODB_LOCAL_SECONDARY_INDEXES = ( + "aws.dynamodb.local_secondary_indexes" + ) + """ + The JSON-serialized value of each item of the `LocalSecondaryIndexes` request field. + """ + + AWS_DYNAMODB_EXCLUSIVE_START_TABLE = "aws.dynamodb.exclusive_start_table" + """ + The value of the `ExclusiveStartTableName` request parameter. 
+ """ + + AWS_DYNAMODB_TABLE_COUNT = "aws.dynamodb.table_count" + """ + The the number of items in the `TableNames` response parameter. + """ + + AWS_DYNAMODB_SCAN_FORWARD = "aws.dynamodb.scan_forward" + """ + The value of the `ScanIndexForward` request parameter. + """ + + AWS_DYNAMODB_SEGMENT = "aws.dynamodb.segment" + """ + The value of the `Segment` request parameter. + """ + + AWS_DYNAMODB_TOTAL_SEGMENTS = "aws.dynamodb.total_segments" + """ + The value of the `TotalSegments` request parameter. + """ + + AWS_DYNAMODB_COUNT = "aws.dynamodb.count" + """ + The value of the `Count` response parameter. + """ + + AWS_DYNAMODB_SCANNED_COUNT = "aws.dynamodb.scanned_count" + """ + The value of the `ScannedCount` response parameter. + """ + + AWS_DYNAMODB_ATTRIBUTE_DEFINITIONS = "aws.dynamodb.attribute_definitions" + """ + The JSON-serialized value of each item in the `AttributeDefinitions` request field. + """ + + AWS_DYNAMODB_GLOBAL_SECONDARY_INDEX_UPDATES = ( + "aws.dynamodb.global_secondary_index_updates" + ) + """ + The JSON-serialized value of each item in the the `GlobalSecondaryIndexUpdates` request field. + """ + + AWS_S3_BUCKET = "aws.s3.bucket" + """ + The S3 bucket name the request refers to. Corresponds to the `--bucket` parameter of the [S3 API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) operations. + Note: The `bucket` attribute is applicable to all S3 operations that reference a bucket, i.e. that require the bucket name as a mandatory parameter. + This applies to almost all S3 operations except `list-buckets`. + """ + + AWS_S3_KEY = "aws.s3.key" + """ + The S3 object key the request refers to. Corresponds to the `--key` parameter of the [S3 API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) operations. + Note: The `key` attribute is applicable to all object-related S3 operations, i.e. that require the object key as a mandatory parameter. 
+ This applies in particular to the following operations: + + - [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + - [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + - [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) + - [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) + - [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) + - [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) + - [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) + - [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + - [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + - [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) + - [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + - [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + - [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html). + """ + + AWS_S3_COPY_SOURCE = "aws.s3.copy_source" + """ + The source object (in the form `bucket`/`key`) for the copy operation. + Note: The `copy_source` attribute applies to S3 copy operations and corresponds to the `--copy-source` parameter + of the [copy-object operation within the S3 API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). + This applies in particular to the following operations: + + - [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + - [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html). 
+ """ + + AWS_S3_UPLOAD_ID = "aws.s3.upload_id" + """ + Upload ID that identifies the multipart upload. + Note: The `upload_id` attribute applies to S3 multipart-upload operations and corresponds to the `--upload-id` parameter + of the [S3 API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) multipart operations. + This applies in particular to the following operations: + + - [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + - [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + - [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + - [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + - [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html). + """ + + AWS_S3_DELETE = "aws.s3.delete" + """ + The delete request container that specifies the objects to be deleted. + Note: The `delete` attribute is only applicable to the [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) operation. + The `delete` attribute corresponds to the `--delete` parameter of the + [delete-objects operation within the S3 API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). + """ + + AWS_S3_PART_NUMBER = "aws.s3.part_number" + """ + The part number of the part being uploaded in a multipart-upload operation. This is a positive integer between 1 and 10,000. + Note: The `part_number` attribute is only applicable to the [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + and [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) operations. 
    The `part_number` attribute corresponds to the `--part-number` parameter of the
    [upload-part operation within the S3 API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html).
    """

    GRAPHQL_OPERATION_NAME = "graphql.operation.name"
    """
    The name of the operation being executed.
    """

    GRAPHQL_OPERATION_TYPE = "graphql.operation.type"
    """
    The type of the operation being executed.
    """

    GRAPHQL_DOCUMENT = "graphql.document"
    """
    The GraphQL document being executed.
    Note: The value may be sanitized to exclude sensitive information.
    """

    MESSAGING_RABBITMQ_DESTINATION_ROUTING_KEY = (
        "messaging.rabbitmq.destination.routing_key"
    )
    """
    RabbitMQ message routing key.
    """

    MESSAGING_KAFKA_MESSAGE_KEY = "messaging.kafka.message.key"
    """
    Message keys in Kafka are used for grouping alike messages to ensure they're processed on the same partition. They differ from `messaging.message.id` in that they're not unique. If the key is `null`, the attribute MUST NOT be set.
    Note: If the key type is not string, its string representation has to be supplied for the attribute. If the key has no unambiguous, canonical string form, don't include its value.
    """

    MESSAGING_KAFKA_CONSUMER_GROUP = "messaging.kafka.consumer.group"
    """
    Name of the Kafka Consumer Group that is handling the message. Only applies to consumers, not producers.
    """

    MESSAGING_KAFKA_DESTINATION_PARTITION = (
        "messaging.kafka.destination.partition"
    )
    """
    Partition the message is sent to.
    """

    MESSAGING_KAFKA_MESSAGE_OFFSET = "messaging.kafka.message.offset"
    """
    The offset of a record in the corresponding Kafka partition.
    """

    MESSAGING_KAFKA_MESSAGE_TOMBSTONE = "messaging.kafka.message.tombstone"
    """
    A boolean that is true if the message is a tombstone.
+ """ + + MESSAGING_ROCKETMQ_NAMESPACE = "messaging.rocketmq.namespace" + """ + Namespace of RocketMQ resources, resources in different namespaces are individual. + """ + + MESSAGING_ROCKETMQ_CLIENT_GROUP = "messaging.rocketmq.client_group" + """ + Name of the RocketMQ producer/consumer group that is handling the message. The client type is identified by the SpanKind. + """ + + MESSAGING_ROCKETMQ_MESSAGE_DELIVERY_TIMESTAMP = ( + "messaging.rocketmq.message.delivery_timestamp" + ) + """ + The timestamp in milliseconds that the delay message is expected to be delivered to consumer. + """ + + MESSAGING_ROCKETMQ_MESSAGE_DELAY_TIME_LEVEL = ( + "messaging.rocketmq.message.delay_time_level" + ) + """ + The delay time level for delay message, which determines the message delay time. + """ + + MESSAGING_ROCKETMQ_MESSAGE_GROUP = "messaging.rocketmq.message.group" + """ + It is essential for FIFO message. Messages that belong to the same message group are always processed one by one within the same consumer group. + """ + + MESSAGING_ROCKETMQ_MESSAGE_TYPE = "messaging.rocketmq.message.type" + """ + Type of message. + """ + + MESSAGING_ROCKETMQ_MESSAGE_TAG = "messaging.rocketmq.message.tag" + """ + The secondary classifier of message besides topic. + """ + + MESSAGING_ROCKETMQ_MESSAGE_KEYS = "messaging.rocketmq.message.keys" + """ + Key(s) of message, another way to mark message besides message id. + """ + + MESSAGING_ROCKETMQ_CONSUMPTION_MODEL = ( + "messaging.rocketmq.consumption_model" + ) + """ + Model of message consumption. This only applies to consumer spans. + """ + + RPC_GRPC_STATUS_CODE = "rpc.grpc.status_code" + """ + The [numeric status code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC request. + """ + + RPC_JSONRPC_VERSION = "rpc.jsonrpc.version" + """ + Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 does not specify this, the value can be omitted. 
+ """ + + RPC_JSONRPC_REQUEST_ID = "rpc.jsonrpc.request_id" + """ + `id` property of request or response. Since protocol allows id to be int, string, `null` or missing (for notifications), value is expected to be cast to string for simplicity. Use empty string in case of `null` value. Omit entirely if this is a notification. + """ + + RPC_JSONRPC_ERROR_CODE = "rpc.jsonrpc.error_code" + """ + `error.code` property of response if it is an error response. + """ + + RPC_JSONRPC_ERROR_MESSAGE = "rpc.jsonrpc.error_message" + """ + `error.message` property of response if it is an error response. + """ + + MESSAGE_TYPE = "message.type" + """ + Whether this is a received or sent message. + """ + + MESSAGE_ID = "message.id" + """ + MUST be calculated as two different counters starting from `1` one for sent messages and one for received message. + Note: This way we guarantee that the values will be consistent between different implementations. + """ + + MESSAGE_COMPRESSED_SIZE = "message.compressed_size" + """ + Compressed size of the message in bytes. + """ + + MESSAGE_UNCOMPRESSED_SIZE = "message.uncompressed_size" + """ + Uncompressed size of the message in bytes. + """ + + RPC_CONNECT_RPC_ERROR_CODE = "rpc.connect_rpc.error_code" + """ + The [error codes](https://connect.build/docs/protocol/#error-codes) of the Connect request. Error codes are always string values. + """ + + EXCEPTION_ESCAPED = "exception.escaped" + """ + SHOULD be set to true if the exception event is recorded at a point where it is known that the exception is escaping the scope of the span. + Note: An exception is considered to have escaped (or left) the scope of a span, + if that span is ended while the exception is still logically "in flight". + This may be actually "in flight" in some languages (e.g. if the exception + is passed to a Context manager's `__exit__` method in Python) but will + usually be caught at the point of recording the exception in most languages. 
+ + It is usually not possible to determine at the point where an exception is thrown + whether it will escape the scope of a span. + However, it is trivial to know that an exception + will escape, if one checks for an active exception just before ending the span, + as done in the [example above](#recording-an-exception). + + It follows that an exception may still escape the scope of the span + even if the `exception.escaped` attribute was not set or set to false, + since the event might have been recorded at a time where it was not + clear whether the exception will escape. + """ + + URL_FRAGMENT = "url.fragment" + """ + The [URI fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component. + """ + + # Manually defined deprecated attributes + + NET_PEER_IP = "net.peer.ip" + """ + Deprecated, use the `client.socket.address` attribute. + """ + + NET_HOST_IP = "net.host.ip" + """ + Deprecated, use the `server.socket.address` attribute. + """ + + HTTP_SERVER_NAME = "http.server_name" + """ + Deprecated, use the `server.address` attribute. + """ + + HTTP_HOST = "http.host" + """ + Deprecated, use the `server.address` and `server.port` attributes. + """ + + HTTP_RETRY_COUNT = "http.retry_count" + """ + Deprecated, use the `http.resend_count` attribute. + """ + + HTTP_REQUEST_CONTENT_LENGTH_UNCOMPRESSED = ( + "http.request_content_length_uncompressed" + ) + """ + Deprecated, use the `http.request.body.size` attribute. + """ + + HTTP_RESPONSE_CONTENT_LENGTH_UNCOMPRESSED = ( + "http.response_content_length_uncompressed" + ) + """ + Deprecated, use the `http.response.body.size` attribute. + """ + + MESSAGING_DESTINATION = "messaging.destination" + """ + Deprecated, use the `messaging.destination.name` attribute. + """ + + MESSAGING_DESTINATION_KIND = "messaging.destination_kind" + """ + Deprecated. + """ + + MESSAGING_TEMP_DESTINATION = "messaging.temp_destination" + """ + Deprecated. Use `messaging.destination.temporary` attribute. 
+ """ + + MESSAGING_PROTOCOL = "messaging.protocol" + """ + Deprecated. Use `network.protocol.name` attribute. + """ + + MESSAGING_PROTOCOL_VERSION = "messaging.protocol_version" + """ + Deprecated. Use `network.protocol.version` attribute. + """ + + MESSAGING_URL = "messaging.url" + """ + Deprecated. Use `server.address` and `server.port` attributes. + """ + + MESSAGING_CONVERSATION_ID = "messaging.conversation_id" + """ + Deprecated. Use `messaging.message.conversation.id` attribute. + """ + + MESSAGING_KAFKA_PARTITION = "messaging.kafka.partition" + """ + Deprecated. Use `messaging.kafka.destination.partition` attribute. + """ + + FAAS_EXECUTION = "faas.execution" + """ + Deprecated. Use `faas.invocation_id` attribute. + """ + + HTTP_USER_AGENT = "http.user_agent" + """ + Deprecated. Use `user_agent.original` attribute. + """ + + MESSAGING_RABBITMQ_ROUTING_KEY = "messaging.rabbitmq.routing_key" + """ + Deprecated. Use `messaging.rabbitmq.destination.routing_key` attribute. + """ + + MESSAGING_KAFKA_TOMBSTONE = "messaging.kafka.tombstone" + """ + Deprecated. Use `messaging.kafka.destination.tombstone` attribute. + """ + + NET_APP_PROTOCOL_NAME = "net.app.protocol.name" + """ + Deprecated. Use `network.protocol.name` attribute. + """ + + NET_APP_PROTOCOL_VERSION = "net.app.protocol.version" + """ + Deprecated. Use `network.protocol.version` attribute. + """ + + HTTP_CLIENT_IP = "http.client_ip" + """ + Deprecated. Use `client.address` attribute. + """ + + HTTP_FLAVOR = "http.flavor" + """ + Deprecated. Use `network.protocol.name` and `network.protocol.version` attributes. + """ + + NET_HOST_CONNECTION_TYPE = "net.host.connection.type" + """ + Deprecated. Use `network.connection.type` attribute. + """ + + NET_HOST_CONNECTION_SUBTYPE = "net.host.connection.subtype" + """ + Deprecated. Use `network.connection.subtype` attribute. + """ + + NET_HOST_CARRIER_NAME = "net.host.carrier.name" + """ + Deprecated. Use `network.carrier.name` attribute. 
+ """ + + NET_HOST_CARRIER_MCC = "net.host.carrier.mcc" + """ + Deprecated. Use `network.carrier.mcc` attribute. + """ + + NET_HOST_CARRIER_MNC = "net.host.carrier.mnc" + """ + Deprecated. Use `network.carrier.mnc` attribute. + """ + + MESSAGING_CONSUMER_ID = "messaging.consumer_id" + """ + Deprecated. Use `messaging.client_id` attribute. + """ + + MESSAGING_KAFKA_CLIENT_ID = "messaging.kafka.client_id" + """ + Deprecated. Use `messaging.client_id` attribute. + """ + + MESSAGING_ROCKETMQ_CLIENT_ID = "messaging.rocketmq.client_id" + """ + Deprecated. Use `messaging.client_id` attribute. + """ + + +@deprecated( + version="1.18.0", + reason="Removed from the specification in favor of `network.protocol.name` and `network.protocol.version` attributes", +) # type: ignore +class HttpFlavorValues(Enum): + HTTP_1_0 = "1.0" + + HTTP_1_1 = "1.1" + + HTTP_2_0 = "2.0" + + HTTP_3_0 = "3.0" + + SPDY = "SPDY" + + QUIC = "QUIC" + + +@deprecated( + version="1.18.0", + reason="Removed from the specification", +) # type: ignore +class MessagingDestinationKindValues(Enum): + QUEUE = "queue" + """A message sent to a queue.""" + + TOPIC = "topic" + """A message sent to a topic.""" + + +@deprecated( + version="1.21.0", + reason="Renamed to NetworkConnectionTypeValues", +) # type: ignore +class NetHostConnectionTypeValues(Enum): + WIFI = "wifi" + """wifi.""" + + WIRED = "wired" + """wired.""" + + CELL = "cell" + """cell.""" + + UNAVAILABLE = "unavailable" + """unavailable.""" + + UNKNOWN = "unknown" + """unknown.""" + + +@deprecated( + version="1.21.0", + reason="Renamed to NetworkConnectionSubtypeValues", +) # type: ignore +class NetHostConnectionSubtypeValues(Enum): + GPRS = "gprs" + """GPRS.""" + + EDGE = "edge" + """EDGE.""" + + UMTS = "umts" + """UMTS.""" + + CDMA = "cdma" + """CDMA.""" + + EVDO_0 = "evdo_0" + """EVDO Rel. 0.""" + + EVDO_A = "evdo_a" + """EVDO Rev. 
A.""" + + CDMA2000_1XRTT = "cdma2000_1xrtt" + """CDMA2000 1XRTT.""" + + HSDPA = "hsdpa" + """HSDPA.""" + + HSUPA = "hsupa" + """HSUPA.""" + + HSPA = "hspa" + """HSPA.""" + + IDEN = "iden" + """IDEN.""" + + EVDO_B = "evdo_b" + """EVDO Rev. B.""" + + LTE = "lte" + """LTE.""" + + EHRPD = "ehrpd" + """EHRPD.""" + + HSPAP = "hspap" + """HSPAP.""" + + GSM = "gsm" + """GSM.""" + + TD_SCDMA = "td_scdma" + """TD-SCDMA.""" + + IWLAN = "iwlan" + """IWLAN.""" + + NR = "nr" + """5G NR (New Radio).""" + + NRNSA = "nrnsa" + """5G NRNSA (New Radio Non-Standalone).""" + + LTE_CA = "lte_ca" + """LTE CA.""" + + +@deprecated( + version="1.25.0", + reason="Use :py:const:`opentelemetry.semconv.attributes.NetworkTransportValues` instead.", +) # type: ignore +class NetTransportValues(Enum): + IP_TCP = "ip_tcp" + """ip_tcp.""" + + IP_UDP = "ip_udp" + """ip_udp.""" + + PIPE = "pipe" + """Named or anonymous pipe.""" + + INPROC = "inproc" + """In-process communication.""" + + OTHER = "other" + """Something else (non IP-based).""" + + +@deprecated( + version="1.25.0", + reason="Use :py:const:`opentelemetry.semconv.attributes.NetworkType` instead.", +) # type: ignore +class NetSockFamilyValues(Enum): + INET = "inet" + """IPv4 address.""" + + INET6 = "inet6" + """IPv6 address.""" + + UNIX = "unix" + """Unix domain socket path.""" + + +@deprecated( + version="1.25.0", + reason="Use :py:const:`opentelemetry.semconv.attributes.HttpRequestMethodValues` instead.", +) # type: ignore +class HttpRequestMethodValues(Enum): + CONNECT = "CONNECT" + """CONNECT method.""" + + DELETE = "DELETE" + """DELETE method.""" + + GET = "GET" + """GET method.""" + + HEAD = "HEAD" + """HEAD method.""" + + OPTIONS = "OPTIONS" + """OPTIONS method.""" + + PATCH = "PATCH" + """PATCH method.""" + + POST = "POST" + """POST method.""" + + PUT = "PUT" + """PUT method.""" + + TRACE = "TRACE" + """TRACE method.""" + + OTHER = "_OTHER" + """Any HTTP method that the instrumentation has no prior knowledge of.""" + + 
@deprecated(version="1.25.0", reason="Removed from the specification.")  # type: ignore
class EventDomainValues(Enum):
    BROWSER = "browser"
    """Events from browser apps."""

    DEVICE = "device"
    """Events from mobile apps."""

    K8S = "k8s"
    """Events from Kubernetes."""


@deprecated(
    version="1.25.0",
    reason="Use :py:const:`opentelemetry.semconv._incubating.attributes.LogIostreamValues` instead.",
)  # type: ignore
class LogIostreamValues(Enum):
    STDOUT = "stdout"
    """Logs from stdout stream."""

    STDERR = "stderr"
    """Logs from stderr stream."""


@deprecated(version="1.25.0", reason="Removed from the specification.")  # type: ignore
class TypeValues(Enum):
    HEAP = "heap"
    """Heap memory."""

    NON_HEAP = "non_heap"
    """Non-heap memory."""


@deprecated(
    version="1.25.0",
    reason="Use :py:const:`opentelemetry.semconv._incubating.attributes.OpentracingRefTypeValues` instead.",
)  # type: ignore
class OpentracingRefTypeValues(Enum):
    CHILD_OF = "child_of"
    """The parent Span depends on the child Span in some capacity."""

    FOLLOWS_FROM = "follows_from"
    """The parent Span does not depend in any way on the result of the child Span."""


class DbSystemValues(Enum):
    OTHER_SQL = "other_sql"
    """Some other SQL database. Fallback only.
See notes.""" + + MSSQL = "mssql" + """Microsoft SQL Server.""" + + MSSQLCOMPACT = "mssqlcompact" + """Microsoft SQL Server Compact.""" + + MYSQL = "mysql" + """MySQL.""" + + ORACLE = "oracle" + """Oracle Database.""" + + DB2 = "db2" + """IBM Db2.""" + + POSTGRESQL = "postgresql" + """PostgreSQL.""" + + REDSHIFT = "redshift" + """Amazon Redshift.""" + + HIVE = "hive" + """Apache Hive.""" + + CLOUDSCAPE = "cloudscape" + """Cloudscape.""" + + HSQLDB = "hsqldb" + """HyperSQL DataBase.""" + + PROGRESS = "progress" + """Progress Database.""" + + MAXDB = "maxdb" + """SAP MaxDB.""" + + HANADB = "hanadb" + """SAP HANA.""" + + INGRES = "ingres" + """Ingres.""" + + FIRSTSQL = "firstsql" + """FirstSQL.""" + + EDB = "edb" + """EnterpriseDB.""" + + CACHE = "cache" + """InterSystems Caché.""" + + ADABAS = "adabas" + """Adabas (Adaptable Database System).""" + + FIREBIRD = "firebird" + """Firebird.""" + + DERBY = "derby" + """Apache Derby.""" + + FILEMAKER = "filemaker" + """FileMaker.""" + + INFORMIX = "informix" + """Informix.""" + + INSTANTDB = "instantdb" + """InstantDB.""" + + INTERBASE = "interbase" + """InterBase.""" + + MARIADB = "mariadb" + """MariaDB.""" + + NETEZZA = "netezza" + """Netezza.""" + + PERVASIVE = "pervasive" + """Pervasive PSQL.""" + + POINTBASE = "pointbase" + """PointBase.""" + + SQLITE = "sqlite" + """SQLite.""" + + SYBASE = "sybase" + """Sybase.""" + + TERADATA = "teradata" + """Teradata.""" + + VERTICA = "vertica" + """Vertica.""" + + H2 = "h2" + """H2.""" + + COLDFUSION = "coldfusion" + """ColdFusion IMQ.""" + + CASSANDRA = "cassandra" + """Apache Cassandra.""" + + HBASE = "hbase" + """Apache HBase.""" + + MONGODB = "mongodb" + """MongoDB.""" + + REDIS = "redis" + """Redis.""" + + COUCHBASE = "couchbase" + """Couchbase.""" + + COUCHDB = "couchdb" + """CouchDB.""" + + COSMOSDB = "cosmosdb" + """Microsoft Azure Cosmos DB.""" + + DYNAMODB = "dynamodb" + """Amazon DynamoDB.""" + + NEO4J = "neo4j" + """Neo4j.""" + + GEODE = "geode" + """Apache Geode.""" + 
+ ELASTICSEARCH = "elasticsearch" + """Elasticsearch.""" + + MEMCACHED = "memcached" + """Memcached.""" + + COCKROACHDB = "cockroachdb" + """CockroachDB.""" + + OPENSEARCH = "opensearch" + """OpenSearch.""" + + CLICKHOUSE = "clickhouse" + """ClickHouse.""" + + SPANNER = "spanner" + """Cloud Spanner.""" + + TRINO = "trino" + """Trino.""" + + +class NetworkTransportValues(Enum): + TCP = "tcp" + """TCP.""" + + UDP = "udp" + """UDP.""" + + PIPE = "pipe" + """Named or anonymous pipe. See note below.""" + + UNIX = "unix" + """Unix domain socket.""" + + +class NetworkTypeValues(Enum): + IPV4 = "ipv4" + """IPv4.""" + + IPV6 = "ipv6" + """IPv6.""" + + +class DbCassandraConsistencyLevelValues(Enum): + ALL = "all" + """all.""" + + EACH_QUORUM = "each_quorum" + """each_quorum.""" + + QUORUM = "quorum" + """quorum.""" + + LOCAL_QUORUM = "local_quorum" + """local_quorum.""" + + ONE = "one" + """one.""" + + TWO = "two" + """two.""" + + THREE = "three" + """three.""" + + LOCAL_ONE = "local_one" + """local_one.""" + + ANY = "any" + """any.""" + + SERIAL = "serial" + """serial.""" + + LOCAL_SERIAL = "local_serial" + """local_serial.""" + + +class DbCosmosdbOperationTypeValues(Enum): + INVALID = "Invalid" + """invalid.""" + + CREATE = "Create" + """create.""" + + PATCH = "Patch" + """patch.""" + + READ = "Read" + """read.""" + + READ_FEED = "ReadFeed" + """read_feed.""" + + DELETE = "Delete" + """delete.""" + + REPLACE = "Replace" + """replace.""" + + EXECUTE = "Execute" + """execute.""" + + QUERY = "Query" + """query.""" + + HEAD = "Head" + """head.""" + + HEAD_FEED = "HeadFeed" + """head_feed.""" + + UPSERT = "Upsert" + """upsert.""" + + BATCH = "Batch" + """batch.""" + + QUERY_PLAN = "QueryPlan" + """query_plan.""" + + EXECUTE_JAVASCRIPT = "ExecuteJavaScript" + """execute_javascript.""" + + +class DbCosmosdbConnectionModeValues(Enum): + GATEWAY = "gateway" + """Gateway (HTTP) connections mode.""" + + DIRECT = "direct" + """Direct connection.""" + + +class 
OtelStatusCodeValues(Enum): + OK = "OK" + """The operation has been validated by an Application developer or Operator to have completed successfully.""" + + ERROR = "ERROR" + """The operation contains an error.""" + + +class FaasTriggerValues(Enum): + DATASOURCE = "datasource" + """A response to some data source operation such as a database or filesystem read/write.""" + + HTTP = "http" + """To provide an answer to an inbound HTTP request.""" + + PUBSUB = "pubsub" + """A function is set to be executed when messages are sent to a messaging system.""" + + TIMER = "timer" + """A function is scheduled to be executed regularly.""" + + OTHER = "other" + """If none of the others apply.""" + + +class FaasDocumentOperationValues(Enum): + INSERT = "insert" + """When a new object is created.""" + + EDIT = "edit" + """When an object is modified.""" + + DELETE = "delete" + """When an object is deleted.""" + + +class MessagingOperationValues(Enum): + PUBLISH = "publish" + """publish.""" + + RECEIVE = "receive" + """receive.""" + + PROCESS = "process" + """process.""" + + +class FaasInvokedProviderValues(Enum): + ALIBABA_CLOUD = "alibaba_cloud" + """Alibaba Cloud.""" + + AWS = "aws" + """Amazon Web Services.""" + + AZURE = "azure" + """Microsoft Azure.""" + + GCP = "gcp" + """Google Cloud Platform.""" + + TENCENT_CLOUD = "tencent_cloud" + """Tencent Cloud.""" + + +class NetworkConnectionTypeValues(Enum): + WIFI = "wifi" + """wifi.""" + + WIRED = "wired" + """wired.""" + + CELL = "cell" + """cell.""" + + UNAVAILABLE = "unavailable" + """unavailable.""" + + UNKNOWN = "unknown" + """unknown.""" + + +class NetworkConnectionSubtypeValues(Enum): + GPRS = "gprs" + """GPRS.""" + + EDGE = "edge" + """EDGE.""" + + UMTS = "umts" + """UMTS.""" + + CDMA = "cdma" + """CDMA.""" + + EVDO_0 = "evdo_0" + """EVDO Rel. 0.""" + + EVDO_A = "evdo_a" + """EVDO Rev. 
A.""" + + CDMA2000_1XRTT = "cdma2000_1xrtt" + """CDMA2000 1XRTT.""" + + HSDPA = "hsdpa" + """HSDPA.""" + + HSUPA = "hsupa" + """HSUPA.""" + + HSPA = "hspa" + """HSPA.""" + + IDEN = "iden" + """IDEN.""" + + EVDO_B = "evdo_b" + """EVDO Rev. B.""" + + LTE = "lte" + """LTE.""" + + EHRPD = "ehrpd" + """EHRPD.""" + + HSPAP = "hspap" + """HSPAP.""" + + GSM = "gsm" + """GSM.""" + + TD_SCDMA = "td_scdma" + """TD-SCDMA.""" + + IWLAN = "iwlan" + """IWLAN.""" + + NR = "nr" + """5G NR (New Radio).""" + + NRNSA = "nrnsa" + """5G NRNSA (New Radio Non-Standalone).""" + + LTE_CA = "lte_ca" + """LTE CA.""" + + +class RpcSystemValues(Enum): + GRPC = "grpc" + """gRPC.""" + + JAVA_RMI = "java_rmi" + """Java RMI.""" + + DOTNET_WCF = "dotnet_wcf" + """.NET WCF.""" + + APACHE_DUBBO = "apache_dubbo" + """Apache Dubbo.""" + + CONNECT_RPC = "connect_rpc" + """Connect RPC.""" + + +class GraphqlOperationTypeValues(Enum): + QUERY = "query" + """GraphQL query.""" + + MUTATION = "mutation" + """GraphQL mutation.""" + + SUBSCRIPTION = "subscription" + """GraphQL subscription.""" + + +class MessagingRocketmqMessageTypeValues(Enum): + NORMAL = "normal" + """Normal message.""" + + FIFO = "fifo" + """FIFO message.""" + + DELAY = "delay" + """Delay message.""" + + TRANSACTION = "transaction" + """Transaction message.""" + + +class MessagingRocketmqConsumptionModelValues(Enum): + CLUSTERING = "clustering" + """Clustering consumption model.""" + + BROADCASTING = "broadcasting" + """Broadcasting consumption model.""" + + +class RpcGrpcStatusCodeValues(Enum): + OK = 0 + """OK.""" + + CANCELLED = 1 + """CANCELLED.""" + + UNKNOWN = 2 + """UNKNOWN.""" + + INVALID_ARGUMENT = 3 + """INVALID_ARGUMENT.""" + + DEADLINE_EXCEEDED = 4 + """DEADLINE_EXCEEDED.""" + + NOT_FOUND = 5 + """NOT_FOUND.""" + + ALREADY_EXISTS = 6 + """ALREADY_EXISTS.""" + + PERMISSION_DENIED = 7 + """PERMISSION_DENIED.""" + + RESOURCE_EXHAUSTED = 8 + """RESOURCE_EXHAUSTED.""" + + FAILED_PRECONDITION = 9 + """FAILED_PRECONDITION.""" + + ABORTED 
= 10 + """ABORTED.""" + + OUT_OF_RANGE = 11 + """OUT_OF_RANGE.""" + + UNIMPLEMENTED = 12 + """UNIMPLEMENTED.""" + + INTERNAL = 13 + """INTERNAL.""" + + UNAVAILABLE = 14 + """UNAVAILABLE.""" + + DATA_LOSS = 15 + """DATA_LOSS.""" + + UNAUTHENTICATED = 16 + """UNAUTHENTICATED.""" + + +class MessageTypeValues(Enum): + SENT = "SENT" + """sent.""" + + RECEIVED = "RECEIVED" + """received.""" + + +class RpcConnectRpcErrorCodeValues(Enum): + CANCELLED = "cancelled" + """cancelled.""" + + UNKNOWN = "unknown" + """unknown.""" + + INVALID_ARGUMENT = "invalid_argument" + """invalid_argument.""" + + DEADLINE_EXCEEDED = "deadline_exceeded" + """deadline_exceeded.""" + + NOT_FOUND = "not_found" + """not_found.""" + + ALREADY_EXISTS = "already_exists" + """already_exists.""" + + PERMISSION_DENIED = "permission_denied" + """permission_denied.""" + + RESOURCE_EXHAUSTED = "resource_exhausted" + """resource_exhausted.""" + + FAILED_PRECONDITION = "failed_precondition" + """failed_precondition.""" + + ABORTED = "aborted" + """aborted.""" + + OUT_OF_RANGE = "out_of_range" + """out_of_range.""" + + UNIMPLEMENTED = "unimplemented" + """unimplemented.""" + + INTERNAL = "internal" + """internal.""" + + UNAVAILABLE = "unavailable" + """unavailable.""" + + DATA_LOSS = "data_loss" + """data_loss.""" + + UNAUTHENTICATED = "unauthenticated" + """unauthenticated.""" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/version/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/version/__init__.py new file mode 100644 index 00000000..7fb5b98b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/version/__init__.py @@ -0,0 +1,15 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__version__ = "0.52b1" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/trace/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/trace/__init__.py new file mode 100644 index 00000000..73087e95 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/trace/__init__.py @@ -0,0 +1,646 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +The OpenTelemetry tracing API describes the classes used to generate +distributed traces. + +The :class:`.Tracer` class controls access to the execution context, and +manages span creation. Each operation in a trace is represented by a +:class:`.Span`, which records the start, end time, and metadata associated with +the operation. + +This module provides abstract (i.e. unimplemented) classes required for +tracing, and a concrete no-op :class:`.NonRecordingSpan` that allows applications +to use the API package alone without a supporting implementation. 
+ +To get a tracer, you need to provide the package name from which you are +calling the tracer APIs to OpenTelemetry by calling `TracerProvider.get_tracer` +with the calling module name and the version of your package. + +The tracer supports creating spans that are "attached" or "detached" from the +context. New spans are "attached" to the context in that they are +created as children of the currently active span, and the newly-created span +can optionally become the new active span:: + + from opentelemetry import trace + + tracer = trace.get_tracer(__name__) + + # Create a new root span, set it as the current span in context + with tracer.start_as_current_span("parent"): + # Attach a new child and update the current span + with tracer.start_as_current_span("child"): + do_work(): + # Close child span, set parent as current + # Close parent span, set default span as current + +When creating a span that's "detached" from the context the active span doesn't +change, and the caller is responsible for managing the span's lifetime:: + + # Explicit parent span assignment is done via the Context + from opentelemetry.trace import set_span_in_context + + context = set_span_in_context(parent) + child = tracer.start_span("child", context=context) + + try: + do_work(span=child) + finally: + child.end() + +Applications should generally use a single global TracerProvider, and use +either implicit or explicit context propagation consistently throughout. + +.. versionadded:: 0.1.0 +.. versionchanged:: 0.3.0 + `TracerProvider` was introduced and the global ``tracer`` getter was + replaced by ``tracer_provider``. +.. versionchanged:: 0.5.0 + ``tracer_provider`` was replaced by `get_tracer_provider`, + ``set_preferred_tracer_provider_implementation`` was replaced by + `set_tracer_provider`. 
+""" + +import os +import typing +from abc import ABC, abstractmethod +from enum import Enum +from logging import getLogger +from typing import Iterator, Optional, Sequence, cast + +from deprecated import deprecated + +from opentelemetry import context as context_api +from opentelemetry.attributes import BoundedAttributes +from opentelemetry.context.context import Context +from opentelemetry.environment_variables import OTEL_PYTHON_TRACER_PROVIDER +from opentelemetry.trace.propagation import ( + _SPAN_KEY, + get_current_span, + set_span_in_context, +) +from opentelemetry.trace.span import ( + DEFAULT_TRACE_OPTIONS, + DEFAULT_TRACE_STATE, + INVALID_SPAN, + INVALID_SPAN_CONTEXT, + INVALID_SPAN_ID, + INVALID_TRACE_ID, + NonRecordingSpan, + Span, + SpanContext, + TraceFlags, + TraceState, + format_span_id, + format_trace_id, +) +from opentelemetry.trace.status import Status, StatusCode +from opentelemetry.util import types +from opentelemetry.util._decorator import _agnosticcontextmanager +from opentelemetry.util._once import Once +from opentelemetry.util._providers import _load_provider + +logger = getLogger(__name__) + + +class _LinkBase(ABC): + def __init__(self, context: "SpanContext") -> None: + self._context = context + + @property + def context(self) -> "SpanContext": + return self._context + + @property + @abstractmethod + def attributes(self) -> types.Attributes: + pass + + +class Link(_LinkBase): + """A link to a `Span`. The attributes of a Link are immutable. + + Args: + context: `SpanContext` of the `Span` to link to. + attributes: Link's attributes. 
+ """ + + def __init__( + self, + context: "SpanContext", + attributes: types.Attributes = None, + ) -> None: + super().__init__(context) + self._attributes = attributes + + @property + def attributes(self) -> types.Attributes: + return self._attributes + + @property + def dropped_attributes(self) -> int: + if isinstance(self._attributes, BoundedAttributes): + return self._attributes.dropped + return 0 + + +_Links = Optional[Sequence[Link]] + + +class SpanKind(Enum): + """Specifies additional details on how this span relates to its parent span. + + Note that this enumeration is experimental and likely to change. See + https://github.com/open-telemetry/opentelemetry-specification/pull/226. + """ + + #: Default value. Indicates that the span is used internally in the + # application. + INTERNAL = 0 + + #: Indicates that the span describes an operation that handles a remote + # request. + SERVER = 1 + + #: Indicates that the span describes a request to some remote service. + CLIENT = 2 + + #: Indicates that the span describes a producer sending a message to a + #: broker. Unlike client and server, there is usually no direct critical + #: path latency relationship between producer and consumer spans. + PRODUCER = 3 + + #: Indicates that the span describes a consumer receiving a message from a + #: broker. Unlike client and server, there is usually no direct critical + #: path latency relationship between producer and consumer spans. + CONSUMER = 4 + + +class TracerProvider(ABC): + @abstractmethod + def get_tracer( + self, + instrumenting_module_name: str, + instrumenting_library_version: typing.Optional[str] = None, + schema_url: typing.Optional[str] = None, + attributes: typing.Optional[types.Attributes] = None, + ) -> "Tracer": + """Returns a `Tracer` for use by the given instrumentation library. + + For any two calls it is undefined whether the same or different + `Tracer` instances are returned, even for different library names. 
+ + This function may return different `Tracer` types (e.g. a no-op tracer + vs. a functional tracer). + + Args: + instrumenting_module_name: The uniquely identifiable name for instrumentation + scope, such as instrumentation library, package, module or class name. + ``__name__`` may not be used as this can result in + different tracer names if the tracers are in different files. + It is better to use a fixed string that can be imported where + needed and used consistently as the name of the tracer. + + This should *not* be the name of the module that is + instrumented but the name of the module doing the instrumentation. + E.g., instead of ``"requests"``, use + ``"opentelemetry.instrumentation.requests"``. + + instrumenting_library_version: Optional. The version string of the + instrumenting library. Usually this should be the same as + ``importlib.metadata.version(instrumenting_library_name)``. + + schema_url: Optional. Specifies the Schema URL of the emitted telemetry. + attributes: Optional. Specifies the attributes of the emitted telemetry. + """ + + +class NoOpTracerProvider(TracerProvider): + """The default TracerProvider, used when no implementation is available. + + All operations are no-op. + """ + + def get_tracer( + self, + instrumenting_module_name: str, + instrumenting_library_version: typing.Optional[str] = None, + schema_url: typing.Optional[str] = None, + attributes: typing.Optional[types.Attributes] = None, + ) -> "Tracer": + # pylint:disable=no-self-use,unused-argument + return NoOpTracer() + + +@deprecated(version="1.9.0", reason="You should use NoOpTracerProvider") # type: ignore +class _DefaultTracerProvider(NoOpTracerProvider): + """The default TracerProvider, used when no implementation is available. + + All operations are no-op. 
+ """ + + +class ProxyTracerProvider(TracerProvider): + def get_tracer( + self, + instrumenting_module_name: str, + instrumenting_library_version: typing.Optional[str] = None, + schema_url: typing.Optional[str] = None, + attributes: typing.Optional[types.Attributes] = None, + ) -> "Tracer": + if _TRACER_PROVIDER: + return _TRACER_PROVIDER.get_tracer( + instrumenting_module_name, + instrumenting_library_version, + schema_url, + attributes, + ) + return ProxyTracer( + instrumenting_module_name, + instrumenting_library_version, + schema_url, + attributes, + ) + + +class Tracer(ABC): + """Handles span creation and in-process context propagation. + + This class provides methods for manipulating the context, creating spans, + and controlling spans' lifecycles. + """ + + @abstractmethod + def start_span( + self, + name: str, + context: Optional[Context] = None, + kind: SpanKind = SpanKind.INTERNAL, + attributes: types.Attributes = None, + links: _Links = None, + start_time: Optional[int] = None, + record_exception: bool = True, + set_status_on_exception: bool = True, + ) -> "Span": + """Starts a span. + + Create a new span. Start the span without setting it as the current + span in the context. To start the span and use the context in a single + method, see :meth:`start_as_current_span`. + + By default the current span in the context will be used as parent, but an + explicit context can also be specified, by passing in a `Context` containing + a current `Span`. If there is no current span in the global `Context` or in + the specified context, the created span will be a root span. + + The span can be used as a context manager. On exiting the context manager, + the span's end() method will be called. + + Example:: + + # trace.get_current_span() will be used as the implicit parent. + # If none is found, the created span will be a root instance. + with tracer.start_span("one") as child: + child.add_event("child's event") + + Args: + name: The name of the span to be created. 
+ context: An optional Context containing the span's parent. Defaults to the + global context. + kind: The span's kind (relationship to parent). Note that is + meaningful even if there is no parent. + attributes: The span's attributes. + links: Links span to other spans + start_time: Sets the start time of a span + record_exception: Whether to record any exceptions raised within the + context as error event on the span. + set_status_on_exception: Only relevant if the returned span is used + in a with/context manager. Defines whether the span status will + be automatically set to ERROR when an uncaught exception is + raised in the span with block. The span status won't be set by + this mechanism if it was previously set manually. + + Returns: + The newly-created span. + """ + + @_agnosticcontextmanager + @abstractmethod + def start_as_current_span( + self, + name: str, + context: Optional[Context] = None, + kind: SpanKind = SpanKind.INTERNAL, + attributes: types.Attributes = None, + links: _Links = None, + start_time: Optional[int] = None, + record_exception: bool = True, + set_status_on_exception: bool = True, + end_on_exit: bool = True, + ) -> Iterator["Span"]: + """Context manager for creating a new span and set it + as the current span in this tracer's context. + + Exiting the context manager will call the span's end method, + as well as return the current span to its previous value by + returning to the previous context. + + Example:: + + with tracer.start_as_current_span("one") as parent: + parent.add_event("parent's event") + with tracer.start_as_current_span("two") as child: + child.add_event("child's event") + trace.get_current_span() # returns child + trace.get_current_span() # returns parent + trace.get_current_span() # returns previously active span + + This is a convenience method for creating spans attached to the + tracer's context. Applications that need more control over the span + lifetime should use :meth:`start_span` instead. 
For example:: + + with tracer.start_as_current_span(name) as span: + do_work() + + is equivalent to:: + + span = tracer.start_span(name) + with opentelemetry.trace.use_span(span, end_on_exit=True): + do_work() + + This can also be used as a decorator:: + + @tracer.start_as_current_span("name") + def function(): + ... + + function() + + Args: + name: The name of the span to be created. + context: An optional Context containing the span's parent. Defaults to the + global context. + kind: The span's kind (relationship to parent). Note that is + meaningful even if there is no parent. + attributes: The span's attributes. + links: Links span to other spans + start_time: Sets the start time of a span + record_exception: Whether to record any exceptions raised within the + context as error event on the span. + set_status_on_exception: Only relevant if the returned span is used + in a with/context manager. Defines whether the span status will + be automatically set to ERROR when an uncaught exception is + raised in the span with block. The span status won't be set by + this mechanism if it was previously set manually. + end_on_exit: Whether to end the span automatically when leaving the + context manager. + + Yields: + The newly-created span. 
+ """ + + +class ProxyTracer(Tracer): + # pylint: disable=W0222,signature-differs + def __init__( + self, + instrumenting_module_name: str, + instrumenting_library_version: typing.Optional[str] = None, + schema_url: typing.Optional[str] = None, + attributes: typing.Optional[types.Attributes] = None, + ): + self._instrumenting_module_name = instrumenting_module_name + self._instrumenting_library_version = instrumenting_library_version + self._schema_url = schema_url + self._attributes = attributes + self._real_tracer: Optional[Tracer] = None + self._noop_tracer = NoOpTracer() + + @property + def _tracer(self) -> Tracer: + if self._real_tracer: + return self._real_tracer + + if _TRACER_PROVIDER: + self._real_tracer = _TRACER_PROVIDER.get_tracer( + self._instrumenting_module_name, + self._instrumenting_library_version, + self._schema_url, + self._attributes, + ) + return self._real_tracer + return self._noop_tracer + + def start_span(self, *args, **kwargs) -> Span: # type: ignore + return self._tracer.start_span(*args, **kwargs) # type: ignore + + @_agnosticcontextmanager # type: ignore + def start_as_current_span(self, *args, **kwargs) -> Iterator[Span]: + with self._tracer.start_as_current_span(*args, **kwargs) as span: # type: ignore + yield span + + +class NoOpTracer(Tracer): + """The default Tracer, used when no Tracer implementation is available. + + All operations are no-op. 
+ """ + + def start_span( + self, + name: str, + context: Optional[Context] = None, + kind: SpanKind = SpanKind.INTERNAL, + attributes: types.Attributes = None, + links: _Links = None, + start_time: Optional[int] = None, + record_exception: bool = True, + set_status_on_exception: bool = True, + ) -> "Span": + return INVALID_SPAN + + @_agnosticcontextmanager + def start_as_current_span( + self, + name: str, + context: Optional[Context] = None, + kind: SpanKind = SpanKind.INTERNAL, + attributes: types.Attributes = None, + links: _Links = None, + start_time: Optional[int] = None, + record_exception: bool = True, + set_status_on_exception: bool = True, + end_on_exit: bool = True, + ) -> Iterator["Span"]: + yield INVALID_SPAN + + +@deprecated(version="1.9.0", reason="You should use NoOpTracer") # type: ignore +class _DefaultTracer(NoOpTracer): + """The default Tracer, used when no Tracer implementation is available. + + All operations are no-op. + """ + + +_TRACER_PROVIDER_SET_ONCE = Once() +_TRACER_PROVIDER: Optional[TracerProvider] = None +_PROXY_TRACER_PROVIDER = ProxyTracerProvider() + + +def get_tracer( + instrumenting_module_name: str, + instrumenting_library_version: typing.Optional[str] = None, + tracer_provider: Optional[TracerProvider] = None, + schema_url: typing.Optional[str] = None, + attributes: typing.Optional[types.Attributes] = None, +) -> "Tracer": + """Returns a `Tracer` for use by the given instrumentation library. + + This function is a convenience wrapper for + opentelemetry.trace.TracerProvider.get_tracer. + + If tracer_provider is omitted the current configured one is used. 
+ """ + if tracer_provider is None: + tracer_provider = get_tracer_provider() + return tracer_provider.get_tracer( + instrumenting_module_name, + instrumenting_library_version, + schema_url, + attributes, + ) + + +def _set_tracer_provider(tracer_provider: TracerProvider, log: bool) -> None: + def set_tp() -> None: + global _TRACER_PROVIDER # pylint: disable=global-statement + _TRACER_PROVIDER = tracer_provider + + did_set = _TRACER_PROVIDER_SET_ONCE.do_once(set_tp) + + if log and not did_set: + logger.warning("Overriding of current TracerProvider is not allowed") + + +def set_tracer_provider(tracer_provider: TracerProvider) -> None: + """Sets the current global :class:`~.TracerProvider` object. + + This can only be done once, a warning will be logged if any further attempt + is made. + """ + _set_tracer_provider(tracer_provider, log=True) + + +def get_tracer_provider() -> TracerProvider: + """Gets the current global :class:`~.TracerProvider` object.""" + if _TRACER_PROVIDER is None: + # if a global tracer provider has not been set either via code or env + # vars, return a proxy tracer provider + if OTEL_PYTHON_TRACER_PROVIDER not in os.environ: + return _PROXY_TRACER_PROVIDER + + tracer_provider: TracerProvider = _load_provider( + OTEL_PYTHON_TRACER_PROVIDER, "tracer_provider" + ) + _set_tracer_provider(tracer_provider, log=False) + # _TRACER_PROVIDER will have been set by one thread + return cast("TracerProvider", _TRACER_PROVIDER) + + +@_agnosticcontextmanager +def use_span( + span: Span, + end_on_exit: bool = False, + record_exception: bool = True, + set_status_on_exception: bool = True, +) -> Iterator[Span]: + """Takes a non-active span and activates it in the current context. + + Args: + span: The span that should be activated in the current context. + end_on_exit: Whether to end the span automatically when leaving the + context manager scope. + record_exception: Whether to record any exceptions raised within the + context as error event on the span. 
+ set_status_on_exception: Only relevant if the returned span is used + in a with/context manager. Defines whether the span status will + be automatically set to ERROR when an uncaught exception is + raised in the span with block. The span status won't be set by + this mechanism if it was previously set manually. + """ + try: + token = context_api.attach(context_api.set_value(_SPAN_KEY, span)) + try: + yield span + finally: + context_api.detach(token) + + # Record only exceptions that inherit Exception class but not BaseException, because + # classes that directly inherit BaseException are not technically errors, e.g. GeneratorExit. + # See https://github.com/open-telemetry/opentelemetry-python/issues/4484 + except Exception as exc: # pylint: disable=broad-exception-caught + if isinstance(span, Span) and span.is_recording(): + # Record the exception as an event + if record_exception: + span.record_exception(exc) + + # Set status in case exception was raised + if set_status_on_exception: + span.set_status( + Status( + status_code=StatusCode.ERROR, + description=f"{type(exc).__name__}: {exc}", + ) + ) + + # This causes parent spans to set their status to ERROR and to record + # an exception as an event if a child span raises an exception even if + # such child span was started with both record_exception and + # set_status_on_exception attributes set to False. 
+ raise + + finally: + if end_on_exit: + span.end() + + +__all__ = [ + "DEFAULT_TRACE_OPTIONS", + "DEFAULT_TRACE_STATE", + "INVALID_SPAN", + "INVALID_SPAN_CONTEXT", + "INVALID_SPAN_ID", + "INVALID_TRACE_ID", + "NonRecordingSpan", + "Link", + "Span", + "SpanContext", + "SpanKind", + "TraceFlags", + "TraceState", + "TracerProvider", + "Tracer", + "format_span_id", + "format_trace_id", + "get_current_span", + "get_tracer", + "get_tracer_provider", + "set_tracer_provider", + "set_span_in_context", + "use_span", + "Status", + "StatusCode", +] diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/trace/propagation/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/trace/propagation/__init__.py new file mode 100644 index 00000000..d3529e17 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/trace/propagation/__init__.py @@ -0,0 +1,51 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Optional + +from opentelemetry.context import create_key, get_value, set_value +from opentelemetry.context.context import Context +from opentelemetry.trace.span import INVALID_SPAN, Span + +SPAN_KEY = "current-span" +_SPAN_KEY = create_key("current-span") + + +def set_span_in_context( + span: Span, context: Optional[Context] = None +) -> Context: + """Set the span in the given context. + + Args: + span: The Span to set. + context: a Context object. 
if one is not passed, the + default current context is used instead. + """ + ctx = set_value(_SPAN_KEY, span, context=context) + return ctx + + +def get_current_span(context: Optional[Context] = None) -> Span: + """Retrieve the current span. + + Args: + context: A Context object. If one is not passed, the + default current context is used instead. + + Returns: + The Span set in the context if it exists. INVALID_SPAN otherwise. + """ + span = get_value(_SPAN_KEY, context=context) + if span is None or not isinstance(span, Span): + return INVALID_SPAN + return span diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/trace/propagation/tracecontext.py b/.venv/lib/python3.12/site-packages/opentelemetry/trace/propagation/tracecontext.py new file mode 100644 index 00000000..af16a08f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/trace/propagation/tracecontext.py @@ -0,0 +1,118 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import re +import typing + +from opentelemetry import trace +from opentelemetry.context.context import Context +from opentelemetry.propagators import textmap +from opentelemetry.trace import format_span_id, format_trace_id +from opentelemetry.trace.span import TraceState + + +class TraceContextTextMapPropagator(textmap.TextMapPropagator): + """Extracts and injects using w3c TraceContext's headers.""" + + _TRACEPARENT_HEADER_NAME = "traceparent" + _TRACESTATE_HEADER_NAME = "tracestate" + _TRACEPARENT_HEADER_FORMAT = ( + "^[ \t]*([0-9a-f]{2})-([0-9a-f]{32})-([0-9a-f]{16})-([0-9a-f]{2})" + + "(-.*)?[ \t]*$" + ) + _TRACEPARENT_HEADER_FORMAT_RE = re.compile(_TRACEPARENT_HEADER_FORMAT) + + def extract( + self, + carrier: textmap.CarrierT, + context: typing.Optional[Context] = None, + getter: textmap.Getter[textmap.CarrierT] = textmap.default_getter, + ) -> Context: + """Extracts SpanContext from the carrier. + + See `opentelemetry.propagators.textmap.TextMapPropagator.extract` + """ + if context is None: + context = Context() + + header = getter.get(carrier, self._TRACEPARENT_HEADER_NAME) + + if not header: + return context + + match = re.search(self._TRACEPARENT_HEADER_FORMAT_RE, header[0]) + if not match: + return context + + version: str = match.group(1) + trace_id: str = match.group(2) + span_id: str = match.group(3) + trace_flags: str = match.group(4) + + if trace_id == "0" * 32 or span_id == "0" * 16: + return context + + if version == "00": + if match.group(5): # type: ignore + return context + if version == "ff": + return context + + tracestate_headers = getter.get(carrier, self._TRACESTATE_HEADER_NAME) + if tracestate_headers is None: + tracestate = None + else: + tracestate = TraceState.from_header(tracestate_headers) + + span_context = trace.SpanContext( + trace_id=int(trace_id, 16), + span_id=int(span_id, 16), + is_remote=True, + trace_flags=trace.TraceFlags(int(trace_flags, 16)), + trace_state=tracestate, + ) + return trace.set_span_in_context( + 
trace.NonRecordingSpan(span_context), context + ) + + def inject( + self, + carrier: textmap.CarrierT, + context: typing.Optional[Context] = None, + setter: textmap.Setter[textmap.CarrierT] = textmap.default_setter, + ) -> None: + """Injects SpanContext into the carrier. + + See `opentelemetry.propagators.textmap.TextMapPropagator.inject` + """ + span = trace.get_current_span(context) + span_context = span.get_span_context() + if span_context == trace.INVALID_SPAN_CONTEXT: + return + traceparent_string = f"00-{format_trace_id(span_context.trace_id)}-{format_span_id(span_context.span_id)}-{span_context.trace_flags:02x}" + setter.set(carrier, self._TRACEPARENT_HEADER_NAME, traceparent_string) + if span_context.trace_state: + tracestate_string = span_context.trace_state.to_header() + setter.set( + carrier, self._TRACESTATE_HEADER_NAME, tracestate_string + ) + + @property + def fields(self) -> typing.Set[str]: + """Returns a set with the fields set in `inject`. + + See + `opentelemetry.propagators.textmap.TextMapPropagator.fields` + """ + return {self._TRACEPARENT_HEADER_NAME, self._TRACESTATE_HEADER_NAME} diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/trace/py.typed b/.venv/lib/python3.12/site-packages/opentelemetry/trace/py.typed new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/trace/py.typed diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/trace/span.py b/.venv/lib/python3.12/site-packages/opentelemetry/trace/span.py new file mode 100644 index 00000000..6e54dfc7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/trace/span.py @@ -0,0 +1,608 @@ +import abc +import logging +import re +import types as python_types +import typing +import warnings + +from opentelemetry.trace.status import Status, StatusCode +from opentelemetry.util import types + +# The key MUST begin with a lowercase letter or a digit, +# and can only contain lowercase letters (a-z), digits 
# Tracestate key grammar (W3C Trace Context):
# the key MUST begin with a lowercase letter or a digit and may only contain
# lowercase letters (a-z), digits (0-9), underscores (_), dashes (-),
# asterisks (*), and forward slashes (/). For multi-tenant vendor scenarios
# an at sign (@) can prefix the vendor name; vendors SHOULD put the tenant
# ID at the beginning of the key.
#
# key     = ( lcalpha ) 0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )
# key     = ( lcalpha / DIGIT ) 0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ) "@" lcalpha 0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )
# lcalpha = %x61-7A ; a-z

_KEY_FORMAT = (
    r"[a-z][_0-9a-z\-\*\/]{0,255}|"
    r"[a-z0-9][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}"
)
_KEY_PATTERN = re.compile(_KEY_FORMAT)

# Tracestate value grammar: up to 256 printable ASCII [RFC0020] characters
# (0x20 to 0x7E) excluding comma (,) and equals (=); must not end in space.
#
# value    = 0*255(chr) nblk-chr
# nblk-chr = %x21-2B / %x2D-3C / %x3E-7E
# chr      = %x20 / nblk-chr

_VALUE_FORMAT = (
    r"[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]"
)
_VALUE_PATTERN = re.compile(_VALUE_FORMAT)


_TRACECONTEXT_MAXIMUM_TRACESTATE_KEYS = 32
_delimiter_pattern = re.compile(r"[ \t]*,[ \t]*")
_member_pattern = re.compile(f"({_KEY_FORMAT})(=)({_VALUE_FORMAT})[ \t]*")
_logger = logging.getLogger(__name__)


def _is_valid_pair(key: str, value: str) -> bool:
    """Return True when both *key* and *value* are legal tracestate strings."""
    key_is_valid = (
        isinstance(key, str) and _KEY_PATTERN.fullmatch(key) is not None
    )
    value_is_valid = (
        isinstance(value, str) and _VALUE_PATTERN.fullmatch(value) is not None
    )
    return key_is_valid and value_is_valid


class Span(abc.ABC):
    """A span represents a single operation within a trace."""

    @abc.abstractmethod
    def end(self, end_time: typing.Optional[int] = None) -> None:
        """Marks the span as finished, using the wall time of the call when
        *end_time* is not given.

        Only the first call to `end` should modify the span; implementations
        are free to ignore or raise on further calls.
        """

    @abc.abstractmethod
    def get_span_context(self) -> "SpanContext":
        """Gets the span's SpanContext.

        The returned value is an immutable, serializable identifier for this
        span that can be used to create new child spans.

        Returns:
            A :class:`opentelemetry.trace.SpanContext` with a copy of this span's immutable state.
        """

    @abc.abstractmethod
    def set_attributes(
        self, attributes: typing.Mapping[str, types.AttributeValue]
    ) -> None:
        """Sets several attributes at once from a mapping.

        Note: the behavior of `None` values is undefined and strongly
        discouraged. Prefer setting attributes at span creation, since
        samplers can only consider information present at that time.
        """

    @abc.abstractmethod
    def set_attribute(self, key: str, value: types.AttributeValue) -> None:
        """Sets a single attribute.

        Note: the behavior of `None` values is undefined and strongly
        discouraged. Prefer setting attributes at span creation, since
        samplers can only consider information present at that time.
        """

    @abc.abstractmethod
    def add_event(
        self,
        name: str,
        attributes: types.Attributes = None,
        timestamp: typing.Optional[int] = None,
    ) -> None:
        """Adds a single `Event` with the given name and, optionally, a
        timestamp and attributes.

        Implementations should generate a timestamp when the *timestamp*
        argument is omitted.
        """

    def add_link(  # pylint: disable=no-self-use
        self,
        context: "SpanContext",
        attributes: types.Attributes = None,
    ) -> None:
        """Adds a single `Link` pointing at *context*.

        Implementations may ignore calls with an invalid span context when
        both attributes and TraceState are empty.

        Note: prefer adding links at span creation, since samplers can only
        consider information present at that time. This API-only default is
        a no-op that warns; SDKs override it.
        """
        warnings.warn(
            "Span.add_link() not implemented and will be a no-op. "
            "Use opentelemetry-sdk >= 1.23 to add links after span creation"
        )

    @abc.abstractmethod
    def update_name(self, name: str) -> None:
        """Updates the `Span` name, overriding the name provided via
        :func:`opentelemetry.trace.Tracer.start_span`.

        Whether sampling behavior based on span name reacts to the update is
        implementation-defined.
        """

    @abc.abstractmethod
    def is_recording(self) -> bool:
        """Returns whether this span will be recorded.

        True means the span is active and recording information such as
        events (add_event) and attributes (set_attribute).
        """

    @abc.abstractmethod
    def set_status(
        self,
        status: typing.Union[Status, StatusCode],
        description: typing.Optional[str] = None,
    ) -> None:
        """Sets the Status of the Span, overriding the default status when
        used.
        """

    @abc.abstractmethod
    def record_exception(
        self,
        exception: BaseException,
        attributes: types.Attributes = None,
        timestamp: typing.Optional[int] = None,
        escaped: bool = False,
    ) -> None:
        """Records an exception as a span event."""

    def __enter__(self) -> "Span":
        """Invoked when `Span` is used as a context manager; returns the
        `Span` itself.
        """
        return self

    def __exit__(
        self,
        exc_type: typing.Optional[typing.Type[BaseException]],
        exc_val: typing.Optional[BaseException],
        exc_tb: typing.Optional[python_types.TracebackType],
    ) -> None:
        """Ends the context manager by calling `end` on the `Span`."""
        self.end()
class TraceFlags(int):
    """A bitmask of options specific to the trace.

    The only supported option is the "sampled" flag (``0x01``). If set, this
    flag indicates that the trace may have been sampled upstream.

    See the `W3C Trace Context - Traceparent`_ spec for details.

    .. _W3C Trace Context - Traceparent:
        https://www.w3.org/TR/trace-context/#trace-flags
    """

    DEFAULT = 0x00
    SAMPLED = 0x01

    @classmethod
    def get_default(cls) -> "TraceFlags":
        """Returns flags with no option set."""
        return cls(cls.DEFAULT)

    @property
    def sampled(self) -> bool:
        """Whether the sampled bit is set."""
        return bool(self & TraceFlags.SAMPLED)


DEFAULT_TRACE_OPTIONS = TraceFlags.get_default()


class TraceState(typing.Mapping[str, str]):
    """An immutable list of key-value pairs of vendor-specific trace info.

    Keys and values are strings of up to 256 printable US-ASCII characters.
    Implementations should conform to the `W3C Trace Context - Tracestate`_
    spec, which describes additional restrictions on valid field values.

    All mutators (`add`, `update`, `delete`) return a new TraceState; invalid
    input leaves the original state unchanged and logs a warning.

    .. _W3C Trace Context - Tracestate:
        https://www.w3.org/TR/trace-context/#tracestate-field
    """

    def __init__(
        self,
        entries: typing.Optional[
            typing.Sequence[typing.Tuple[str, str]]
        ] = None,
    ) -> None:
        self._dict = {}  # type: dict[str, str]
        if entries is None:
            return
        # The spec caps a tracestate at 32 members; reject oversized input
        # wholesale rather than truncating it.
        if len(entries) > _TRACECONTEXT_MAXIMUM_TRACESTATE_KEYS:
            _logger.warning(
                "There can't be more than %s key/value pairs.",
                _TRACECONTEXT_MAXIMUM_TRACESTATE_KEYS,
            )
            return

        for key, value in entries:
            if _is_valid_pair(key, value):
                if key in self._dict:
                    _logger.warning("Duplicate key: %s found.", key)
                    continue
                self._dict[key] = value
            else:
                _logger.warning(
                    "Invalid key/value pair (%s, %s) found.", key, value
                )

    def __contains__(self, item: object) -> bool:
        return item in self._dict

    def __getitem__(self, key: str) -> str:
        return self._dict[key]

    def __iter__(self) -> typing.Iterator[str]:
        return iter(self._dict)

    def __len__(self) -> int:
        return len(self._dict)

    def __repr__(self) -> str:
        pairs = [
            f"{{key={key}, value={value}}}"
            for key, value in self._dict.items()
        ]
        return str(pairs)

    def add(self, key: str, value: str) -> "TraceState":
        """Adds a key-value pair to tracestate. The provided pair should
        adhere to w3c tracestate identifiers format.

        Args:
            key: A valid tracestate key to add
            value: A valid tracestate value to add

        Returns:
            A new TraceState with the modifications applied.

            If the provided key-value pair is invalid or results in tracestate
            that violates tracecontext specification, they are discarded and
            same tracestate will be returned.
        """
        if not _is_valid_pair(key, value):
            _logger.warning(
                "Invalid key/value pair (%s, %s) found.", key, value
            )
            return self
        # There can be a maximum of 32 pairs.
        # (Fixed: message previously read "more 32"; now parameterized and
        # consistent with __init__.)
        if len(self) >= _TRACECONTEXT_MAXIMUM_TRACESTATE_KEYS:
            _logger.warning(
                "There can't be more than %s key/value pairs.",
                _TRACECONTEXT_MAXIMUM_TRACESTATE_KEYS,
            )
            return self
        # Duplicate entries are not allowed
        if key in self._dict:
            _logger.warning("The provided key %s already exists.", key)
            return self
        # New entries are prepended, matching the w3c "most recent first"
        # ordering for mutated tracestate.
        new_state = [(key, value)] + list(self._dict.items())
        return TraceState(new_state)

    def update(self, key: str, value: str) -> "TraceState":
        """Updates a key-value pair in tracestate. The provided pair should
        adhere to w3c tracestate identifiers format.

        Args:
            key: A valid tracestate key to update
            value: A valid tracestate value to update for key

        Returns:
            A new TraceState with the modifications applied.

            If the provided key-value pair is invalid or results in tracestate
            that violates tracecontext specification, they are discarded and
            same tracestate will be returned.
        """
        if not _is_valid_pair(key, value):
            _logger.warning(
                "Invalid key/value pair (%s, %s) found.", key, value
            )
            return self
        # Remove the old entry and re-insert the pair at the front.
        prev_state = self._dict.copy()
        prev_state.pop(key, None)
        new_state = [(key, value), *prev_state.items()]
        return TraceState(new_state)

    def delete(self, key: str) -> "TraceState":
        """Deletes a key-value from tracestate.

        Args:
            key: A valid tracestate key to remove key-value pair from tracestate

        Returns:
            A new TraceState with the modifications applied.

            If the provided key-value pair is invalid or results in tracestate
            that violates tracecontext specification, they are discarded and
            same tracestate will be returned.
        """
        if key not in self._dict:
            _logger.warning("The provided key %s doesn't exist.", key)
            return self
        prev_state = self._dict.copy()
        prev_state.pop(key)
        new_state = list(prev_state.items())
        return TraceState(new_state)

    def to_header(self) -> str:
        """Creates a w3c tracestate header from a TraceState.

        Returns:
            A string that adheres to the w3c tracestate
            header format.
        """
        return ",".join(key + "=" + value for key, value in self._dict.items())

    @classmethod
    def from_header(cls, header_list: typing.List[str]) -> "TraceState":
        """Parses one or more w3c tracestate header into a TraceState.

        Args:
            header_list: one or more w3c tracestate headers.

        Returns:
            A valid TraceState that contains values extracted from
            the tracestate header.

            If the format of one header is illegal, or the number of keys is
            beyond the maximum, all values are discarded and an empty
            tracestate is returned.
        """
        pairs = {}  # type: dict[str, str]
        for header in header_list:
            members: typing.List[str] = re.split(_delimiter_pattern, header)
            for member in members:
                # empty members are valid, but no need to process further.
                if not member:
                    continue
                match = _member_pattern.fullmatch(member)
                if not match:
                    _logger.warning(
                        "Member doesn't match the w3c identifiers format %s",
                        member,
                    )
                    return cls()
                groups: typing.Tuple[str, ...] = match.groups()
                key, _eq, value = groups
                # duplicate keys are not legal in header
                if key in pairs:
                    return cls()
                pairs[key] = value
        return cls(list(pairs.items()))

    @classmethod
    def get_default(cls) -> "TraceState":
        """Returns an empty TraceState."""
        return cls()

    def keys(self) -> typing.KeysView[str]:
        return self._dict.keys()

    def items(self) -> typing.ItemsView[str, str]:
        return self._dict.items()

    def values(self) -> typing.ValuesView[str]:
        return self._dict.values()


DEFAULT_TRACE_STATE = TraceState.get_default()
_TRACE_ID_MAX_VALUE = 2**128 - 1
_SPAN_ID_MAX_VALUE = 2**64 - 1
+ """ + + def __new__( + cls, + trace_id: int, + span_id: int, + is_remote: bool, + trace_flags: typing.Optional["TraceFlags"] = DEFAULT_TRACE_OPTIONS, + trace_state: typing.Optional["TraceState"] = DEFAULT_TRACE_STATE, + ) -> "SpanContext": + if trace_flags is None: + trace_flags = DEFAULT_TRACE_OPTIONS + if trace_state is None: + trace_state = DEFAULT_TRACE_STATE + + is_valid = ( + INVALID_TRACE_ID < trace_id <= _TRACE_ID_MAX_VALUE + and INVALID_SPAN_ID < span_id <= _SPAN_ID_MAX_VALUE + ) + + return tuple.__new__( + cls, + (trace_id, span_id, is_remote, trace_flags, trace_state, is_valid), + ) + + def __getnewargs__( + self, + ) -> typing.Tuple[int, int, bool, "TraceFlags", "TraceState"]: + return ( + self.trace_id, + self.span_id, + self.is_remote, + self.trace_flags, + self.trace_state, + ) + + @property + def trace_id(self) -> int: + return self[0] # pylint: disable=unsubscriptable-object + + @property + def span_id(self) -> int: + return self[1] # pylint: disable=unsubscriptable-object + + @property + def is_remote(self) -> bool: + return self[2] # pylint: disable=unsubscriptable-object + + @property + def trace_flags(self) -> "TraceFlags": + return self[3] # pylint: disable=unsubscriptable-object + + @property + def trace_state(self) -> "TraceState": + return self[4] # pylint: disable=unsubscriptable-object + + @property + def is_valid(self) -> bool: + return self[5] # pylint: disable=unsubscriptable-object + + def __setattr__(self, *args: str) -> None: + _logger.debug( + "Immutable type, ignoring call to set attribute", stack_info=True + ) + + def __delattr__(self, *args: str) -> None: + _logger.debug( + "Immutable type, ignoring call to set attribute", stack_info=True + ) + + def __repr__(self) -> str: + return f"{type(self).__name__}(trace_id=0x{format_trace_id(self.trace_id)}, span_id=0x{format_span_id(self.span_id)}, trace_flags=0x{self.trace_flags:02x}, trace_state={self.trace_state!r}, is_remote={self.is_remote})" + + +class NonRecordingSpan(Span): + 
"""The Span that is used when no Span implementation is available. + + All operations are no-op except context propagation. + """ + + def __init__(self, context: "SpanContext") -> None: + self._context = context + + def get_span_context(self) -> "SpanContext": + return self._context + + def is_recording(self) -> bool: + return False + + def end(self, end_time: typing.Optional[int] = None) -> None: + pass + + def set_attributes( + self, attributes: typing.Mapping[str, types.AttributeValue] + ) -> None: + pass + + def set_attribute(self, key: str, value: types.AttributeValue) -> None: + pass + + def add_event( + self, + name: str, + attributes: types.Attributes = None, + timestamp: typing.Optional[int] = None, + ) -> None: + pass + + def add_link( + self, + context: "SpanContext", + attributes: types.Attributes = None, + ) -> None: + pass + + def update_name(self, name: str) -> None: + pass + + def set_status( + self, + status: typing.Union[Status, StatusCode], + description: typing.Optional[str] = None, + ) -> None: + pass + + def record_exception( + self, + exception: BaseException, + attributes: types.Attributes = None, + timestamp: typing.Optional[int] = None, + escaped: bool = False, + ) -> None: + pass + + def __repr__(self) -> str: + return f"NonRecordingSpan({self._context!r})" + + +INVALID_SPAN_ID = 0x0000000000000000 +INVALID_TRACE_ID = 0x00000000000000000000000000000000 +INVALID_SPAN_CONTEXT = SpanContext( + trace_id=INVALID_TRACE_ID, + span_id=INVALID_SPAN_ID, + is_remote=False, + trace_flags=DEFAULT_TRACE_OPTIONS, + trace_state=DEFAULT_TRACE_STATE, +) +INVALID_SPAN = NonRecordingSpan(INVALID_SPAN_CONTEXT) + + +def format_trace_id(trace_id: int) -> str: + """Convenience trace ID formatting method + Args: + trace_id: Trace ID int + + Returns: + The trace ID as 32-byte hexadecimal string + """ + return format(trace_id, "032x") + + +def format_span_id(span_id: int) -> str: + """Convenience span ID formatting method + Args: + span_id: Span ID int + + Returns: 
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import enum
import logging
import typing

logger = logging.getLogger(__name__)


class StatusCode(enum.Enum):
    """Represents the canonical set of status codes of a finished Span."""

    UNSET = 0
    """The default status."""

    OK = 1
    """The operation has been validated by an Application developer or Operator to have completed successfully."""

    ERROR = 2
    """The operation contains an error."""


class Status:
    """Represents the status of a finished Span.

    A description is only retained when it is a string AND the status code is
    ``StatusCode.ERROR``; otherwise it is dropped with a warning.

    Args:
        status_code: The canonical status code that describes the result
            status of the operation.
        description: An optional description of the status.
    """

    def __init__(
        self,
        status_code: StatusCode = StatusCode.UNSET,
        description: typing.Optional[str] = None,
    ):
        self._status_code = status_code
        self._description = None

        # Flat guard clauses: bail out (leaving description unset) on any
        # invalid combination instead of nesting the validation.
        if not description:
            return
        if not isinstance(description, str):
            logger.warning("Invalid status description type, expected str")
            return
        if status_code is not StatusCode.ERROR:
            logger.warning(
                "description should only be set when status_code is set to StatusCode.ERROR"
            )
            return

        self._description = description

    @property
    def status_code(self) -> StatusCode:
        """Represents the canonical status code of a finished Span."""
        return self._status_code

    @property
    def description(self) -> typing.Optional[str]:
        """Status description"""
        return self._description

    @property
    def is_ok(self) -> bool:
        """Returns false if this represents an error, true otherwise."""
        return self._status_code in (StatusCode.UNSET, StatusCode.OK)

    @property
    def is_unset(self) -> bool:
        """Returns true if unset, false otherwise."""
        return self._status_code is StatusCode.UNSET
import asyncio
import contextlib
import functools
from typing import TYPE_CHECKING, Callable, Generic, Iterator, TypeVar

V = TypeVar("V")
R = TypeVar("R")  # Return type
Pargs = TypeVar("Pargs")  # Generic type for arguments
Pkwargs = TypeVar("Pkwargs")  # Generic type for arguments

# We don't actually depend on typing_extensions but we can use it in CI with this conditional
# import. ParamSpec can be imported directly from typing after python 3.9 is dropped
# https://peps.python.org/pep-0612/.
if TYPE_CHECKING:
    from typing_extensions import ParamSpec

    P = ParamSpec("P")  # Generic type for all arguments


class _AgnosticContextManager(
    contextlib._GeneratorContextManager,  # type: ignore # FIXME use contextlib._GeneratorContextManager[R] when we drop the python 3.8 support
    Generic[R],
):  # pylint: disable=protected-access
    """Context manager that can decorate both async and sync functions.

    This is an overridden version of the contextlib._GeneratorContextManager
    class that will decorate async functions with an async context manager
    to end the span AFTER the entire async function coroutine finishes.

    Else it will report near zero spans durations for async functions.

    We are overriding the contextlib._GeneratorContextManager class as
    reimplementing it is a lot of code to maintain and this class (even if it's
    marked as protected) doesn't seems like to be evolving a lot.

    For more information, see:
    https://github.com/open-telemetry/opentelemetry-python/pull/3633
    """

    def __enter__(self) -> R:
        """Reimplementing __enter__ to avoid the type error.

        The original __enter__ method returns Any type, but we want to return R.
        """
        # Drop the stored call arguments so a one-shot manager can't be
        # accidentally reused; mirrors contextlib's own __enter__.
        del self.args, self.kwds, self.func  # type: ignore
        try:
            # Advance the generator to its yield; the yielded value is the
            # context manager's "as" target.
            return next(self.gen)  # type: ignore
        except StopIteration:
            # The generator returned without yielding: it is not a valid
            # context-manager body.
            raise RuntimeError("generator didn't yield") from None

    def __call__(self, func: V) -> V:  # pyright: ignore [reportIncompatibleMethodOverride]
        # When decorating a coroutine function, keep the context (e.g. a
        # span) open across the whole awaited coroutine, not just the call
        # that creates it.
        if asyncio.iscoroutinefunction(func):

            @functools.wraps(func)  # type: ignore
            async def async_wrapper(*args: Pargs, **kwargs: Pkwargs) -> R:  # pyright: ignore [reportInvalidTypeVarUse]
                # _recreate_cm builds a fresh manager per call, so the
                # decorated coroutine is safely re-entrant.
                with self._recreate_cm():  # type: ignore
                    return await func(*args, **kwargs)  # type: ignore

            return async_wrapper  # type: ignore
        # Sync functions get contextlib's standard decoration behavior.
        return super().__call__(func)  # type: ignore


def _agnosticcontextmanager(
    func: "Callable[P, Iterator[R]]",
) -> "Callable[P, _AgnosticContextManager[R]]":
    # Equivalent of contextlib.contextmanager, but producing the async-aware
    # _AgnosticContextManager above.
    @functools.wraps(func)
    def helper(*args: Pargs, **kwargs: Pkwargs) -> _AgnosticContextManager[R]:  # pyright: ignore [reportInvalidTypeVarUse]
        return _AgnosticContextManager(func, args, kwargs)  # pyright: ignore [reportArgumentType]

    # Ignoring the type to keep the original signature of the function
    return helper  # type: ignore[return-value]
# FIXME: Use importlib.metadata when support for 3.11 is dropped if the rest of
# the supported versions at that time have the same API.
from importlib_metadata import (  # type: ignore
    Distribution,
    EntryPoint,
    EntryPoints,
    PackageNotFoundError,
    distributions,
    entry_points,
    requires,
    version,
)

__all__ = [
    "entry_points",
    "version",
    "EntryPoint",
    "EntryPoints",
    "requires",
    "Distribution",
    "distributions",
    "PackageNotFoundError",
]


from threading import Lock
from typing import Callable


class Once:
    """Execute a function exactly once and block all callers until the function returns

    Same as golang's `sync.Once <https://pkg.go.dev/sync#Once>`_
    """

    def __init__(self) -> None:
        self._lock = Lock()
        self._done = False

    def do_once(self, func: Callable[[], None]) -> bool:
        """Execute ``func`` if it hasn't been executed, otherwise return.

        Blocks until any in-flight call of ``func`` has completed.

        Returns:
            Whether or not ``func`` was executed in this call
        """
        # Lock-free fast path: once the flag is set it never clears, so a
        # plain read is safe.
        if self._done:
            return False

        with self._lock:
            # Re-check under the lock; another thread may have run func()
            # while we were waiting.
            if self._done:
                return False
            func()
            self._done = True
            return True
from logging import getLogger
from os import environ
from typing import TYPE_CHECKING, TypeVar, cast

from opentelemetry.util._importlib_metadata import entry_points

if TYPE_CHECKING:
    from opentelemetry.metrics import MeterProvider
    from opentelemetry.trace import TracerProvider

Provider = TypeVar("Provider", "TracerProvider", "MeterProvider")

logger = getLogger(__name__)


def _load_provider(
    provider_environment_variable: str, provider: str
) -> Provider:  # type: ignore[type-var]
    """Instantiate the provider selected by an environment variable.

    Looks up the entry point named by ``provider_environment_variable``
    (falling back to ``default_<provider>``) in the
    ``opentelemetry_<provider>`` entry-point group, loads it and returns a
    new instance. Any failure is logged and re-raised.
    """
    try:
        provider_name = cast(
            str,
            environ.get(provider_environment_variable, f"default_{provider}"),
        )
        matching_entry_points = entry_points(  # type: ignore
            group=f"opentelemetry_{provider}",
            name=provider_name,
        )
        # Take the first matching entry point; StopIteration propagates (and
        # is logged below) when nothing matches.
        provider_class = next(iter(matching_entry_points)).load()  # type: ignore
        return cast(Provider, provider_class())
    except Exception:  # pylint: disable=broad-exception-caught
        logger.exception("Failed to load configured provider %s", provider)
        raise
+ +from __future__ import annotations + +from collections.abc import Mapping +from os import environ +from re import IGNORECASE as RE_IGNORECASE +from re import compile as re_compile +from re import search +from typing import Callable, Iterable, overload +from urllib.parse import urlparse, urlunparse + +from opentelemetry.semconv.trace import SpanAttributes + +OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS = ( + "OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS" +) +OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST = ( + "OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST" +) +OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE = ( + "OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE" +) + +OTEL_PYTHON_INSTRUMENTATION_HTTP_CAPTURE_ALL_METHODS = ( + "OTEL_PYTHON_INSTRUMENTATION_HTTP_CAPTURE_ALL_METHODS" +) + +# List of recommended metrics attributes +_duration_attrs = { + SpanAttributes.HTTP_METHOD, + SpanAttributes.HTTP_HOST, + SpanAttributes.HTTP_SCHEME, + SpanAttributes.HTTP_STATUS_CODE, + SpanAttributes.HTTP_FLAVOR, + SpanAttributes.HTTP_SERVER_NAME, + SpanAttributes.NET_HOST_NAME, + SpanAttributes.NET_HOST_PORT, +} + +_active_requests_count_attrs = { + SpanAttributes.HTTP_METHOD, + SpanAttributes.HTTP_HOST, + SpanAttributes.HTTP_SCHEME, + SpanAttributes.HTTP_FLAVOR, + SpanAttributes.HTTP_SERVER_NAME, +} + + +class ExcludeList: + """Class to exclude certain paths (given as a list of regexes) from tracing requests""" + + def __init__(self, excluded_urls: Iterable[str]): + self._excluded_urls = excluded_urls + if self._excluded_urls: + self._regex = re_compile("|".join(excluded_urls)) + + def url_disabled(self, url: str) -> bool: + return bool(self._excluded_urls and search(self._regex, url)) + + +class SanitizeValue: + """Class to sanitize (remove sensitive data from) certain headers (given as a list of regexes)""" + + def __init__(self, sanitized_fields: Iterable[str]): + self._sanitized_fields = sanitized_fields + 
if self._sanitized_fields: + self._regex = re_compile("|".join(sanitized_fields), RE_IGNORECASE) + + def sanitize_header_value(self, header: str, value: str) -> str: + return ( + "[REDACTED]" + if (self._sanitized_fields and search(self._regex, header)) + else value + ) + + def sanitize_header_values( + self, + headers: Mapping[str, str | list[str]], + header_regexes: list[str], + normalize_function: Callable[[str], str], + ) -> dict[str, list[str]]: + values: dict[str, list[str]] = {} + + if header_regexes: + header_regexes_compiled = re_compile( + "|".join(header_regexes), + RE_IGNORECASE, + ) + + for header_name, header_value in headers.items(): + if header_regexes_compiled.fullmatch(header_name): + key = normalize_function(header_name.lower()) + if isinstance(header_value, str): + values[key] = [ + self.sanitize_header_value( + header_name, header_value + ) + ] + else: + values[key] = [ + self.sanitize_header_value(header_name, value) + for value in header_value + ] + + return values + + +_root = r"OTEL_PYTHON_{}" + + +def get_traced_request_attrs(instrumentation: str) -> list[str]: + traced_request_attrs = environ.get( + _root.format(f"{instrumentation}_TRACED_REQUEST_ATTRS") + ) + if traced_request_attrs: + return [ + traced_request_attr.strip() + for traced_request_attr in traced_request_attrs.split(",") + ] + return [] + + +def get_excluded_urls(instrumentation: str) -> ExcludeList: + # Get instrumentation-specific excluded URLs. If not set, retrieve them + # from generic variable. 
+ excluded_urls = environ.get( + _root.format(f"{instrumentation}_EXCLUDED_URLS"), + environ.get(_root.format("EXCLUDED_URLS"), ""), + ) + + return parse_excluded_urls(excluded_urls) + + +def parse_excluded_urls(excluded_urls: str) -> ExcludeList: + """ + Small helper to put an arbitrary url list inside an ExcludeList + """ + if excluded_urls: + excluded_url_list = [ + excluded_url.strip() for excluded_url in excluded_urls.split(",") + ] + else: + excluded_url_list = [] + + return ExcludeList(excluded_url_list) + + +def remove_url_credentials(url: str) -> str: + """Given a string url, remove the username and password only if it is a valid url""" + + try: + parsed = urlparse(url) + if all([parsed.scheme, parsed.netloc]): # checks for valid url + parsed_url = urlparse(url) + _, _, netloc = parsed.netloc.rpartition("@") + return urlunparse( + ( + parsed_url.scheme, + netloc, + parsed_url.path, + parsed_url.params, + parsed_url.query, + parsed_url.fragment, + ) + ) + except ValueError: # an unparsable url was passed + pass + return url + + +def normalise_request_header_name(header: str) -> str: + key = header.lower().replace("-", "_") + return f"http.request.header.{key}" + + +def normalise_response_header_name(header: str) -> str: + key = header.lower().replace("-", "_") + return f"http.response.header.{key}" + + +@overload +def sanitize_method(method: str) -> str: ... + + +@overload +def sanitize_method(method: None) -> None: ... + + +def sanitize_method(method: str | None) -> str | None: + if method is None: + return None + method = method.upper() + if ( + environ.get(OTEL_PYTHON_INSTRUMENTATION_HTTP_CAPTURE_ALL_METHODS) + or + # Based on https://www.rfc-editor.org/rfc/rfc7231#section-4.1 and https://www.rfc-editor.org/rfc/rfc5789#section-2. 
+ method + in [ + "GET", + "HEAD", + "POST", + "PUT", + "DELETE", + "CONNECT", + "OPTIONS", + "TRACE", + "PATCH", + ] + ): + return method + return "_OTHER" + + +def get_custom_headers(env_var: str) -> list[str]: + custom_headers = environ.get(env_var, None) + if custom_headers: + return [ + custom_headers.strip() + for custom_headers in custom_headers.split(",") + ] + return [] + + +def _parse_active_request_count_attrs(req_attrs): + active_requests_count_attrs = { + key: req_attrs[key] + for key in _active_requests_count_attrs.intersection(req_attrs.keys()) + } + return active_requests_count_attrs + + +def _parse_duration_attrs(req_attrs): + duration_attrs = { + key: req_attrs[key] + for key in _duration_attrs.intersection(req_attrs.keys()) + } + return duration_attrs + + +def _parse_url_query(url: str): + parsed_url = urlparse(url) + path = parsed_url.path + query_params = parsed_url.query + return path, query_params diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/util/http/httplib.py b/.venv/lib/python3.12/site-packages/opentelemetry/util/http/httplib.py new file mode 100644 index 00000000..f375e2f7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/util/http/httplib.py @@ -0,0 +1,195 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This library provides functionality to enrich HTTP client spans with IPs. It does +not create spans on its own. 
+""" + +from __future__ import annotations + +import contextlib +import http.client +import logging +import socket # pylint:disable=unused-import # Used for typing +import typing +from typing import Any, Callable, Collection, TypedDict, cast + +import wrapt + +from opentelemetry import context +from opentelemetry.instrumentation.instrumentor import BaseInstrumentor +from opentelemetry.instrumentation.utils import unwrap +from opentelemetry.semconv.trace import SpanAttributes +from opentelemetry.trace.span import Span + +_STATE_KEY = "httpbase_instrumentation_state" + +logger = logging.getLogger(__name__) + +R = typing.TypeVar("R") + + +class HttpClientInstrumentor(BaseInstrumentor): + def instrumentation_dependencies(self) -> Collection[str]: + return () # This instruments http.client from stdlib; no extra deps. + + def _instrument(self, **kwargs: Any): + """Instruments the http.client module (not creating spans on its own)""" + _instrument() + + def _uninstrument(self, **kwargs: Any): + _uninstrument() + + +def _remove_nonrecording(spanlist: list[Span]) -> bool: + idx = len(spanlist) - 1 + while idx >= 0: + if not spanlist[idx].is_recording(): + logger.debug("Span is not recording: %s", spanlist[idx]) + islast = idx + 1 == len(spanlist) + if not islast: + spanlist[idx] = spanlist[len(spanlist) - 1] + spanlist.pop() + if islast: + if idx == 0: + return False # We removed everything + idx -= 1 + else: + idx -= 1 + return True + + +def trysetip( + conn: http.client.HTTPConnection, loglevel: int = logging.DEBUG +) -> bool: + """Tries to set the net.peer.ip semantic attribute on the current span from the given + HttpConnection. + + Returns False if the connection is not yet established, False if the IP was captured + or there is no need to capture it. + """ + + state = _getstate() + if not state: + return True + spanlist: typing.List[Span] = state.get("need_ip") + if not spanlist: + return True + + # Remove all non-recording spans from the list. 
+ if not _remove_nonrecording(spanlist): + return True + + sock = "<property not accessed>" + try: + sock: typing.Optional[socket.socket] = conn.sock + logger.debug("Got socket: %s", sock) + if sock is None: + return False + addr = sock.getpeername() + if addr and addr[0]: + ip = addr[0] + except Exception: # pylint:disable=broad-except + logger.log( + loglevel, + "Failed to get peer address from %s", + sock, + exc_info=True, + stack_info=True, + ) + else: + for span in spanlist: + span.set_attribute(SpanAttributes.NET_PEER_IP, ip) + return True + + +def _instrumented_connect( + wrapped: Callable[..., R], + instance: http.client.HTTPConnection, + args: tuple[Any, ...], + kwargs: dict[str, Any], +) -> R: + result = wrapped(*args, **kwargs) + trysetip(instance, loglevel=logging.WARNING) + return result + + +def instrument_connect(module: type[Any], name: str = "connect"): + """Instrument additional connect() methods, e.g. for derived classes.""" + + wrapt.wrap_function_wrapper( + module, + name, + _instrumented_connect, + ) + + +def _instrument(): + def instrumented_send( + wrapped: Callable[..., R], + instance: http.client.HTTPConnection, + args: tuple[Any, ...], + kwargs: dict[str, Any], + ) -> R: + done = trysetip(instance) + result = wrapped(*args, **kwargs) + if not done: + trysetip(instance, loglevel=logging.WARNING) + return result + + wrapt.wrap_function_wrapper( + http.client.HTTPConnection, + "send", + instrumented_send, + ) + + instrument_connect(http.client.HTTPConnection) + # No need to instrument HTTPSConnection, as it calls super().connect() + + +class _ConnectionState(TypedDict): + need_ip: list[Span] + + +def _getstate() -> _ConnectionState | None: + return cast(_ConnectionState, context.get_value(_STATE_KEY)) + + +@contextlib.contextmanager +def set_ip_on_next_http_connection(span: Span): + state = _getstate() + if not state: + token = context.attach( + context.set_value(_STATE_KEY, {"need_ip": [span]}) + ) + try: + yield + finally: + 
context.detach(token) + else: + spans = state["need_ip"] + spans.append(span) + try: + yield + finally: + try: + spans.remove(span) + except ValueError: # Span might have become non-recording + pass + + +def _uninstrument(): + unwrap(http.client.HTTPConnection, "send") + unwrap(http.client.HTTPConnection, "connect") diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/util/http/py.typed b/.venv/lib/python3.12/site-packages/opentelemetry/util/http/py.typed new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/util/http/py.typed diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/util/http/version.py b/.venv/lib/python3.12/site-packages/opentelemetry/util/http/version.py new file mode 100644 index 00000000..7fb5b98b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/util/http/version.py @@ -0,0 +1,15 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +__version__ = "0.52b1" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/util/py.typed b/.venv/lib/python3.12/site-packages/opentelemetry/util/py.typed new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/util/py.typed diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/util/re.py b/.venv/lib/python3.12/site-packages/opentelemetry/util/re.py new file mode 100644 index 00000000..2436cb61 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/util/re.py @@ -0,0 +1,114 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from logging import getLogger +from re import compile, split +from typing import Dict, List, Mapping +from urllib.parse import unquote + +from deprecated import deprecated + +_logger = getLogger(__name__) + +# The following regexes reference this spec: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md#specifying-headers-via-environment-variables + +# Optional whitespace +_OWS = r"[ \t]*" +# A key contains printable US-ASCII characters except: SP and "(),/:;<=>?@[\]{} +_KEY_FORMAT = ( + r"[\x21\x23-\x27\x2a\x2b\x2d\x2e\x30-\x39\x41-\x5a\x5e-\x7a\x7c\x7e]+" +) +# A value contains a URL-encoded UTF-8 string. 
The encoded form can contain any +# printable US-ASCII characters (0x20-0x7f) other than SP, DEL, and ",;/ +_VALUE_FORMAT = r"[\x21\x23-\x2b\x2d-\x3a\x3c-\x5b\x5d-\x7e]*" +# Like above with SP included +_LIBERAL_VALUE_FORMAT = r"[\x20\x21\x23-\x2b\x2d-\x3a\x3c-\x5b\x5d-\x7e]*" +# A key-value is key=value, with optional whitespace surrounding key and value +_KEY_VALUE_FORMAT = rf"{_OWS}{_KEY_FORMAT}{_OWS}={_OWS}{_VALUE_FORMAT}{_OWS}" + +_HEADER_PATTERN = compile(_KEY_VALUE_FORMAT) +_LIBERAL_HEADER_PATTERN = compile( + rf"{_OWS}{_KEY_FORMAT}{_OWS}={_OWS}{_LIBERAL_VALUE_FORMAT}{_OWS}" +) +_DELIMITER_PATTERN = compile(r"[ \t]*,[ \t]*") + +_BAGGAGE_PROPERTY_FORMAT = rf"{_KEY_VALUE_FORMAT}|{_OWS}{_KEY_FORMAT}{_OWS}" + +_INVALID_HEADER_ERROR_MESSAGE_STRICT_TEMPLATE = ( + "Header format invalid! Header values in environment variables must be " + "URL encoded per the OpenTelemetry Protocol Exporter specification: %s" +) + +_INVALID_HEADER_ERROR_MESSAGE_LIBERAL_TEMPLATE = ( + "Header format invalid! Header values in environment variables must be " + "URL encoded per the OpenTelemetry Protocol Exporter specification or " + "a comma separated list of name=value occurrences: %s" +) + +# pylint: disable=invalid-name + + +@deprecated(version="1.15.0", reason="You should use parse_env_headers") # type: ignore +def parse_headers(s: str) -> Mapping[str, str]: + return parse_env_headers(s) + + +def parse_env_headers(s: str, liberal: bool = False) -> Mapping[str, str]: + """ + Parse ``s``, which is a ``str`` instance containing HTTP headers encoded + for use in ENV variables per the W3C Baggage HTTP header format at + https://www.w3.org/TR/baggage/#baggage-http-header-format, except that + additional semi-colon delimited metadata is not supported. + If ``liberal`` is True we try to parse ``s`` anyway to be more compatible + with other languages SDKs that accept non URL-encoded headers by default. 
+ """ + headers: Dict[str, str] = {} + headers_list: List[str] = split(_DELIMITER_PATTERN, s) + for header in headers_list: + if not header: # empty string + continue + header_match = _HEADER_PATTERN.fullmatch(header.strip()) + if not header_match and not liberal: + _logger.warning( + _INVALID_HEADER_ERROR_MESSAGE_STRICT_TEMPLATE, header + ) + continue + + if header_match: + match_string: str = header_match.string + # value may contain any number of `=` + name, value = match_string.split("=", 1) + name = unquote(name).strip().lower() + value = unquote(value).strip() + headers[name] = value + else: + # this is not url-encoded and does not match the spec but we decided to be + # liberal in what we accept to match other languages SDKs behaviour + liberal_header_match = _LIBERAL_HEADER_PATTERN.fullmatch( + header.strip() + ) + if not liberal_header_match: + _logger.warning( + _INVALID_HEADER_ERROR_MESSAGE_LIBERAL_TEMPLATE, header + ) + continue + + liberal_match_string: str = liberal_header_match.string + # value may contain any number of `=` + name, value = liberal_match_string.split("=", 1) + name = name.strip().lower() + value = value.strip() + headers[name] = value + + return headers diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/util/types.py b/.venv/lib/python3.12/site-packages/opentelemetry/util/types.py new file mode 100644 index 00000000..be311faf --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/util/types.py @@ -0,0 +1,57 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Mapping, Optional, Sequence, Tuple, Union + +# This is the implementation of the "Any" type as specified by the specifications of OpenTelemetry data model for logs. +# For more details, refer to the OTel specification: +# https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md#type-any +AnyValue = Union[ + str, + bool, + int, + float, + bytes, + Sequence["AnyValue"], + Mapping[str, "AnyValue"], + None, +] + +AttributeValue = Union[ + str, + bool, + int, + float, + Sequence[str], + Sequence[bool], + Sequence[int], + Sequence[float], +] +Attributes = Optional[Mapping[str, AttributeValue]] +AttributesAsKey = Tuple[ + Tuple[ + str, + Union[ + str, + bool, + int, + float, + Tuple[Optional[str], ...], + Tuple[Optional[bool], ...], + Tuple[Optional[int], ...], + Tuple[Optional[float], ...], + ], + ], + ..., +] diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/version/__init__.py b/.venv/lib/python3.12/site-packages/opentelemetry/version/__init__.py new file mode 100644 index 00000000..09125bac --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/version/__init__.py @@ -0,0 +1,15 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +__version__ = "1.31.1" diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/version/py.typed b/.venv/lib/python3.12/site-packages/opentelemetry/version/py.typed new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/opentelemetry/version/py.typed |