path: root/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics
author    S. Solomon Darnell  2025-03-28 21:52:21 -0500
committer S. Solomon Darnell  2025-03-28 21:52:21 -0500
commit    4a52a71956a8d46fcb7294ac71734504bb09bcc2 (patch)
tree      ee3dc5af3b6313e921cd920906356f5d4febc4ed /.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics
parent    cc961e04ba734dd72309fb548a2f97d67d578813 (diff)
download  gn-ai-master.tar.gz
two versions of R2R are here (HEAD, master)
Diffstat (limited to '.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics')
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/azure_metrics.py     |  59
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/cicd_metrics.py      | 105
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/container_metrics.py | 152
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/cpu_metrics.py       |  94
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/db_metrics.py        | 386
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/dns_metrics.py       |  34
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/faas_metrics.py      | 170
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/gen_ai_metrics.py    | 104
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/http_metrics.py      | 187
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/hw_metrics.py        | 190
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/k8s_metrics.py       | 871
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/messaging_metrics.py | 186
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/otel_metrics.py      | 162
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/process_metrics.py   | 235
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/rpc_metrics.py       | 211
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/system_metrics.py    | 611
-rw-r--r--  .venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/vcs_metrics.py       | 233
17 files changed, 3990 insertions(+), 0 deletions(-)
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/azure_metrics.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/azure_metrics.py
new file mode 100644
index 00000000..2e45a2ca
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/azure_metrics.py
@@ -0,0 +1,59 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from typing import Final
+
+from opentelemetry.metrics import Histogram, Meter, UpDownCounter
+
+AZURE_COSMOSDB_CLIENT_ACTIVE_INSTANCE_COUNT: Final = (
+ "azure.cosmosdb.client.active_instance.count"
+)
+"""
+Number of active client instances
+Instrument: updowncounter
+Unit: {instance}
+"""
+
+
+def create_azure_cosmosdb_client_active_instance_count(
+ meter: Meter,
+) -> UpDownCounter:
+ """Number of active client instances"""
+ return meter.create_up_down_counter(
+ name=AZURE_COSMOSDB_CLIENT_ACTIVE_INSTANCE_COUNT,
+ description="Number of active client instances",
+ unit="{instance}",
+ )
+
+
+AZURE_COSMOSDB_CLIENT_OPERATION_REQUEST_CHARGE: Final = (
+ "azure.cosmosdb.client.operation.request_charge"
+)
+"""
+[Request units](https://learn.microsoft.com/azure/cosmos-db/request-units) consumed by the operation
+Instrument: histogram
+Unit: {request_unit}
+"""
+
+
+def create_azure_cosmosdb_client_operation_request_charge(
+ meter: Meter,
+) -> Histogram:
+ """[Request units](https://learn.microsoft.com/azure/cosmos-db/request-units) consumed by the operation"""
+ return meter.create_histogram(
+ name=AZURE_COSMOSDB_CLIENT_OPERATION_REQUEST_CHARGE,
+ description="[Request units](https://learn.microsoft.com/azure/cosmos-db/request-units) consumed by the operation",
+ unit="{request_unit}",
+ )
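The two factory functions above only declare instruments; recording still goes through the standard Meter/instrument API. A minimal usage sketch follows (the meter name is an illustrative assumption, not part of this package):

from opentelemetry import metrics
from opentelemetry.semconv._incubating.metrics.azure_metrics import (
    create_azure_cosmosdb_client_active_instance_count,
)

meter = metrics.get_meter("example.cosmosdb.client")  # illustrative meter name
active_instances = create_azure_cosmosdb_client_active_instance_count(meter)

# UpDownCounter: +1 when a Cosmos DB client instance is created, -1 when it is closed.
active_instances.add(1)
# ... use the client ...
active_instances.add(-1)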
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/cicd_metrics.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/cicd_metrics.py
new file mode 100644
index 00000000..53fbfaca
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/cicd_metrics.py
@@ -0,0 +1,105 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from typing import Final
+
+from opentelemetry.metrics import Counter, Histogram, Meter, UpDownCounter
+
+CICD_PIPELINE_RUN_ACTIVE: Final = "cicd.pipeline.run.active"
+"""
+The number of pipeline runs currently active in the system by state
+Instrument: updowncounter
+Unit: {run}
+"""
+
+
+def create_cicd_pipeline_run_active(meter: Meter) -> UpDownCounter:
+ """The number of pipeline runs currently active in the system by state"""
+ return meter.create_up_down_counter(
+ name=CICD_PIPELINE_RUN_ACTIVE,
+ description="The number of pipeline runs currently active in the system by state.",
+ unit="{run}",
+ )
+
+
+CICD_PIPELINE_RUN_DURATION: Final = "cicd.pipeline.run.duration"
+"""
+Duration of a pipeline run grouped by pipeline, state and result
+Instrument: histogram
+Unit: s
+"""
+
+
+def create_cicd_pipeline_run_duration(meter: Meter) -> Histogram:
+ """Duration of a pipeline run grouped by pipeline, state and result"""
+ return meter.create_histogram(
+ name=CICD_PIPELINE_RUN_DURATION,
+ description="Duration of a pipeline run grouped by pipeline, state and result.",
+ unit="s",
+ )
+
+
+CICD_PIPELINE_RUN_ERRORS: Final = "cicd.pipeline.run.errors"
+"""
+The number of errors encountered in pipeline runs (eg. compile, test failures)
+Instrument: counter
+Unit: {error}
+Note: There might be errors in a pipeline run that are non fatal (eg. they are suppressed) or in a parallel stage multiple stages could have a fatal error.
+This means that this error count might not be the same as the count of metric `cicd.pipeline.run.duration` with run result `failure`.
+"""
+
+
+def create_cicd_pipeline_run_errors(meter: Meter) -> Counter:
+ """The number of errors encountered in pipeline runs (eg. compile, test failures)"""
+ return meter.create_counter(
+ name=CICD_PIPELINE_RUN_ERRORS,
+ description="The number of errors encountered in pipeline runs (eg. compile, test failures).",
+ unit="{error}",
+ )
+
+
+CICD_SYSTEM_ERRORS: Final = "cicd.system.errors"
+"""
+The number of errors in a component of the CICD system (eg. controller, scheduler, agent)
+Instrument: counter
+Unit: {error}
+Note: Errors in pipeline run execution are explicitly excluded. Ie a test failure is not counted in this metric.
+"""
+
+
+def create_cicd_system_errors(meter: Meter) -> Counter:
+ """The number of errors in a component of the CICD system (eg. controller, scheduler, agent)"""
+ return meter.create_counter(
+ name=CICD_SYSTEM_ERRORS,
+ description="The number of errors in a component of the CICD system (eg. controller, scheduler, agent).",
+ unit="{error}",
+ )
+
+
+CICD_WORKER_COUNT: Final = "cicd.worker.count"
+"""
+The number of workers on the CICD system by state
+Instrument: updowncounter
+Unit: {count}
+"""
+
+
+def create_cicd_worker_count(meter: Meter) -> UpDownCounter:
+ """The number of workers on the CICD system by state"""
+ return meter.create_up_down_counter(
+ name=CICD_WORKER_COUNT,
+ description="The number of workers on the CICD system by state.",
+ unit="{count}",
+ )
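As a rough sketch of how these CICD instruments might be fed (the meter name, duration value, error count, and attribute key below are illustrative assumptions, not defined in this file):

from opentelemetry import metrics
from opentelemetry.semconv._incubating.metrics.cicd_metrics import (
    create_cicd_pipeline_run_duration,
    create_cicd_pipeline_run_errors,
)

meter = metrics.get_meter("example.cicd")
run_duration = create_cicd_pipeline_run_duration(meter)
run_errors = create_cicd_pipeline_run_errors(meter)

# Record a completed pipeline run and any errors it produced.
run_duration.record(312.4, attributes={"cicd.pipeline.name": "build-and-test"})
run_errors.add(2, attributes={"cicd.pipeline.name": "build-and-test"})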
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/container_metrics.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/container_metrics.py
new file mode 100644
index 00000000..ca4a9131
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/container_metrics.py
@@ -0,0 +1,152 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from typing import (
+ Callable,
+ Final,
+ Generator,
+ Iterable,
+ Optional,
+ Sequence,
+ Union,
+)
+
+from opentelemetry.metrics import (
+ CallbackOptions,
+ Counter,
+ Meter,
+ ObservableGauge,
+ Observation,
+)
+
+# pylint: disable=invalid-name
+CallbackT = Union[
+ Callable[[CallbackOptions], Iterable[Observation]],
+ Generator[Iterable[Observation], CallbackOptions, None],
+]
+
+CONTAINER_CPU_TIME: Final = "container.cpu.time"
+"""
+Total CPU time consumed
+Instrument: counter
+Unit: s
+Note: Total CPU time consumed by the specific container on all available CPU cores.
+"""
+
+
+def create_container_cpu_time(meter: Meter) -> Counter:
+ """Total CPU time consumed"""
+ return meter.create_counter(
+ name=CONTAINER_CPU_TIME,
+ description="Total CPU time consumed",
+ unit="s",
+ )
+
+
+CONTAINER_CPU_USAGE: Final = "container.cpu.usage"
+"""
+Container's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs
+Instrument: gauge
+Unit: {cpu}
+Note: CPU usage of the specific container on all available CPU cores, averaged over the sample window.
+"""
+
+
+def create_container_cpu_usage(
+ meter: Meter, callbacks: Optional[Sequence[CallbackT]]
+) -> ObservableGauge:
+ """Container's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs"""
+ return meter.create_observable_gauge(
+ name=CONTAINER_CPU_USAGE,
+ callbacks=callbacks,
+ description="Container's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs",
+ unit="{cpu}",
+ )
+
+
+CONTAINER_DISK_IO: Final = "container.disk.io"
+"""
+Disk bytes for the container
+Instrument: counter
+Unit: By
+Note: The total number of bytes read/written successfully (aggregated from all disks).
+"""
+
+
+def create_container_disk_io(meter: Meter) -> Counter:
+ """Disk bytes for the container"""
+ return meter.create_counter(
+ name=CONTAINER_DISK_IO,
+ description="Disk bytes for the container.",
+ unit="By",
+ )
+
+
+CONTAINER_MEMORY_USAGE: Final = "container.memory.usage"
+"""
+Memory usage of the container
+Instrument: counter
+Unit: By
+Note: Memory usage of the container.
+"""
+
+
+def create_container_memory_usage(meter: Meter) -> Counter:
+ """Memory usage of the container"""
+ return meter.create_counter(
+ name=CONTAINER_MEMORY_USAGE,
+ description="Memory usage of the container.",
+ unit="By",
+ )
+
+
+CONTAINER_NETWORK_IO: Final = "container.network.io"
+"""
+Network bytes for the container
+Instrument: counter
+Unit: By
+Note: The number of bytes sent/received on all network interfaces by the container.
+"""
+
+
+def create_container_network_io(meter: Meter) -> Counter:
+ """Network bytes for the container"""
+ return meter.create_counter(
+ name=CONTAINER_NETWORK_IO,
+ description="Network bytes for the container.",
+ unit="By",
+ )
+
+
+CONTAINER_UPTIME: Final = "container.uptime"
+"""
+The time the container has been running
+Instrument: gauge
+Unit: s
+Note: Instrumentations SHOULD use a gauge with type `double` and measure uptime in seconds as a floating point number with the highest precision available.
+The actual accuracy would depend on the instrumentation and operating system.
+"""
+
+
+def create_container_uptime(
+ meter: Meter, callbacks: Optional[Sequence[CallbackT]]
+) -> ObservableGauge:
+ """The time the container has been running"""
+ return meter.create_observable_gauge(
+ name=CONTAINER_UPTIME,
+ callbacks=callbacks,
+ description="The time the container has been running",
+ unit="s",
+ )
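container.cpu.usage and container.uptime are asynchronous gauges, so they take callbacks instead of being recorded directly. A minimal sketch, with the uptime value and container.id attribute standing in for real lookups:

from opentelemetry import metrics
from opentelemetry.metrics import CallbackOptions, Observation
from opentelemetry.semconv._incubating.metrics.container_metrics import (
    create_container_uptime,
)

def observe_uptime(options: CallbackOptions):
    # Replace the constant with the container's real uptime in seconds.
    yield Observation(1234.5, {"container.id": "abc123"})

meter = metrics.get_meter("example.container")
uptime_gauge = create_container_uptime(meter, callbacks=[observe_uptime])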
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/cpu_metrics.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/cpu_metrics.py
new file mode 100644
index 00000000..86bc5a67
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/cpu_metrics.py
@@ -0,0 +1,94 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from typing import (
+ Callable,
+ Final,
+ Generator,
+ Iterable,
+ Optional,
+ Sequence,
+ Union,
+)
+
+from opentelemetry.metrics import (
+ CallbackOptions,
+ Counter,
+ Meter,
+ ObservableGauge,
+ Observation,
+)
+
+# pylint: disable=invalid-name
+CallbackT = Union[
+ Callable[[CallbackOptions], Iterable[Observation]],
+ Generator[Iterable[Observation], CallbackOptions, None],
+]
+
+CPU_FREQUENCY: Final = "cpu.frequency"
+"""
+Operating frequency of the logical CPU in Hertz
+Instrument: gauge
+Unit: Hz
+"""
+
+
+def create_cpu_frequency(
+ meter: Meter, callbacks: Optional[Sequence[CallbackT]]
+) -> ObservableGauge:
+ """Operating frequency of the logical CPU in Hertz"""
+ return meter.create_observable_gauge(
+ name=CPU_FREQUENCY,
+ callbacks=callbacks,
+ description="Operating frequency of the logical CPU in Hertz.",
+ unit="Hz",
+ )
+
+
+CPU_TIME: Final = "cpu.time"
+"""
+Seconds each logical CPU spent on each mode
+Instrument: counter
+Unit: s
+"""
+
+
+def create_cpu_time(meter: Meter) -> Counter:
+ """Seconds each logical CPU spent on each mode"""
+ return meter.create_counter(
+ name=CPU_TIME,
+ description="Seconds each logical CPU spent on each mode",
+ unit="s",
+ )
+
+
+CPU_UTILIZATION: Final = "cpu.utilization"
+"""
+For each logical CPU, the utilization is calculated as the change in cumulative CPU time (cpu.time) over a measurement interval, divided by the elapsed time
+Instrument: gauge
+Unit: 1
+"""
+
+
+def create_cpu_utilization(
+ meter: Meter, callbacks: Optional[Sequence[CallbackT]]
+) -> ObservableGauge:
+ """For each logical CPU, the utilization is calculated as the change in cumulative CPU time (cpu.time) over a measurement interval, divided by the elapsed time"""
+ return meter.create_observable_gauge(
+ name=CPU_UTILIZATION,
+ callbacks=callbacks,
+ description="For each logical CPU, the utilization is calculated as the change in cumulative CPU time (cpu.time) over a measurement interval, divided by the elapsed time.",
+ unit="1",
+ )
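For the synchronous cpu.time counter the pattern is the same as any Counter; the delta, mode, and CPU number below are illustrative placeholders:

from opentelemetry import metrics
from opentelemetry.semconv._incubating.metrics.cpu_metrics import create_cpu_time

meter = metrics.get_meter("example.host")
cpu_time = create_cpu_time(meter)

# Add the CPU seconds accumulated since the last collection, per logical CPU and mode.
cpu_time.add(0.25, attributes={"cpu.mode": "user", "cpu.logical_number": 0})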
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/db_metrics.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/db_metrics.py
new file mode 100644
index 00000000..32c0f55f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/db_metrics.py
@@ -0,0 +1,386 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from typing import Final
+
+from opentelemetry.metrics import Counter, Histogram, Meter, UpDownCounter
+
+DB_CLIENT_CONNECTION_COUNT: Final = "db.client.connection.count"
+"""
+The number of connections that are currently in state described by the `state` attribute
+Instrument: updowncounter
+Unit: {connection}
+"""
+
+
+def create_db_client_connection_count(meter: Meter) -> UpDownCounter:
+ """The number of connections that are currently in state described by the `state` attribute"""
+ return meter.create_up_down_counter(
+ name=DB_CLIENT_CONNECTION_COUNT,
+ description="The number of connections that are currently in state described by the `state` attribute",
+ unit="{connection}",
+ )
+
+
+DB_CLIENT_CONNECTION_CREATE_TIME: Final = "db.client.connection.create_time"
+"""
+The time it took to create a new connection
+Instrument: histogram
+Unit: s
+"""
+
+
+def create_db_client_connection_create_time(meter: Meter) -> Histogram:
+ """The time it took to create a new connection"""
+ return meter.create_histogram(
+ name=DB_CLIENT_CONNECTION_CREATE_TIME,
+ description="The time it took to create a new connection",
+ unit="s",
+ )
+
+
+DB_CLIENT_CONNECTION_IDLE_MAX: Final = "db.client.connection.idle.max"
+"""
+The maximum number of idle open connections allowed
+Instrument: updowncounter
+Unit: {connection}
+"""
+
+
+def create_db_client_connection_idle_max(meter: Meter) -> UpDownCounter:
+ """The maximum number of idle open connections allowed"""
+ return meter.create_up_down_counter(
+ name=DB_CLIENT_CONNECTION_IDLE_MAX,
+ description="The maximum number of idle open connections allowed",
+ unit="{connection}",
+ )
+
+
+DB_CLIENT_CONNECTION_IDLE_MIN: Final = "db.client.connection.idle.min"
+"""
+The minimum number of idle open connections allowed
+Instrument: updowncounter
+Unit: {connection}
+"""
+
+
+def create_db_client_connection_idle_min(meter: Meter) -> UpDownCounter:
+ """The minimum number of idle open connections allowed"""
+ return meter.create_up_down_counter(
+ name=DB_CLIENT_CONNECTION_IDLE_MIN,
+ description="The minimum number of idle open connections allowed",
+ unit="{connection}",
+ )
+
+
+DB_CLIENT_CONNECTION_MAX: Final = "db.client.connection.max"
+"""
+The maximum number of open connections allowed
+Instrument: updowncounter
+Unit: {connection}
+"""
+
+
+def create_db_client_connection_max(meter: Meter) -> UpDownCounter:
+ """The maximum number of open connections allowed"""
+ return meter.create_up_down_counter(
+ name=DB_CLIENT_CONNECTION_MAX,
+ description="The maximum number of open connections allowed",
+ unit="{connection}",
+ )
+
+
+DB_CLIENT_CONNECTION_PENDING_REQUESTS: Final = (
+ "db.client.connection.pending_requests"
+)
+"""
+The number of current pending requests for an open connection
+Instrument: updowncounter
+Unit: {request}
+"""
+
+
+def create_db_client_connection_pending_requests(
+ meter: Meter,
+) -> UpDownCounter:
+ """The number of current pending requests for an open connection"""
+ return meter.create_up_down_counter(
+ name=DB_CLIENT_CONNECTION_PENDING_REQUESTS,
+ description="The number of current pending requests for an open connection",
+ unit="{request}",
+ )
+
+
+DB_CLIENT_CONNECTION_TIMEOUTS: Final = "db.client.connection.timeouts"
+"""
+The number of connection timeouts that have occurred trying to obtain a connection from the pool
+Instrument: counter
+Unit: {timeout}
+"""
+
+
+def create_db_client_connection_timeouts(meter: Meter) -> Counter:
+ """The number of connection timeouts that have occurred trying to obtain a connection from the pool"""
+ return meter.create_counter(
+ name=DB_CLIENT_CONNECTION_TIMEOUTS,
+ description="The number of connection timeouts that have occurred trying to obtain a connection from the pool",
+ unit="{timeout}",
+ )
+
+
+DB_CLIENT_CONNECTION_USE_TIME: Final = "db.client.connection.use_time"
+"""
+The time between borrowing a connection and returning it to the pool
+Instrument: histogram
+Unit: s
+"""
+
+
+def create_db_client_connection_use_time(meter: Meter) -> Histogram:
+ """The time between borrowing a connection and returning it to the pool"""
+ return meter.create_histogram(
+ name=DB_CLIENT_CONNECTION_USE_TIME,
+ description="The time between borrowing a connection and returning it to the pool",
+ unit="s",
+ )
+
+
+DB_CLIENT_CONNECTION_WAIT_TIME: Final = "db.client.connection.wait_time"
+"""
+The time it took to obtain an open connection from the pool
+Instrument: histogram
+Unit: s
+"""
+
+
+def create_db_client_connection_wait_time(meter: Meter) -> Histogram:
+ """The time it took to obtain an open connection from the pool"""
+ return meter.create_histogram(
+ name=DB_CLIENT_CONNECTION_WAIT_TIME,
+ description="The time it took to obtain an open connection from the pool",
+ unit="s",
+ )
+
+
+DB_CLIENT_CONNECTIONS_CREATE_TIME: Final = "db.client.connections.create_time"
+"""
+Deprecated: Replaced by `db.client.connection.create_time`. Note: the unit also changed from `ms` to `s`.
+"""
+
+
+def create_db_client_connections_create_time(meter: Meter) -> Histogram:
+ """Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s`"""
+ return meter.create_histogram(
+ name=DB_CLIENT_CONNECTIONS_CREATE_TIME,
+ description="Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s`.",
+ unit="ms",
+ )
+
+
+DB_CLIENT_CONNECTIONS_IDLE_MAX: Final = "db.client.connections.idle.max"
+"""
+Deprecated: Replaced by `db.client.connection.idle.max`.
+"""
+
+
+def create_db_client_connections_idle_max(meter: Meter) -> UpDownCounter:
+ """Deprecated, use `db.client.connection.idle.max` instead"""
+ return meter.create_up_down_counter(
+ name=DB_CLIENT_CONNECTIONS_IDLE_MAX,
+ description="Deprecated, use `db.client.connection.idle.max` instead.",
+ unit="{connection}",
+ )
+
+
+DB_CLIENT_CONNECTIONS_IDLE_MIN: Final = "db.client.connections.idle.min"
+"""
+Deprecated: Replaced by `db.client.connection.idle.min`.
+"""
+
+
+def create_db_client_connections_idle_min(meter: Meter) -> UpDownCounter:
+ """Deprecated, use `db.client.connection.idle.min` instead"""
+ return meter.create_up_down_counter(
+ name=DB_CLIENT_CONNECTIONS_IDLE_MIN,
+ description="Deprecated, use `db.client.connection.idle.min` instead.",
+ unit="{connection}",
+ )
+
+
+DB_CLIENT_CONNECTIONS_MAX: Final = "db.client.connections.max"
+"""
+Deprecated: Replaced by `db.client.connection.max`.
+"""
+
+
+def create_db_client_connections_max(meter: Meter) -> UpDownCounter:
+ """Deprecated, use `db.client.connection.max` instead"""
+ return meter.create_up_down_counter(
+ name=DB_CLIENT_CONNECTIONS_MAX,
+ description="Deprecated, use `db.client.connection.max` instead.",
+ unit="{connection}",
+ )
+
+
+DB_CLIENT_CONNECTIONS_PENDING_REQUESTS: Final = (
+ "db.client.connections.pending_requests"
+)
+"""
+Deprecated: Replaced by `db.client.connection.pending_requests`.
+"""
+
+
+def create_db_client_connections_pending_requests(
+ meter: Meter,
+) -> UpDownCounter:
+ """Deprecated, use `db.client.connection.pending_requests` instead"""
+ return meter.create_up_down_counter(
+ name=DB_CLIENT_CONNECTIONS_PENDING_REQUESTS,
+ description="Deprecated, use `db.client.connection.pending_requests` instead.",
+ unit="{request}",
+ )
+
+
+DB_CLIENT_CONNECTIONS_TIMEOUTS: Final = "db.client.connections.timeouts"
+"""
+Deprecated: Replaced by `db.client.connection.timeouts`.
+"""
+
+
+def create_db_client_connections_timeouts(meter: Meter) -> Counter:
+ """Deprecated, use `db.client.connection.timeouts` instead"""
+ return meter.create_counter(
+ name=DB_CLIENT_CONNECTIONS_TIMEOUTS,
+ description="Deprecated, use `db.client.connection.timeouts` instead.",
+ unit="{timeout}",
+ )
+
+
+DB_CLIENT_CONNECTIONS_USAGE: Final = "db.client.connections.usage"
+"""
+Deprecated: Replaced by `db.client.connection.count`.
+"""
+
+
+def create_db_client_connections_usage(meter: Meter) -> UpDownCounter:
+ """Deprecated, use `db.client.connection.count` instead"""
+ return meter.create_up_down_counter(
+ name=DB_CLIENT_CONNECTIONS_USAGE,
+ description="Deprecated, use `db.client.connection.count` instead.",
+ unit="{connection}",
+ )
+
+
+DB_CLIENT_CONNECTIONS_USE_TIME: Final = "db.client.connections.use_time"
+"""
+Deprecated: Replaced by `db.client.connection.use_time`. Note: the unit also changed from `ms` to `s`.
+"""
+
+
+def create_db_client_connections_use_time(meter: Meter) -> Histogram:
+ """Deprecated, use `db.client.connection.use_time` instead. Note: the unit also changed from `ms` to `s`"""
+ return meter.create_histogram(
+ name=DB_CLIENT_CONNECTIONS_USE_TIME,
+ description="Deprecated, use `db.client.connection.use_time` instead. Note: the unit also changed from `ms` to `s`.",
+ unit="ms",
+ )
+
+
+DB_CLIENT_CONNECTIONS_WAIT_TIME: Final = "db.client.connections.wait_time"
+"""
+Deprecated: Replaced by `db.client.connection.wait_time`. Note: the unit also changed from `ms` to `s`.
+"""
+
+
+def create_db_client_connections_wait_time(meter: Meter) -> Histogram:
+ """Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s`"""
+ return meter.create_histogram(
+ name=DB_CLIENT_CONNECTIONS_WAIT_TIME,
+ description="Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s`.",
+ unit="ms",
+ )
+
+
+DB_CLIENT_COSMOSDB_ACTIVE_INSTANCE_COUNT: Final = (
+ "db.client.cosmosdb.active_instance.count"
+)
+"""
+Deprecated: Replaced by `azure.cosmosdb.client.active_instance.count`.
+"""
+
+
+def create_db_client_cosmosdb_active_instance_count(
+ meter: Meter,
+) -> UpDownCounter:
+ """Deprecated, use `azure.cosmosdb.client.active_instance.count` instead"""
+ return meter.create_up_down_counter(
+ name=DB_CLIENT_COSMOSDB_ACTIVE_INSTANCE_COUNT,
+ description="Deprecated, use `azure.cosmosdb.client.active_instance.count` instead.",
+ unit="{instance}",
+ )
+
+
+DB_CLIENT_COSMOSDB_OPERATION_REQUEST_CHARGE: Final = (
+ "db.client.cosmosdb.operation.request_charge"
+)
+"""
+Deprecated: Replaced by `azure.cosmosdb.client.operation.request_charge`.
+"""
+
+
+def create_db_client_cosmosdb_operation_request_charge(
+ meter: Meter,
+) -> Histogram:
+ """Deprecated, use `azure.cosmosdb.client.operation.request_charge` instead"""
+ return meter.create_histogram(
+ name=DB_CLIENT_COSMOSDB_OPERATION_REQUEST_CHARGE,
+ description="Deprecated, use `azure.cosmosdb.client.operation.request_charge` instead.",
+ unit="{request_unit}",
+ )
+
+
+DB_CLIENT_OPERATION_DURATION: Final = "db.client.operation.duration"
+"""
+Duration of database client operations
+Instrument: histogram
+Unit: s
+Note: Batch operations SHOULD be recorded as a single operation.
+"""
+
+
+def create_db_client_operation_duration(meter: Meter) -> Histogram:
+ """Duration of database client operations"""
+ return meter.create_histogram(
+ name=DB_CLIENT_OPERATION_DURATION,
+ description="Duration of database client operations.",
+ unit="s",
+ )
+
+
+DB_CLIENT_RESPONSE_RETURNED_ROWS: Final = "db.client.response.returned_rows"
+"""
+The actual number of records returned by the database operation
+Instrument: histogram
+Unit: {row}
+"""
+
+
+def create_db_client_response_returned_rows(meter: Meter) -> Histogram:
+ """The actual number of records returned by the database operation"""
+ return meter.create_histogram(
+ name=DB_CLIENT_RESPONSE_RETURNED_ROWS,
+ description="The actual number of records returned by the database operation.",
+ unit="{row}",
+ )
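Much of this module is deprecated aliases for the old db.client.connections.* names; new instrumentation would typically record db.client.operation.duration. A sketch under the assumption that the caller times the query itself (the attribute key is illustrative):

import time

from opentelemetry import metrics
from opentelemetry.semconv._incubating.metrics.db_metrics import (
    create_db_client_operation_duration,
)

meter = metrics.get_meter("example.db.client")
op_duration = create_db_client_operation_duration(meter)

start = time.monotonic()
# ... execute the database operation ...
op_duration.record(time.monotonic() - start, attributes={"db.operation.name": "SELECT"})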
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/dns_metrics.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/dns_metrics.py
new file mode 100644
index 00000000..53fb3d26
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/dns_metrics.py
@@ -0,0 +1,34 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from typing import Final
+
+from opentelemetry.metrics import Histogram, Meter
+
+DNS_LOOKUP_DURATION: Final = "dns.lookup.duration"
+"""
+Measures the time taken to perform a DNS lookup
+Instrument: histogram
+Unit: s
+"""
+
+
+def create_dns_lookup_duration(meter: Meter) -> Histogram:
+ """Measures the time taken to perform a DNS lookup"""
+ return meter.create_histogram(
+ name=DNS_LOOKUP_DURATION,
+ description="Measures the time taken to perform a DNS lookup.",
+ unit="s",
+ )
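A usage sketch for the single instrument in this file, assuming the caller wraps its own resolver call (the hostname and attribute key are illustrative):

import socket
import time

from opentelemetry import metrics
from opentelemetry.semconv._incubating.metrics.dns_metrics import (
    create_dns_lookup_duration,
)

meter = metrics.get_meter("example.dns")
lookup_duration = create_dns_lookup_duration(meter)

start = time.monotonic()
socket.getaddrinfo("example.com", None)  # the DNS lookup being measured
lookup_duration.record(time.monotonic() - start, attributes={"dns.question.name": "example.com"})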
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/faas_metrics.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/faas_metrics.py
new file mode 100644
index 00000000..5fd14149
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/faas_metrics.py
@@ -0,0 +1,170 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from typing import Final
+
+from opentelemetry.metrics import Counter, Histogram, Meter
+
+FAAS_COLDSTARTS: Final = "faas.coldstarts"
+"""
+Number of invocation cold starts
+Instrument: counter
+Unit: {coldstart}
+"""
+
+
+def create_faas_coldstarts(meter: Meter) -> Counter:
+ """Number of invocation cold starts"""
+ return meter.create_counter(
+ name=FAAS_COLDSTARTS,
+ description="Number of invocation cold starts",
+ unit="{coldstart}",
+ )
+
+
+FAAS_CPU_USAGE: Final = "faas.cpu_usage"
+"""
+Distribution of CPU usage per invocation
+Instrument: histogram
+Unit: s
+"""
+
+
+def create_faas_cpu_usage(meter: Meter) -> Histogram:
+ """Distribution of CPU usage per invocation"""
+ return meter.create_histogram(
+ name=FAAS_CPU_USAGE,
+ description="Distribution of CPU usage per invocation",
+ unit="s",
+ )
+
+
+FAAS_ERRORS: Final = "faas.errors"
+"""
+Number of invocation errors
+Instrument: counter
+Unit: {error}
+"""
+
+
+def create_faas_errors(meter: Meter) -> Counter:
+ """Number of invocation errors"""
+ return meter.create_counter(
+ name=FAAS_ERRORS,
+ description="Number of invocation errors",
+ unit="{error}",
+ )
+
+
+FAAS_INIT_DURATION: Final = "faas.init_duration"
+"""
+Measures the duration of the function's initialization, such as a cold start
+Instrument: histogram
+Unit: s
+"""
+
+
+def create_faas_init_duration(meter: Meter) -> Histogram:
+ """Measures the duration of the function's initialization, such as a cold start"""
+ return meter.create_histogram(
+ name=FAAS_INIT_DURATION,
+ description="Measures the duration of the function's initialization, such as a cold start",
+ unit="s",
+ )
+
+
+FAAS_INVOCATIONS: Final = "faas.invocations"
+"""
+Number of successful invocations
+Instrument: counter
+Unit: {invocation}
+"""
+
+
+def create_faas_invocations(meter: Meter) -> Counter:
+ """Number of successful invocations"""
+ return meter.create_counter(
+ name=FAAS_INVOCATIONS,
+ description="Number of successful invocations",
+ unit="{invocation}",
+ )
+
+
+FAAS_INVOKE_DURATION: Final = "faas.invoke_duration"
+"""
+Measures the duration of the function's logic execution
+Instrument: histogram
+Unit: s
+"""
+
+
+def create_faas_invoke_duration(meter: Meter) -> Histogram:
+ """Measures the duration of the function's logic execution"""
+ return meter.create_histogram(
+ name=FAAS_INVOKE_DURATION,
+ description="Measures the duration of the function's logic execution",
+ unit="s",
+ )
+
+
+FAAS_MEM_USAGE: Final = "faas.mem_usage"
+"""
+Distribution of max memory usage per invocation
+Instrument: histogram
+Unit: By
+"""
+
+
+def create_faas_mem_usage(meter: Meter) -> Histogram:
+ """Distribution of max memory usage per invocation"""
+ return meter.create_histogram(
+ name=FAAS_MEM_USAGE,
+ description="Distribution of max memory usage per invocation",
+ unit="By",
+ )
+
+
+FAAS_NET_IO: Final = "faas.net_io"
+"""
+Distribution of net I/O usage per invocation
+Instrument: histogram
+Unit: By
+"""
+
+
+def create_faas_net_io(meter: Meter) -> Histogram:
+ """Distribution of net I/O usage per invocation"""
+ return meter.create_histogram(
+ name=FAAS_NET_IO,
+ description="Distribution of net I/O usage per invocation",
+ unit="By",
+ )
+
+
+FAAS_TIMEOUTS: Final = "faas.timeouts"
+"""
+Number of invocation timeouts
+Instrument: counter
+Unit: {timeout}
+"""
+
+
+def create_faas_timeouts(meter: Meter) -> Counter:
+ """Number of invocation timeouts"""
+ return meter.create_counter(
+ name=FAAS_TIMEOUTS,
+ description="Number of invocation timeouts",
+ unit="{timeout}",
+ )
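These counters and histograms would normally be driven from a function wrapper or platform integration; a minimal sketch, with the trigger attribute and timing value as stand-ins:

from opentelemetry import metrics
from opentelemetry.semconv._incubating.metrics.faas_metrics import (
    create_faas_invocations,
    create_faas_invoke_duration,
)

meter = metrics.get_meter("example.faas")
invocations = create_faas_invocations(meter)
invoke_duration = create_faas_invoke_duration(meter)

# After each successful handler invocation:
invocations.add(1, attributes={"faas.trigger": "http"})
invoke_duration.record(0.087, attributes={"faas.trigger": "http"})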
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/gen_ai_metrics.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/gen_ai_metrics.py
new file mode 100644
index 00000000..97d9dd00
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/gen_ai_metrics.py
@@ -0,0 +1,104 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from typing import Final
+
+from opentelemetry.metrics import Histogram, Meter
+
+GEN_AI_CLIENT_OPERATION_DURATION: Final = "gen_ai.client.operation.duration"
+"""
+GenAI operation duration
+Instrument: histogram
+Unit: s
+"""
+
+
+def create_gen_ai_client_operation_duration(meter: Meter) -> Histogram:
+ """GenAI operation duration"""
+ return meter.create_histogram(
+ name=GEN_AI_CLIENT_OPERATION_DURATION,
+ description="GenAI operation duration",
+ unit="s",
+ )
+
+
+GEN_AI_CLIENT_TOKEN_USAGE: Final = "gen_ai.client.token.usage"
+"""
+Measures number of input and output tokens used
+Instrument: histogram
+Unit: {token}
+"""
+
+
+def create_gen_ai_client_token_usage(meter: Meter) -> Histogram:
+ """Measures number of input and output tokens used"""
+ return meter.create_histogram(
+ name=GEN_AI_CLIENT_TOKEN_USAGE,
+ description="Measures number of input and output tokens used",
+ unit="{token}",
+ )
+
+
+GEN_AI_SERVER_REQUEST_DURATION: Final = "gen_ai.server.request.duration"
+"""
+Generative AI server request duration such as time-to-last byte or last output token
+Instrument: histogram
+Unit: s
+"""
+
+
+def create_gen_ai_server_request_duration(meter: Meter) -> Histogram:
+ """Generative AI server request duration such as time-to-last byte or last output token"""
+ return meter.create_histogram(
+ name=GEN_AI_SERVER_REQUEST_DURATION,
+ description="Generative AI server request duration such as time-to-last byte or last output token",
+ unit="s",
+ )
+
+
+GEN_AI_SERVER_TIME_PER_OUTPUT_TOKEN: Final = (
+ "gen_ai.server.time_per_output_token"
+)
+"""
+Time per output token generated after the first token for successful responses
+Instrument: histogram
+Unit: s
+"""
+
+
+def create_gen_ai_server_time_per_output_token(meter: Meter) -> Histogram:
+ """Time per output token generated after the first token for successful responses"""
+ return meter.create_histogram(
+ name=GEN_AI_SERVER_TIME_PER_OUTPUT_TOKEN,
+ description="Time per output token generated after the first token for successful responses",
+ unit="s",
+ )
+
+
+GEN_AI_SERVER_TIME_TO_FIRST_TOKEN: Final = "gen_ai.server.time_to_first_token"
+"""
+Time to generate first token for successful responses
+Instrument: histogram
+Unit: s
+"""
+
+
+def create_gen_ai_server_time_to_first_token(meter: Meter) -> Histogram:
+ """Time to generate first token for successful responses"""
+ return meter.create_histogram(
+ name=GEN_AI_SERVER_TIME_TO_FIRST_TOKEN,
+ description="Time to generate first token for successful responses",
+ unit="s",
+ )
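gen_ai.client.token.usage is recorded once per token type; a sketch assuming the token counts come from the model response (the attribute key and counts are shown only for illustration):

from opentelemetry import metrics
from opentelemetry.semconv._incubating.metrics.gen_ai_metrics import (
    create_gen_ai_client_token_usage,
)

meter = metrics.get_meter("example.gen_ai.client")
token_usage = create_gen_ai_client_token_usage(meter)

# One measurement for input tokens and one for output tokens of a single call.
token_usage.record(812, attributes={"gen_ai.token.type": "input"})
token_usage.record(64, attributes={"gen_ai.token.type": "output"})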
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/http_metrics.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/http_metrics.py
new file mode 100644
index 00000000..86d0317e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/http_metrics.py
@@ -0,0 +1,187 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from typing import Final
+
+from opentelemetry.metrics import Histogram, Meter, UpDownCounter
+
+HTTP_CLIENT_ACTIVE_REQUESTS: Final = "http.client.active_requests"
+"""
+Number of active HTTP requests
+Instrument: updowncounter
+Unit: {request}
+"""
+
+
+def create_http_client_active_requests(meter: Meter) -> UpDownCounter:
+ """Number of active HTTP requests"""
+ return meter.create_up_down_counter(
+ name=HTTP_CLIENT_ACTIVE_REQUESTS,
+ description="Number of active HTTP requests.",
+ unit="{request}",
+ )
+
+
+HTTP_CLIENT_CONNECTION_DURATION: Final = "http.client.connection.duration"
+"""
+The duration of the successfully established outbound HTTP connections
+Instrument: histogram
+Unit: s
+"""
+
+
+def create_http_client_connection_duration(meter: Meter) -> Histogram:
+ """The duration of the successfully established outbound HTTP connections"""
+ return meter.create_histogram(
+ name=HTTP_CLIENT_CONNECTION_DURATION,
+ description="The duration of the successfully established outbound HTTP connections.",
+ unit="s",
+ )
+
+
+HTTP_CLIENT_OPEN_CONNECTIONS: Final = "http.client.open_connections"
+"""
+Number of outbound HTTP connections that are currently active or idle on the client
+Instrument: updowncounter
+Unit: {connection}
+"""
+
+
+def create_http_client_open_connections(meter: Meter) -> UpDownCounter:
+ """Number of outbound HTTP connections that are currently active or idle on the client"""
+ return meter.create_up_down_counter(
+ name=HTTP_CLIENT_OPEN_CONNECTIONS,
+ description="Number of outbound HTTP connections that are currently active or idle on the client.",
+ unit="{connection}",
+ )
+
+
+HTTP_CLIENT_REQUEST_BODY_SIZE: Final = "http.client.request.body.size"
+"""
+Size of HTTP client request bodies
+Instrument: histogram
+Unit: By
+Note: The size of the request payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) header. For requests using transport encoding, this should be the compressed size.
+"""
+
+
+def create_http_client_request_body_size(meter: Meter) -> Histogram:
+ """Size of HTTP client request bodies"""
+ return meter.create_histogram(
+ name=HTTP_CLIENT_REQUEST_BODY_SIZE,
+ description="Size of HTTP client request bodies.",
+ unit="By",
+ )
+
+
+HTTP_CLIENT_REQUEST_DURATION: Final = "http.client.request.duration"
+"""
+Deprecated in favor of stable :py:const:`opentelemetry.semconv.metrics.http_metrics.HTTP_CLIENT_REQUEST_DURATION`.
+"""
+
+
+def create_http_client_request_duration(meter: Meter) -> Histogram:
+ """Duration of HTTP client requests"""
+ return meter.create_histogram(
+ name=HTTP_CLIENT_REQUEST_DURATION,
+ description="Duration of HTTP client requests.",
+ unit="s",
+ )
+
+
+HTTP_CLIENT_RESPONSE_BODY_SIZE: Final = "http.client.response.body.size"
+"""
+Size of HTTP client response bodies
+Instrument: histogram
+Unit: By
+Note: The size of the response payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) header. For requests using transport encoding, this should be the compressed size.
+"""
+
+
+def create_http_client_response_body_size(meter: Meter) -> Histogram:
+ """Size of HTTP client response bodies"""
+ return meter.create_histogram(
+ name=HTTP_CLIENT_RESPONSE_BODY_SIZE,
+ description="Size of HTTP client response bodies.",
+ unit="By",
+ )
+
+
+HTTP_SERVER_ACTIVE_REQUESTS: Final = "http.server.active_requests"
+"""
+Number of active HTTP server requests
+Instrument: updowncounter
+Unit: {request}
+"""
+
+
+def create_http_server_active_requests(meter: Meter) -> UpDownCounter:
+ """Number of active HTTP server requests"""
+ return meter.create_up_down_counter(
+ name=HTTP_SERVER_ACTIVE_REQUESTS,
+ description="Number of active HTTP server requests.",
+ unit="{request}",
+ )
+
+
+HTTP_SERVER_REQUEST_BODY_SIZE: Final = "http.server.request.body.size"
+"""
+Size of HTTP server request bodies
+Instrument: histogram
+Unit: By
+Note: The size of the request payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) header. For requests using transport encoding, this should be the compressed size.
+"""
+
+
+def create_http_server_request_body_size(meter: Meter) -> Histogram:
+ """Size of HTTP server request bodies"""
+ return meter.create_histogram(
+ name=HTTP_SERVER_REQUEST_BODY_SIZE,
+ description="Size of HTTP server request bodies.",
+ unit="By",
+ )
+
+
+HTTP_SERVER_REQUEST_DURATION: Final = "http.server.request.duration"
+"""
+Deprecated in favor of stable :py:const:`opentelemetry.semconv.metrics.http_metrics.HTTP_SERVER_REQUEST_DURATION`.
+"""
+
+
+def create_http_server_request_duration(meter: Meter) -> Histogram:
+ """Duration of HTTP server requests"""
+ return meter.create_histogram(
+ name=HTTP_SERVER_REQUEST_DURATION,
+ description="Duration of HTTP server requests.",
+ unit="s",
+ )
+
+
+HTTP_SERVER_RESPONSE_BODY_SIZE: Final = "http.server.response.body.size"
+"""
+Size of HTTP server response bodies
+Instrument: histogram
+Unit: By
+Note: The size of the response payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) header. For requests using transport encoding, this should be the compressed size.
+"""
+
+
+def create_http_server_response_body_size(meter: Meter) -> Histogram:
+ """Size of HTTP server response bodies"""
+ return meter.create_histogram(
+ name=HTTP_SERVER_RESPONSE_BODY_SIZE,
+ description="Size of HTTP server response bodies.",
+ unit="By",
+ )
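http.server.active_requests is an UpDownCounter that brackets each request; a sketch with illustrative attribute values (real HTTP instrumentation libraries do this for you):

from opentelemetry import metrics
from opentelemetry.semconv._incubating.metrics.http_metrics import (
    create_http_server_active_requests,
)

meter = metrics.get_meter("example.http.server")
active_requests = create_http_server_active_requests(meter)

attrs = {"http.request.method": "GET", "url.scheme": "https"}
active_requests.add(1, attributes=attrs)   # request started
# ... handle the request ...
active_requests.add(-1, attributes=attrs)  # request finished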
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/hw_metrics.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/hw_metrics.py
new file mode 100644
index 00000000..d06890fd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/hw_metrics.py
@@ -0,0 +1,190 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from typing import (
+ Callable,
+ Final,
+ Generator,
+ Iterable,
+ Optional,
+ Sequence,
+ Union,
+)
+
+from opentelemetry.metrics import (
+ CallbackOptions,
+ Counter,
+ Meter,
+ ObservableGauge,
+ Observation,
+ UpDownCounter,
+)
+
+# pylint: disable=invalid-name
+CallbackT = Union[
+ Callable[[CallbackOptions], Iterable[Observation]],
+ Generator[Iterable[Observation], CallbackOptions, None],
+]
+
+HW_ENERGY: Final = "hw.energy"
+"""
+Energy consumed by the component
+Instrument: counter
+Unit: J
+"""
+
+
+def create_hw_energy(meter: Meter) -> Counter:
+ """Energy consumed by the component"""
+ return meter.create_counter(
+ name=HW_ENERGY,
+ description="Energy consumed by the component",
+ unit="J",
+ )
+
+
+HW_ERRORS: Final = "hw.errors"
+"""
+Number of errors encountered by the component
+Instrument: counter
+Unit: {error}
+"""
+
+
+def create_hw_errors(meter: Meter) -> Counter:
+ """Number of errors encountered by the component"""
+ return meter.create_counter(
+ name=HW_ERRORS,
+ description="Number of errors encountered by the component",
+ unit="{error}",
+ )
+
+
+HW_HOST_AMBIENT_TEMPERATURE: Final = "hw.host.ambient_temperature"
+"""
+Ambient (external) temperature of the physical host
+Instrument: gauge
+Unit: Cel
+"""
+
+
+def create_hw_host_ambient_temperature(
+ meter: Meter, callbacks: Optional[Sequence[CallbackT]]
+) -> ObservableGauge:
+ """Ambient (external) temperature of the physical host"""
+ return meter.create_observable_gauge(
+ name=HW_HOST_AMBIENT_TEMPERATURE,
+ callbacks=callbacks,
+ description="Ambient (external) temperature of the physical host",
+ unit="Cel",
+ )
+
+
+HW_HOST_ENERGY: Final = "hw.host.energy"
+"""
+Total energy consumed by the entire physical host, in joules
+Instrument: counter
+Unit: J
+Note: The overall energy usage of a host MUST be reported using the specific `hw.host.energy` and `hw.host.power` metrics **only**, instead of the generic `hw.energy` and `hw.power` described in the previous section, to prevent summing up overlapping values.
+"""
+
+
+def create_hw_host_energy(meter: Meter) -> Counter:
+ """Total energy consumed by the entire physical host, in joules"""
+ return meter.create_counter(
+ name=HW_HOST_ENERGY,
+ description="Total energy consumed by the entire physical host, in joules",
+ unit="J",
+ )
+
+
+HW_HOST_HEATING_MARGIN: Final = "hw.host.heating_margin"
+"""
+By how many degrees Celsius the temperature of the physical host can be increased, before reaching a warning threshold on one of the internal sensors
+Instrument: gauge
+Unit: Cel
+"""
+
+
+def create_hw_host_heating_margin(
+ meter: Meter, callbacks: Optional[Sequence[CallbackT]]
+) -> ObservableGauge:
+ """By how many degrees Celsius the temperature of the physical host can be increased, before reaching a warning threshold on one of the internal sensors"""
+ return meter.create_observable_gauge(
+ name=HW_HOST_HEATING_MARGIN,
+ callbacks=callbacks,
+ description="By how many degrees Celsius the temperature of the physical host can be increased, before reaching a warning threshold on one of the internal sensors",
+ unit="Cel",
+ )
+
+
+HW_HOST_POWER: Final = "hw.host.power"
+"""
+Instantaneous power consumed by the entire physical host in Watts (`hw.host.energy` is preferred)
+Instrument: gauge
+Unit: W
+Note: The overall energy usage of a host MUST be reported using the specific `hw.host.energy` and `hw.host.power` metrics **only**, instead of the generic `hw.energy` and `hw.power` described in the previous section, to prevent summing up overlapping values.
+"""
+
+
+def create_hw_host_power(
+ meter: Meter, callbacks: Optional[Sequence[CallbackT]]
+) -> ObservableGauge:
+ """Instantaneous power consumed by the entire physical host in Watts (`hw.host.energy` is preferred)"""
+ return meter.create_observable_gauge(
+ name=HW_HOST_POWER,
+ callbacks=callbacks,
+ description="Instantaneous power consumed by the entire physical host in Watts (`hw.host.energy` is preferred)",
+ unit="W",
+ )
+
+
+HW_POWER: Final = "hw.power"
+"""
+Instantaneous power consumed by the component
+Instrument: gauge
+Unit: W
+Note: It is recommended to report `hw.energy` instead of `hw.power` when possible.
+"""
+
+
+def create_hw_power(
+ meter: Meter, callbacks: Optional[Sequence[CallbackT]]
+) -> ObservableGauge:
+ """Instantaneous power consumed by the component"""
+ return meter.create_observable_gauge(
+ name=HW_POWER,
+ callbacks=callbacks,
+ description="Instantaneous power consumed by the component",
+ unit="W",
+ )
+
+
+HW_STATUS: Final = "hw.status"
+"""
+Operational status: `1` (true) or `0` (false) for each of the possible states
+Instrument: updowncounter
+Unit: 1
+Note: `hw.status` is currently specified as an *UpDownCounter* but would ideally be represented using a [*StateSet* as defined in OpenMetrics](https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#stateset). This semantic convention will be updated once *StateSet* is specified in OpenTelemetry. This planned change is not expected to have any consequence on the way users query their timeseries backend to retrieve the values of `hw.status` over time.
+"""
+
+
+def create_hw_status(meter: Meter) -> UpDownCounter:
+ """Operational status: `1` (true) or `0` (false) for each of the possible states"""
+ return meter.create_up_down_counter(
+ name=HW_STATUS,
+ description="Operational status: `1` (true) or `0` (false) for each of the possible states",
+ unit="1",
+ )
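The hardware gauges are asynchronous as well; a sketch for hw.host.power, where the wattage and host identifier would come from a real sensor and are placeholders here:

from opentelemetry import metrics
from opentelemetry.metrics import CallbackOptions, Observation
from opentelemetry.semconv._incubating.metrics.hw_metrics import create_hw_host_power

def observe_host_power(options: CallbackOptions):
    # Replace the constant with an actual power reading in Watts.
    yield Observation(185.0, {"host.id": "host-01"})

meter = metrics.get_meter("example.hw")
host_power = create_hw_host_power(meter, callbacks=[observe_host_power])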
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/k8s_metrics.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/k8s_metrics.py
new file mode 100644
index 00000000..760d4d55
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/k8s_metrics.py
@@ -0,0 +1,871 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from typing import (
+ Callable,
+ Final,
+ Generator,
+ Iterable,
+ Optional,
+ Sequence,
+ Union,
+)
+
+from opentelemetry.metrics import (
+ CallbackOptions,
+ Counter,
+ Meter,
+ ObservableGauge,
+ Observation,
+ UpDownCounter,
+)
+
+# pylint: disable=invalid-name
+CallbackT = Union[
+ Callable[[CallbackOptions], Iterable[Observation]],
+ Generator[Iterable[Observation], CallbackOptions, None],
+]
+
+K8S_CRONJOB_ACTIVE_JOBS: Final = "k8s.cronjob.active_jobs"
+"""
+The number of actively running jobs for a cronjob
+Instrument: updowncounter
+Unit: {job}
+Note: This metric aligns with the `active` field of the
+[K8s CronJobStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#cronjobstatus-v1-batch).
+
+This metric SHOULD, at a minimum, be reported against a
+[`k8s.cronjob`](../resource/k8s.md#cronjob) resource.
+"""
+
+
+def create_k8s_cronjob_active_jobs(meter: Meter) -> UpDownCounter:
+ """The number of actively running jobs for a cronjob"""
+ return meter.create_up_down_counter(
+ name=K8S_CRONJOB_ACTIVE_JOBS,
+ description="The number of actively running jobs for a cronjob",
+ unit="{job}",
+ )
+
+
+K8S_DAEMONSET_CURRENT_SCHEDULED_NODES: Final = (
+ "k8s.daemonset.current_scheduled_nodes"
+)
+"""
+Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod
+Instrument: updowncounter
+Unit: {node}
+Note: This metric aligns with the `currentNumberScheduled` field of the
+[K8s DaemonSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps).
+
+This metric SHOULD, at a minimum, be reported against a
+[`k8s.daemonset`](../resource/k8s.md#daemonset) resource.
+"""
+
+
+def create_k8s_daemonset_current_scheduled_nodes(
+ meter: Meter,
+) -> UpDownCounter:
+ """Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod"""
+ return meter.create_up_down_counter(
+ name=K8S_DAEMONSET_CURRENT_SCHEDULED_NODES,
+ description="Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod",
+ unit="{node}",
+ )
+
+
+K8S_DAEMONSET_DESIRED_SCHEDULED_NODES: Final = (
+ "k8s.daemonset.desired_scheduled_nodes"
+)
+"""
+Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod)
+Instrument: updowncounter
+Unit: {node}
+Note: This metric aligns with the `desiredNumberScheduled` field of the
+[K8s DaemonSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps).
+
+This metric SHOULD, at a minimum, be reported against a
+[`k8s.daemonset`](../resource/k8s.md#daemonset) resource.
+"""
+
+
+def create_k8s_daemonset_desired_scheduled_nodes(
+ meter: Meter,
+) -> UpDownCounter:
+ """Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod)"""
+ return meter.create_up_down_counter(
+ name=K8S_DAEMONSET_DESIRED_SCHEDULED_NODES,
+ description="Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod)",
+ unit="{node}",
+ )
+
+
+K8S_DAEMONSET_MISSCHEDULED_NODES: Final = "k8s.daemonset.misscheduled_nodes"
+"""
+Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod
+Instrument: updowncounter
+Unit: {node}
+Note: This metric aligns with the `numberMisscheduled` field of the
+[K8s DaemonSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps).
+
+This metric SHOULD, at a minimum, be reported against a
+[`k8s.daemonset`](../resource/k8s.md#daemonset) resource.
+"""
+
+
+def create_k8s_daemonset_misscheduled_nodes(meter: Meter) -> UpDownCounter:
+ """Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod"""
+ return meter.create_up_down_counter(
+ name=K8S_DAEMONSET_MISSCHEDULED_NODES,
+ description="Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod",
+ unit="{node}",
+ )
+
+
+K8S_DAEMONSET_READY_NODES: Final = "k8s.daemonset.ready_nodes"
+"""
+Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready
+Instrument: updowncounter
+Unit: {node}
+Note: This metric aligns with the `numberReady` field of the
+[K8s DaemonSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps).
+
+This metric SHOULD, at a minimum, be reported against a
+[`k8s.daemonset`](../resource/k8s.md#daemonset) resource.
+"""
+
+
+def create_k8s_daemonset_ready_nodes(meter: Meter) -> UpDownCounter:
+ """Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready"""
+ return meter.create_up_down_counter(
+ name=K8S_DAEMONSET_READY_NODES,
+ description="Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready",
+ unit="{node}",
+ )
+
+
+K8S_DEPLOYMENT_AVAILABLE_PODS: Final = "k8s.deployment.available_pods"
+"""
+Total number of available replica pods (ready for at least minReadySeconds) targeted by this deployment
+Instrument: updowncounter
+Unit: {pod}
+Note: This metric aligns with the `availableReplicas` field of the
+[K8s DeploymentStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#deploymentstatus-v1-apps).
+
+This metric SHOULD, at a minimum, be reported against a
+[`k8s.deployment`](../resource/k8s.md#deployment) resource.
+"""
+
+
+def create_k8s_deployment_available_pods(meter: Meter) -> UpDownCounter:
+ """Total number of available replica pods (ready for at least minReadySeconds) targeted by this deployment"""
+ return meter.create_up_down_counter(
+ name=K8S_DEPLOYMENT_AVAILABLE_PODS,
+ description="Total number of available replica pods (ready for at least minReadySeconds) targeted by this deployment",
+ unit="{pod}",
+ )
+
+
+K8S_DEPLOYMENT_DESIRED_PODS: Final = "k8s.deployment.desired_pods"
+"""
+Number of desired replica pods in this deployment
+Instrument: updowncounter
+Unit: {pod}
+Note: This metric aligns with the `replicas` field of the
+[K8s DeploymentSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#deploymentspec-v1-apps).
+
+This metric SHOULD, at a minimum, be reported against a
+[`k8s.deployment`](../resource/k8s.md#deployment) resource.
+"""
+
+
+def create_k8s_deployment_desired_pods(meter: Meter) -> UpDownCounter:
+ """Number of desired replica pods in this deployment"""
+ return meter.create_up_down_counter(
+ name=K8S_DEPLOYMENT_DESIRED_PODS,
+ description="Number of desired replica pods in this deployment",
+ unit="{pod}",
+ )
+
+
+K8S_HPA_CURRENT_PODS: Final = "k8s.hpa.current_pods"
+"""
+Current number of replica pods managed by this horizontal pod autoscaler, as last seen by the autoscaler
+Instrument: updowncounter
+Unit: {pod}
+Note: This metric aligns with the `currentReplicas` field of the
+[K8s HorizontalPodAutoscalerStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerstatus-v2-autoscaling)
+
+This metric SHOULD, at a minimum, be reported against a
+[`k8s.hpa`](../resource/k8s.md#horizontalpodautoscaler) resource.
+"""
+
+
+def create_k8s_hpa_current_pods(meter: Meter) -> UpDownCounter:
+ """Current number of replica pods managed by this horizontal pod autoscaler, as last seen by the autoscaler"""
+ return meter.create_up_down_counter(
+ name=K8S_HPA_CURRENT_PODS,
+ description="Current number of replica pods managed by this horizontal pod autoscaler, as last seen by the autoscaler",
+ unit="{pod}",
+ )
+
+
+K8S_HPA_DESIRED_PODS: Final = "k8s.hpa.desired_pods"
+"""
+Desired number of replica pods managed by this horizontal pod autoscaler, as last calculated by the autoscaler
+Instrument: updowncounter
+Unit: {pod}
+Note: This metric aligns with the `desiredReplicas` field of the
+[K8s HorizontalPodAutoscalerStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerstatus-v2-autoscaling)
+
+This metric SHOULD, at a minimum, be reported against a
+[`k8s.hpa`](../resource/k8s.md#horizontalpodautoscaler) resource.
+"""
+
+
+def create_k8s_hpa_desired_pods(meter: Meter) -> UpDownCounter:
+ """Desired number of replica pods managed by this horizontal pod autoscaler, as last calculated by the autoscaler"""
+ return meter.create_up_down_counter(
+ name=K8S_HPA_DESIRED_PODS,
+ description="Desired number of replica pods managed by this horizontal pod autoscaler, as last calculated by the autoscaler",
+ unit="{pod}",
+ )
+
+
+K8S_HPA_MAX_PODS: Final = "k8s.hpa.max_pods"
+"""
+The upper limit for the number of replica pods to which the autoscaler can scale up
+Instrument: updowncounter
+Unit: {pod}
+Note: This metric aligns with the `maxReplicas` field of the
+[K8s HorizontalPodAutoscalerSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerspec-v2-autoscaling)
+
+This metric SHOULD, at a minimum, be reported against a
+[`k8s.hpa`](../resource/k8s.md#horizontalpodautoscaler) resource.
+"""
+
+
+def create_k8s_hpa_max_pods(meter: Meter) -> UpDownCounter:
+ """The upper limit for the number of replica pods to which the autoscaler can scale up"""
+ return meter.create_up_down_counter(
+ name=K8S_HPA_MAX_PODS,
+ description="The upper limit for the number of replica pods to which the autoscaler can scale up",
+ unit="{pod}",
+ )
+
+
+K8S_HPA_MIN_PODS: Final = "k8s.hpa.min_pods"
+"""
+The lower limit for the number of replica pods to which the autoscaler can scale down
+Instrument: updowncounter
+Unit: {pod}
+Note: This metric aligns with the `minReplicas` field of the
+[K8s HorizontalPodAutoscalerSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerspec-v2-autoscaling)
+
+This metric SHOULD, at a minimum, be reported against a
+[`k8s.hpa`](../resource/k8s.md#horizontalpodautoscaler) resource.
+"""
+
+
+def create_k8s_hpa_min_pods(meter: Meter) -> UpDownCounter:
+ """The lower limit for the number of replica pods to which the autoscaler can scale down"""
+ return meter.create_up_down_counter(
+ name=K8S_HPA_MIN_PODS,
+ description="The lower limit for the number of replica pods to which the autoscaler can scale down",
+ unit="{pod}",
+ )
+
+
+K8S_JOB_ACTIVE_PODS: Final = "k8s.job.active_pods"
+"""
+The number of pending and actively running pods for a job
+Instrument: updowncounter
+Unit: {pod}
+Note: This metric aligns with the `active` field of the
+[K8s JobStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobstatus-v1-batch).
+
+This metric SHOULD, at a minimum, be reported against a
+[`k8s.job`](../resource/k8s.md#job) resource.
+"""
+
+
+def create_k8s_job_active_pods(meter: Meter) -> UpDownCounter:
+ """The number of pending and actively running pods for a job"""
+ return meter.create_up_down_counter(
+ name=K8S_JOB_ACTIVE_PODS,
+ description="The number of pending and actively running pods for a job",
+ unit="{pod}",
+ )
+
+
+K8S_JOB_DESIRED_SUCCESSFUL_PODS: Final = "k8s.job.desired_successful_pods"
+"""
+The desired number of successfully finished pods the job should be run with
+Instrument: updowncounter
+Unit: {pod}
+Note: This metric aligns with the `completions` field of the
+[K8s JobSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobspec-v1-batch).
+
+This metric SHOULD, at a minimum, be reported against a
+[`k8s.job`](../resource/k8s.md#job) resource.
+"""
+
+
+def create_k8s_job_desired_successful_pods(meter: Meter) -> UpDownCounter:
+ """The desired number of successfully finished pods the job should be run with"""
+ return meter.create_up_down_counter(
+ name=K8S_JOB_DESIRED_SUCCESSFUL_PODS,
+ description="The desired number of successfully finished pods the job should be run with",
+ unit="{pod}",
+ )
+
+
+K8S_JOB_FAILED_PODS: Final = "k8s.job.failed_pods"
+"""
+The number of pods which reached phase Failed for a job
+Instrument: updowncounter
+Unit: {pod}
+Note: This metric aligns with the `failed` field of the
+[K8s JobStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobstatus-v1-batch).
+
+This metric SHOULD, at a minimum, be reported against a
+[`k8s.job`](../resource/k8s.md#job) resource.
+"""
+
+
+def create_k8s_job_failed_pods(meter: Meter) -> UpDownCounter:
+ """The number of pods which reached phase Failed for a job"""
+ return meter.create_up_down_counter(
+ name=K8S_JOB_FAILED_PODS,
+ description="The number of pods which reached phase Failed for a job",
+ unit="{pod}",
+ )
+
+
+K8S_JOB_MAX_PARALLEL_PODS: Final = "k8s.job.max_parallel_pods"
+"""
+The max desired number of pods the job should run at any given time
+Instrument: updowncounter
+Unit: {pod}
+Note: This metric aligns with the `parallelism` field of the
+[K8s JobSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobspec-v1-batch).
+
+This metric SHOULD, at a minimum, be reported against a
+[`k8s.job`](../resource/k8s.md#job) resource.
+"""
+
+
+def create_k8s_job_max_parallel_pods(meter: Meter) -> UpDownCounter:
+ """The max desired number of pods the job should run at any given time"""
+ return meter.create_up_down_counter(
+ name=K8S_JOB_MAX_PARALLEL_PODS,
+ description="The max desired number of pods the job should run at any given time",
+ unit="{pod}",
+ )
+
+
+K8S_JOB_SUCCESSFUL_PODS: Final = "k8s.job.successful_pods"
+"""
+The number of pods which reached phase Succeeded for a job
+Instrument: updowncounter
+Unit: {pod}
+Note: This metric aligns with the `succeeded` field of the
+[K8s JobStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobstatus-v1-batch).
+
+This metric SHOULD, at a minimum, be reported against a
+[`k8s.job`](../resource/k8s.md#job) resource.
+"""
+
+
+def create_k8s_job_successful_pods(meter: Meter) -> UpDownCounter:
+ """The number of pods which reached phase Succeeded for a job"""
+ return meter.create_up_down_counter(
+ name=K8S_JOB_SUCCESSFUL_PODS,
+ description="The number of pods which reached phase Succeeded for a job",
+ unit="{pod}",
+ )
+
+
+K8S_NAMESPACE_PHASE: Final = "k8s.namespace.phase"
+"""
+Describes number of K8s namespaces that are currently in a given phase
+Instrument: updowncounter
+Unit: {namespace}
+Note: This metric SHOULD, at a minimum, be reported against a
+[`k8s.namespace`](../resource/k8s.md#namespace) resource.
+"""
+
+
+def create_k8s_namespace_phase(meter: Meter) -> UpDownCounter:
+ """Describes number of K8s namespaces that are currently in a given phase"""
+ return meter.create_up_down_counter(
+ name=K8S_NAMESPACE_PHASE,
+ description="Describes number of K8s namespaces that are currently in a given phase.",
+ unit="{namespace}",
+ )
+
+
+K8S_NODE_CPU_TIME: Final = "k8s.node.cpu.time"
+"""
+Total CPU time consumed
+Instrument: counter
+Unit: s
+Note: Total CPU time consumed by the specific Node on all available CPU cores.
+"""
+
+
+def create_k8s_node_cpu_time(meter: Meter) -> Counter:
+ """Total CPU time consumed"""
+ return meter.create_counter(
+ name=K8S_NODE_CPU_TIME,
+ description="Total CPU time consumed",
+ unit="s",
+ )
+
+
+K8S_NODE_CPU_USAGE: Final = "k8s.node.cpu.usage"
+"""
+Node's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs
+Instrument: gauge
+Unit: {cpu}
+Note: CPU usage of the specific Node on all available CPU cores, averaged over the sample window.
+"""
+
+
+def create_k8s_node_cpu_usage(
+ meter: Meter, callbacks: Optional[Sequence[CallbackT]]
+) -> ObservableGauge:
+ """Node's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs"""
+ return meter.create_observable_gauge(
+ name=K8S_NODE_CPU_USAGE,
+ callbacks=callbacks,
+ description="Node's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs",
+ unit="{cpu}",
+ )
+
+
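+# Editorial usage sketch (not part of the generated module): observable
+# instruments such as create_k8s_node_cpu_usage take callbacks that are run
+# on every metric collection. This relies on the CallbackOptions/Observation
+# imports at the top of this module; the value and node name are
+# hypothetical placeholders for data read from the kubelet stats endpoint.
+def _example_node_cpu_usage_callback(
+    options: CallbackOptions,
+) -> Iterable[Observation]:
+    # Usage is expressed in cpus, e.g. 1.25 means one and a quarter cores.
+    yield Observation(1.25, {"k8s.node.name": "node-a"})
+
+
+# Wiring: create_k8s_node_cpu_usage(meter, [_example_node_cpu_usage_callback])
+
+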
+K8S_NODE_MEMORY_USAGE: Final = "k8s.node.memory.usage"
+"""
+Memory usage of the Node
+Instrument: gauge
+Unit: By
+Note: Total memory usage of the Node.
+"""
+
+
+def create_k8s_node_memory_usage(
+ meter: Meter, callbacks: Optional[Sequence[CallbackT]]
+) -> ObservableGauge:
+ """Memory usage of the Node"""
+ return meter.create_observable_gauge(
+ name=K8S_NODE_MEMORY_USAGE,
+ callbacks=callbacks,
+ description="Memory usage of the Node",
+ unit="By",
+ )
+
+
+K8S_NODE_NETWORK_ERRORS: Final = "k8s.node.network.errors"
+"""
+Node network errors
+Instrument: counter
+Unit: {error}
+"""
+
+
+def create_k8s_node_network_errors(meter: Meter) -> Counter:
+ """Node network errors"""
+ return meter.create_counter(
+ name=K8S_NODE_NETWORK_ERRORS,
+ description="Node network errors",
+ unit="{error}",
+ )
+
+
+K8S_NODE_NETWORK_IO: Final = "k8s.node.network.io"
+"""
+Network bytes for the Node
+Instrument: counter
+Unit: By
+"""
+
+
+def create_k8s_node_network_io(meter: Meter) -> Counter:
+ """Network bytes for the Node"""
+ return meter.create_counter(
+ name=K8S_NODE_NETWORK_IO,
+ description="Network bytes for the Node",
+ unit="By",
+ )
+
+
+K8S_NODE_UPTIME: Final = "k8s.node.uptime"
+"""
+The time the Node has been running
+Instrument: gauge
+Unit: s
+Note: Instrumentations SHOULD use a gauge with type `double` and measure uptime in seconds as a floating point number with the highest precision available.
+The actual accuracy would depend on the instrumentation and operating system.
+"""
+
+
+def create_k8s_node_uptime(
+ meter: Meter, callbacks: Optional[Sequence[CallbackT]]
+) -> ObservableGauge:
+ """The time the Node has been running"""
+ return meter.create_observable_gauge(
+ name=K8S_NODE_UPTIME,
+ callbacks=callbacks,
+ description="The time the Node has been running",
+ unit="s",
+ )
+
+
+K8S_POD_CPU_TIME: Final = "k8s.pod.cpu.time"
+"""
+Total CPU time consumed
+Instrument: counter
+Unit: s
+Note: Total CPU time consumed by the specific Pod on all available CPU cores.
+"""
+
+
+def create_k8s_pod_cpu_time(meter: Meter) -> Counter:
+ """Total CPU time consumed"""
+ return meter.create_counter(
+ name=K8S_POD_CPU_TIME,
+ description="Total CPU time consumed",
+ unit="s",
+ )
+
+
+K8S_POD_CPU_USAGE: Final = "k8s.pod.cpu.usage"
+"""
+Pod's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs
+Instrument: gauge
+Unit: {cpu}
+Note: CPU usage of the specific Pod on all available CPU cores, averaged over the sample window.
+"""
+
+
+def create_k8s_pod_cpu_usage(
+ meter: Meter, callbacks: Optional[Sequence[CallbackT]]
+) -> ObservableGauge:
+ """Pod's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs"""
+ return meter.create_observable_gauge(
+ name=K8S_POD_CPU_USAGE,
+ callbacks=callbacks,
+ description="Pod's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs",
+ unit="{cpu}",
+ )
+
+
+K8S_POD_MEMORY_USAGE: Final = "k8s.pod.memory.usage"
+"""
+Memory usage of the Pod
+Instrument: gauge
+Unit: By
+Note: Total memory usage of the Pod.
+"""
+
+
+def create_k8s_pod_memory_usage(
+ meter: Meter, callbacks: Optional[Sequence[CallbackT]]
+) -> ObservableGauge:
+ """Memory usage of the Pod"""
+ return meter.create_observable_gauge(
+ name=K8S_POD_MEMORY_USAGE,
+ callbacks=callbacks,
+ description="Memory usage of the Pod",
+ unit="By",
+ )
+
+
+K8S_POD_NETWORK_ERRORS: Final = "k8s.pod.network.errors"
+"""
+Pod network errors
+Instrument: counter
+Unit: {error}
+"""
+
+
+def create_k8s_pod_network_errors(meter: Meter) -> Counter:
+ """Pod network errors"""
+ return meter.create_counter(
+ name=K8S_POD_NETWORK_ERRORS,
+ description="Pod network errors",
+ unit="{error}",
+ )
+
+
+K8S_POD_NETWORK_IO: Final = "k8s.pod.network.io"
+"""
+Network bytes for the Pod
+Instrument: counter
+Unit: By
+"""
+
+
+def create_k8s_pod_network_io(meter: Meter) -> Counter:
+ """Network bytes for the Pod"""
+ return meter.create_counter(
+ name=K8S_POD_NETWORK_IO,
+ description="Network bytes for the Pod",
+ unit="By",
+ )
+
+
+K8S_POD_UPTIME: Final = "k8s.pod.uptime"
+"""
+The time the Pod has been running
+Instrument: gauge
+Unit: s
+Note: Instrumentations SHOULD use a gauge with type `double` and measure uptime in seconds as a floating point number with the highest precision available.
+The actual accuracy would depend on the instrumentation and operating system.
+"""
+
+
+def create_k8s_pod_uptime(
+ meter: Meter, callbacks: Optional[Sequence[CallbackT]]
+) -> ObservableGauge:
+ """The time the Pod has been running"""
+ return meter.create_observable_gauge(
+ name=K8S_POD_UPTIME,
+ callbacks=callbacks,
+ description="The time the Pod has been running",
+ unit="s",
+ )
+
+
+K8S_REPLICASET_AVAILABLE_PODS: Final = "k8s.replicaset.available_pods"
+"""
+Total number of available replica pods (ready for at least minReadySeconds) targeted by this replicaset
+Instrument: updowncounter
+Unit: {pod}
+Note: This metric aligns with the `availableReplicas` field of the
+[K8s ReplicaSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicasetstatus-v1-apps).
+
+This metric SHOULD, at a minimum, be reported against a
+[`k8s.replicaset`](../resource/k8s.md#replicaset) resource.
+"""
+
+
+def create_k8s_replicaset_available_pods(meter: Meter) -> UpDownCounter:
+ """Total number of available replica pods (ready for at least minReadySeconds) targeted by this replicaset"""
+ return meter.create_up_down_counter(
+ name=K8S_REPLICASET_AVAILABLE_PODS,
+ description="Total number of available replica pods (ready for at least minReadySeconds) targeted by this replicaset",
+ unit="{pod}",
+ )
+
+
+K8S_REPLICASET_DESIRED_PODS: Final = "k8s.replicaset.desired_pods"
+"""
+Number of desired replica pods in this replicaset
+Instrument: updowncounter
+Unit: {pod}
+Note: This metric aligns with the `replicas` field of the
+[K8s ReplicaSetSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicasetspec-v1-apps).
+
+This metric SHOULD, at a minimum, be reported against a
+[`k8s.replicaset`](../resource/k8s.md#replicaset) resource.
+"""
+
+
+def create_k8s_replicaset_desired_pods(meter: Meter) -> UpDownCounter:
+ """Number of desired replica pods in this replicaset"""
+ return meter.create_up_down_counter(
+ name=K8S_REPLICASET_DESIRED_PODS,
+ description="Number of desired replica pods in this replicaset",
+ unit="{pod}",
+ )
+
+
+K8S_REPLICATION_CONTROLLER_AVAILABLE_PODS: Final = (
+ "k8s.replication_controller.available_pods"
+)
+"""
+Deprecated: Replaced by `k8s.replicationcontroller.available_pods`.
+"""
+
+
+def create_k8s_replication_controller_available_pods(
+ meter: Meter,
+) -> UpDownCounter:
+ """Deprecated, use `k8s.replicationcontroller.available_pods` instead"""
+ return meter.create_up_down_counter(
+ name=K8S_REPLICATION_CONTROLLER_AVAILABLE_PODS,
+ description="Deprecated, use `k8s.replicationcontroller.available_pods` instead.",
+ unit="{pod}",
+ )
+
+
+K8S_REPLICATION_CONTROLLER_DESIRED_PODS: Final = (
+ "k8s.replication_controller.desired_pods"
+)
+"""
+Deprecated: Replaced by `k8s.replicationcontroller.desired_pods`.
+"""
+
+
+def create_k8s_replication_controller_desired_pods(
+ meter: Meter,
+) -> UpDownCounter:
+ """Deprecated, use `k8s.replicationcontroller.desired_pods` instead"""
+ return meter.create_up_down_counter(
+ name=K8S_REPLICATION_CONTROLLER_DESIRED_PODS,
+ description="Deprecated, use `k8s.replicationcontroller.desired_pods` instead.",
+ unit="{pod}",
+ )
+
+
+K8S_REPLICATIONCONTROLLER_AVAILABLE_PODS: Final = (
+ "k8s.replicationcontroller.available_pods"
+)
+"""
+Total number of available replica pods (ready for at least minReadySeconds) targeted by this replication controller
+Instrument: updowncounter
+Unit: {pod}
+Note: This metric aligns with the `availableReplicas` field of the
+[K8s ReplicationControllerStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicationcontrollerstatus-v1-core)
+
+This metric SHOULD, at a minimum, be reported against a
+[`k8s.replicationcontroller`](../resource/k8s.md#replicationcontroller) resource.
+"""
+
+
+def create_k8s_replicationcontroller_available_pods(
+ meter: Meter,
+) -> UpDownCounter:
+ """Total number of available replica pods (ready for at least minReadySeconds) targeted by this replication controller"""
+ return meter.create_up_down_counter(
+ name=K8S_REPLICATIONCONTROLLER_AVAILABLE_PODS,
+ description="Total number of available replica pods (ready for at least minReadySeconds) targeted by this replication controller",
+ unit="{pod}",
+ )
+
+
+K8S_REPLICATIONCONTROLLER_DESIRED_PODS: Final = (
+ "k8s.replicationcontroller.desired_pods"
+)
+"""
+Number of desired replica pods in this replication controller
+Instrument: updowncounter
+Unit: {pod}
+Note: This metric aligns with the `replicas` field of the
+[K8s ReplicationControllerSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicationcontrollerspec-v1-core)
+
+This metric SHOULD, at a minimum, be reported against a
+[`k8s.replicationcontroller`](../resource/k8s.md#replicationcontroller) resource.
+"""
+
+
+def create_k8s_replicationcontroller_desired_pods(
+ meter: Meter,
+) -> UpDownCounter:
+ """Number of desired replica pods in this replication controller"""
+ return meter.create_up_down_counter(
+ name=K8S_REPLICATIONCONTROLLER_DESIRED_PODS,
+ description="Number of desired replica pods in this replication controller",
+ unit="{pod}",
+ )
+
+
+K8S_STATEFULSET_CURRENT_PODS: Final = "k8s.statefulset.current_pods"
+"""
+The number of replica pods created by the statefulset controller from the statefulset version indicated by currentRevision
+Instrument: updowncounter
+Unit: {pod}
+Note: This metric aligns with the `currentReplicas` field of the
+[K8s StatefulSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetstatus-v1-apps).
+
+This metric SHOULD, at a minimum, be reported against a
+[`k8s.statefulset`](../resource/k8s.md#statefulset) resource.
+"""
+
+
+def create_k8s_statefulset_current_pods(meter: Meter) -> UpDownCounter:
+ """The number of replica pods created by the statefulset controller from the statefulset version indicated by currentRevision"""
+ return meter.create_up_down_counter(
+ name=K8S_STATEFULSET_CURRENT_PODS,
+ description="The number of replica pods created by the statefulset controller from the statefulset version indicated by currentRevision",
+ unit="{pod}",
+ )
+
+
+K8S_STATEFULSET_DESIRED_PODS: Final = "k8s.statefulset.desired_pods"
+"""
+Number of desired replica pods in this statefulset
+Instrument: updowncounter
+Unit: {pod}
+Note: This metric aligns with the `replicas` field of the
+[K8s StatefulSetSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetspec-v1-apps).
+
+This metric SHOULD, at a minimum, be reported against a
+[`k8s.statefulset`](../resource/k8s.md#statefulset) resource.
+"""
+
+
+def create_k8s_statefulset_desired_pods(meter: Meter) -> UpDownCounter:
+ """Number of desired replica pods in this statefulset"""
+ return meter.create_up_down_counter(
+ name=K8S_STATEFULSET_DESIRED_PODS,
+ description="Number of desired replica pods in this statefulset",
+ unit="{pod}",
+ )
+
+
+K8S_STATEFULSET_READY_PODS: Final = "k8s.statefulset.ready_pods"
+"""
+The number of replica pods created for this statefulset with a Ready Condition
+Instrument: updowncounter
+Unit: {pod}
+Note: This metric aligns with the `readyReplicas` field of the
+[K8s StatefulSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetstatus-v1-apps).
+
+This metric SHOULD, at a minimum, be reported against a
+[`k8s.statefulset`](../resource/k8s.md#statefulset) resource.
+"""
+
+
+def create_k8s_statefulset_ready_pods(meter: Meter) -> UpDownCounter:
+ """The number of replica pods created for this statefulset with a Ready Condition"""
+ return meter.create_up_down_counter(
+ name=K8S_STATEFULSET_READY_PODS,
+ description="The number of replica pods created for this statefulset with a Ready Condition",
+ unit="{pod}",
+ )
+
+
+K8S_STATEFULSET_UPDATED_PODS: Final = "k8s.statefulset.updated_pods"
+"""
+Number of replica pods created by the statefulset controller from the statefulset version indicated by updateRevision
+Instrument: updowncounter
+Unit: {pod}
+Note: This metric aligns with the `updatedReplicas` field of the
+[K8s StatefulSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetstatus-v1-apps).
+
+This metric SHOULD, at a minimum, be reported against a
+[`k8s.statefulset`](../resource/k8s.md#statefulset) resource.
+"""
+
+
+def create_k8s_statefulset_updated_pods(meter: Meter) -> UpDownCounter:
+ """Number of replica pods created by the statefulset controller from the statefulset version indicated by updateRevision"""
+ return meter.create_up_down_counter(
+ name=K8S_STATEFULSET_UPDATED_PODS,
+ description="Number of replica pods created by the statefulset controller from the statefulset version indicated by updateRevision",
+ unit="{pod}",
+ )
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/messaging_metrics.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/messaging_metrics.py
new file mode 100644
index 00000000..0418743f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/messaging_metrics.py
@@ -0,0 +1,186 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from typing import Final
+
+from opentelemetry.metrics import Counter, Histogram, Meter
+
+MESSAGING_CLIENT_CONSUMED_MESSAGES: Final = (
+ "messaging.client.consumed.messages"
+)
+"""
+Number of messages that were delivered to the application
+Instrument: counter
+Unit: {message}
+Note: Records the number of messages pulled from the broker or number of messages dispatched to the application in push-based scenarios.
+The metric SHOULD be reported once per message delivery. For example, if receiving and processing operations are both instrumented for a single message delivery, this counter is incremented when the message is received and not reported when it is processed.
+"""
+
+
+def create_messaging_client_consumed_messages(meter: Meter) -> Counter:
+ """Number of messages that were delivered to the application"""
+ return meter.create_counter(
+ name=MESSAGING_CLIENT_CONSUMED_MESSAGES,
+ description="Number of messages that were delivered to the application.",
+ unit="{message}",
+ )
+
+
+MESSAGING_CLIENT_OPERATION_DURATION: Final = (
+ "messaging.client.operation.duration"
+)
+"""
+Duration of messaging operation initiated by a producer or consumer client
+Instrument: histogram
+Unit: s
+Note: This metric SHOULD NOT be used to report processing duration - processing duration is reported in `messaging.process.duration` metric.
+"""
+
+
+def create_messaging_client_operation_duration(meter: Meter) -> Histogram:
+ """Duration of messaging operation initiated by a producer or consumer client"""
+ return meter.create_histogram(
+ name=MESSAGING_CLIENT_OPERATION_DURATION,
+ description="Duration of messaging operation initiated by a producer or consumer client.",
+ unit="s",
+ )
+
+
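+# Editorial usage sketch (not part of the generated module): a consumer that
+# records one operation-duration sample per receive call and increments the
+# consumed-messages counter once per delivered message, per the notes above.
+# `broker_client` and the attribute values are hypothetical placeholders.
+def _example_instrument_receive(meter: Meter, broker_client) -> None:
+    import time
+
+    consumed = create_messaging_client_consumed_messages(meter)
+    duration = create_messaging_client_operation_duration(meter)
+    attrs = {
+        "messaging.system": "rabbitmq",
+        "messaging.destination.name": "orders",
+        "messaging.operation.name": "receive",
+    }
+    start = time.monotonic()
+    messages = broker_client.receive()  # hypothetical client call
+    duration.record(time.monotonic() - start, attrs)
+    for _message in messages:
+        consumed.add(1, attrs)
+
+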
+MESSAGING_CLIENT_PUBLISHED_MESSAGES: Final = (
+ "messaging.client.published.messages"
+)
+"""
+Deprecated: Replaced by `messaging.client.sent.messages`.
+"""
+
+
+def create_messaging_client_published_messages(meter: Meter) -> Counter:
+ """Deprecated. Use `messaging.client.sent.messages` instead"""
+ return meter.create_counter(
+ name=MESSAGING_CLIENT_PUBLISHED_MESSAGES,
+ description="Deprecated. Use `messaging.client.sent.messages` instead.",
+ unit="{message}",
+ )
+
+
+MESSAGING_CLIENT_SENT_MESSAGES: Final = "messaging.client.sent.messages"
+"""
+Number of messages producer attempted to send to the broker
+Instrument: counter
+Unit: {message}
+Note: This metric MUST NOT count messages that were created but haven't yet been sent.
+"""
+
+
+def create_messaging_client_sent_messages(meter: Meter) -> Counter:
+ """Number of messages producer attempted to send to the broker"""
+ return meter.create_counter(
+ name=MESSAGING_CLIENT_SENT_MESSAGES,
+ description="Number of messages producer attempted to send to the broker.",
+ unit="{message}",
+ )
+
+
+MESSAGING_PROCESS_DURATION: Final = "messaging.process.duration"
+"""
+Duration of processing operation
+Instrument: histogram
+Unit: s
+Note: This metric MUST be reported for operations with `messaging.operation.type` that matches `process`.
+"""
+
+
+def create_messaging_process_duration(meter: Meter) -> Histogram:
+ """Duration of processing operation"""
+ return meter.create_histogram(
+ name=MESSAGING_PROCESS_DURATION,
+ description="Duration of processing operation.",
+ unit="s",
+ )
+
+
+MESSAGING_PROCESS_MESSAGES: Final = "messaging.process.messages"
+"""
+Deprecated: Replaced by `messaging.client.consumed.messages`.
+"""
+
+
+def create_messaging_process_messages(meter: Meter) -> Counter:
+ """Deprecated. Use `messaging.client.consumed.messages` instead"""
+ return meter.create_counter(
+ name=MESSAGING_PROCESS_MESSAGES,
+ description="Deprecated. Use `messaging.client.consumed.messages` instead.",
+ unit="{message}",
+ )
+
+
+MESSAGING_PUBLISH_DURATION: Final = "messaging.publish.duration"
+"""
+Deprecated: Replaced by `messaging.client.operation.duration`.
+"""
+
+
+def create_messaging_publish_duration(meter: Meter) -> Histogram:
+ """Deprecated. Use `messaging.client.operation.duration` instead"""
+ return meter.create_histogram(
+ name=MESSAGING_PUBLISH_DURATION,
+ description="Deprecated. Use `messaging.client.operation.duration` instead.",
+ unit="s",
+ )
+
+
+MESSAGING_PUBLISH_MESSAGES: Final = "messaging.publish.messages"
+"""
+Deprecated: Replaced by `messaging.client.produced.messages`.
+"""
+
+
+def create_messaging_publish_messages(meter: Meter) -> Counter:
+ """Deprecated. Use `messaging.client.produced.messages` instead"""
+ return meter.create_counter(
+ name=MESSAGING_PUBLISH_MESSAGES,
+ description="Deprecated. Use `messaging.client.produced.messages` instead.",
+ unit="{message}",
+ )
+
+
+MESSAGING_RECEIVE_DURATION: Final = "messaging.receive.duration"
+"""
+Deprecated: Replaced by `messaging.client.operation.duration`.
+"""
+
+
+def create_messaging_receive_duration(meter: Meter) -> Histogram:
+ """Deprecated. Use `messaging.client.operation.duration` instead"""
+ return meter.create_histogram(
+ name=MESSAGING_RECEIVE_DURATION,
+ description="Deprecated. Use `messaging.client.operation.duration` instead.",
+ unit="s",
+ )
+
+
+MESSAGING_RECEIVE_MESSAGES: Final = "messaging.receive.messages"
+"""
+Deprecated: Replaced by `messaging.client.consumed.messages`.
+"""
+
+
+def create_messaging_receive_messages(meter: Meter) -> Counter:
+ """Deprecated. Use `messaging.client.consumed.messages` instead"""
+ return meter.create_counter(
+ name=MESSAGING_RECEIVE_MESSAGES,
+ description="Deprecated. Use `messaging.client.consumed.messages` instead.",
+ unit="{message}",
+ )
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/otel_metrics.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/otel_metrics.py
new file mode 100644
index 00000000..ab9a8f1f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/otel_metrics.py
@@ -0,0 +1,162 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from typing import Final
+
+from opentelemetry.metrics import Counter, Meter, UpDownCounter
+
+OTEL_SDK_EXPORTER_SPAN_EXPORTED_COUNT: Final = (
+ "otel.sdk.exporter.span.exported.count"
+)
+"""
+The number of spans for which the export has finished, either successful or failed
+Instrument: counter
+Unit: {span}
+Note: For successful exports, `error.type` MUST NOT be set. For failed exports, `error.type` must contain the failure cause.
+For exporters with partial success semantics (e.g. OTLP with `rejected_spans`), rejected spans must count as failed and only non-rejected spans count as success.
+If no rejection reason is available, `rejected` SHOULD be used as value for `error.type`.
+"""
+
+
+def create_otel_sdk_exporter_span_exported_count(meter: Meter) -> Counter:
+ """The number of spans for which the export has finished, either successful or failed"""
+ return meter.create_counter(
+ name=OTEL_SDK_EXPORTER_SPAN_EXPORTED_COUNT,
+ description="The number of spans for which the export has finished, either successful or failed",
+ unit="{span}",
+ )
+
+
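+# Editorial usage sketch (not part of the generated module): per the note
+# above, successful exports carry no `error.type`, failed exports carry the
+# failure cause, and partial-success rejections use "rejected". The counts
+# and the "timeout" cause are hypothetical.
+def _example_count_exported_spans(meter: Meter) -> None:
+    exported = create_otel_sdk_exporter_span_exported_count(meter)
+    exported.add(100)  # exported successfully
+    exported.add(5, {"error.type": "rejected"})  # rejected by the backend
+    exported.add(2, {"error.type": "timeout"})  # export attempt failed
+
+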
+OTEL_SDK_EXPORTER_SPAN_INFLIGHT_COUNT: Final = (
+ "otel.sdk.exporter.span.inflight.count"
+)
+"""
+The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)
+Instrument: updowncounter
+Unit: {span}
+Note: For successful exports, `error.type` MUST NOT be set. For failed exports, `error.type` must contain the failure cause.
+"""
+
+
+def create_otel_sdk_exporter_span_inflight_count(
+ meter: Meter,
+) -> UpDownCounter:
+ """The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)"""
+ return meter.create_up_down_counter(
+ name=OTEL_SDK_EXPORTER_SPAN_INFLIGHT_COUNT,
+ description="The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)",
+ unit="{span}",
+ )
+
+
+OTEL_SDK_PROCESSOR_SPAN_PROCESSED_COUNT: Final = (
+ "otel.sdk.processor.span.processed.count"
+)
+"""
+The number of spans for which the processing has finished, either successful or failed
+Instrument: counter
+Unit: {span}
+Note: For successful processing, `error.type` MUST NOT be set. For failed processing, `error.type` must contain the failure cause.
+For the SDK Simple and Batching Span Processor a span is considered to be processed already when it has been submitted to the exporter, not when the corresponding export call has finished.
+"""
+
+
+def create_otel_sdk_processor_span_processed_count(meter: Meter) -> Counter:
+ """The number of spans for which the processing has finished, either successful or failed"""
+ return meter.create_counter(
+ name=OTEL_SDK_PROCESSOR_SPAN_PROCESSED_COUNT,
+ description="The number of spans for which the processing has finished, either successful or failed",
+ unit="{span}",
+ )
+
+
+OTEL_SDK_PROCESSOR_SPAN_QUEUE_CAPACITY: Final = (
+ "otel.sdk.processor.span.queue.capacity"
+)
+"""
+The maximum number of spans the queue of a given instance of an SDK span processor can hold
+Instrument: updowncounter
+Unit: {span}
+Note: Only applies to span processors which use a queue, e.g. the SDK Batching Span Processor.
+"""
+
+
+def create_otel_sdk_processor_span_queue_capacity(
+ meter: Meter,
+) -> UpDownCounter:
+ """The maximum number of spans the queue of a given instance of an SDK span processor can hold"""
+ return meter.create_up_down_counter(
+ name=OTEL_SDK_PROCESSOR_SPAN_QUEUE_CAPACITY,
+ description="The maximum number of spans the queue of a given instance of an SDK span processor can hold",
+ unit="{span}",
+ )
+
+
+OTEL_SDK_PROCESSOR_SPAN_QUEUE_SIZE: Final = (
+ "otel.sdk.processor.span.queue.size"
+)
+"""
+The number of spans in the queue of a given instance of an SDK span processor
+Instrument: updowncounter
+Unit: {span}
+Note: Only applies to span processors which use a queue, e.g. the SDK Batching Span Processor.
+"""
+
+
+def create_otel_sdk_processor_span_queue_size(meter: Meter) -> UpDownCounter:
+ """The number of spans in the queue of a given instance of an SDK span processor"""
+ return meter.create_up_down_counter(
+ name=OTEL_SDK_PROCESSOR_SPAN_QUEUE_SIZE,
+ description="The number of spans in the queue of a given instance of an SDK span processor",
+ unit="{span}",
+ )
+
+
+OTEL_SDK_SPAN_ENDED_COUNT: Final = "otel.sdk.span.ended.count"
+"""
+The number of created spans for which the end operation was called
+Instrument: counter
+Unit: {span}
+Note: For spans with `recording=true`: Implementations MUST record both `otel.sdk.span.live.count` and `otel.sdk.span.ended.count`.
+For spans with `recording=false`: If implementations decide to record this metric, they MUST also record `otel.sdk.span.live.count`.
+"""
+
+
+def create_otel_sdk_span_ended_count(meter: Meter) -> Counter:
+ """The number of created spans for which the end operation was called"""
+ return meter.create_counter(
+ name=OTEL_SDK_SPAN_ENDED_COUNT,
+ description="The number of created spans for which the end operation was called",
+ unit="{span}",
+ )
+
+
+OTEL_SDK_SPAN_LIVE_COUNT: Final = "otel.sdk.span.live.count"
+"""
+The number of created spans for which the end operation has not been called yet
+Instrument: updowncounter
+Unit: {span}
+Note: For spans with `recording=true`: Implementations MUST record both `otel.sdk.span.live.count` and `otel.sdk.span.ended.count`.
+For spans with `recording=false`: If implementations decide to record this metric, they MUST also record `otel.sdk.span.ended.count`.
+"""
+
+
+def create_otel_sdk_span_live_count(meter: Meter) -> UpDownCounter:
+ """The number of created spans for which the end operation has not been called yet"""
+ return meter.create_up_down_counter(
+ name=OTEL_SDK_SPAN_LIVE_COUNT,
+ description="The number of created spans for which the end operation has not been called yet",
+ unit="{span}",
+ )
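+
+
+# Editorial usage sketch (not part of the generated module): per the notes on
+# the two span counters above, an SDK that records either metric for
+# recording spans records both, incrementing the live count on span start and
+# reversing it (while incrementing the ended count) on span end.
+def _example_track_span_lifecycle(meter: Meter) -> None:
+    live = create_otel_sdk_span_live_count(meter)
+    ended = create_otel_sdk_span_ended_count(meter)
+    live.add(1)  # a recording span was started
+    live.add(-1)  # ...and was later ended
+    ended.add(1)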
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/process_metrics.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/process_metrics.py
new file mode 100644
index 00000000..902d79de
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/process_metrics.py
@@ -0,0 +1,235 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from typing import (
+ Callable,
+ Final,
+ Generator,
+ Iterable,
+ Optional,
+ Sequence,
+ Union,
+)
+
+from opentelemetry.metrics import (
+ CallbackOptions,
+ Counter,
+ Meter,
+ ObservableGauge,
+ Observation,
+ UpDownCounter,
+)
+
+# pylint: disable=invalid-name
+CallbackT = Union[
+ Callable[[CallbackOptions], Iterable[Observation]],
+ Generator[Iterable[Observation], CallbackOptions, None],
+]
+
+PROCESS_CONTEXT_SWITCHES: Final = "process.context_switches"
+"""
+Number of times the process has been context switched
+Instrument: counter
+Unit: {context_switch}
+"""
+
+
+def create_process_context_switches(meter: Meter) -> Counter:
+ """Number of times the process has been context switched"""
+ return meter.create_counter(
+ name=PROCESS_CONTEXT_SWITCHES,
+ description="Number of times the process has been context switched.",
+ unit="{context_switch}",
+ )
+
+
+PROCESS_CPU_TIME: Final = "process.cpu.time"
+"""
+Total CPU seconds broken down by different states
+Instrument: counter
+Unit: s
+"""
+
+
+def create_process_cpu_time(meter: Meter) -> Counter:
+ """Total CPU seconds broken down by different states"""
+ return meter.create_counter(
+ name=PROCESS_CPU_TIME,
+ description="Total CPU seconds broken down by different states.",
+ unit="s",
+ )
+
+
+PROCESS_CPU_UTILIZATION: Final = "process.cpu.utilization"
+"""
+Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process
+Instrument: gauge
+Unit: 1
+"""
+
+
+def create_process_cpu_utilization(
+ meter: Meter, callbacks: Optional[Sequence[CallbackT]]
+) -> ObservableGauge:
+ """Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process"""
+ return meter.create_observable_gauge(
+ name=PROCESS_CPU_UTILIZATION,
+ callbacks=callbacks,
+ description="Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process.",
+ unit="1",
+ )
+
+
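+# Editorial usage sketch (not part of the generated module): the utilization
+# gauge reports the delta of process.cpu.time divided by the elapsed wall
+# time and the number of CPUs, as described above. `read_process_cpu_seconds`
+# is a hypothetical reader (it could be built on os.times()); the closure
+# keeps the previous sample between collections.
+def _example_cpu_utilization_callback(read_process_cpu_seconds, num_cpus: int):
+    import time
+
+    state = {"cpu": read_process_cpu_seconds(), "wall": time.monotonic()}
+
+    def callback(options: CallbackOptions) -> Iterable[Observation]:
+        cpu, wall = read_process_cpu_seconds(), time.monotonic()
+        elapsed = max(wall - state["wall"], 1e-9)
+        utilization = (cpu - state["cpu"]) / (elapsed * num_cpus)
+        state["cpu"], state["wall"] = cpu, wall
+        yield Observation(utilization)
+
+    return callback
+
+
+# Wiring: create_process_cpu_utilization(meter, [_example_cpu_utilization_callback(reader, 4)])
+
+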
+PROCESS_DISK_IO: Final = "process.disk.io"
+"""
+Disk bytes transferred
+Instrument: counter
+Unit: By
+"""
+
+
+def create_process_disk_io(meter: Meter) -> Counter:
+ """Disk bytes transferred"""
+ return meter.create_counter(
+ name=PROCESS_DISK_IO,
+ description="Disk bytes transferred.",
+ unit="By",
+ )
+
+
+PROCESS_MEMORY_USAGE: Final = "process.memory.usage"
+"""
+The amount of physical memory in use
+Instrument: updowncounter
+Unit: By
+"""
+
+
+def create_process_memory_usage(meter: Meter) -> UpDownCounter:
+ """The amount of physical memory in use"""
+ return meter.create_up_down_counter(
+ name=PROCESS_MEMORY_USAGE,
+ description="The amount of physical memory in use.",
+ unit="By",
+ )
+
+
+PROCESS_MEMORY_VIRTUAL: Final = "process.memory.virtual"
+"""
+The amount of committed virtual memory
+Instrument: updowncounter
+Unit: By
+"""
+
+
+def create_process_memory_virtual(meter: Meter) -> UpDownCounter:
+ """The amount of committed virtual memory"""
+ return meter.create_up_down_counter(
+ name=PROCESS_MEMORY_VIRTUAL,
+ description="The amount of committed virtual memory.",
+ unit="By",
+ )
+
+
+PROCESS_NETWORK_IO: Final = "process.network.io"
+"""
+Network bytes transferred
+Instrument: counter
+Unit: By
+"""
+
+
+def create_process_network_io(meter: Meter) -> Counter:
+ """Network bytes transferred"""
+ return meter.create_counter(
+ name=PROCESS_NETWORK_IO,
+ description="Network bytes transferred.",
+ unit="By",
+ )
+
+
+PROCESS_OPEN_FILE_DESCRIPTOR_COUNT: Final = (
+ "process.open_file_descriptor.count"
+)
+"""
+Number of file descriptors in use by the process
+Instrument: updowncounter
+Unit: {file_descriptor}
+"""
+
+
+def create_process_open_file_descriptor_count(meter: Meter) -> UpDownCounter:
+ """Number of file descriptors in use by the process"""
+ return meter.create_up_down_counter(
+ name=PROCESS_OPEN_FILE_DESCRIPTOR_COUNT,
+ description="Number of file descriptors in use by the process.",
+ unit="{file_descriptor}",
+ )
+
+
+PROCESS_PAGING_FAULTS: Final = "process.paging.faults"
+"""
+Number of page faults the process has made
+Instrument: counter
+Unit: {fault}
+"""
+
+
+def create_process_paging_faults(meter: Meter) -> Counter:
+ """Number of page faults the process has made"""
+ return meter.create_counter(
+ name=PROCESS_PAGING_FAULTS,
+ description="Number of page faults the process has made.",
+ unit="{fault}",
+ )
+
+
+PROCESS_THREAD_COUNT: Final = "process.thread.count"
+"""
+Process threads count
+Instrument: updowncounter
+Unit: {thread}
+"""
+
+
+def create_process_thread_count(meter: Meter) -> UpDownCounter:
+ """Process threads count"""
+ return meter.create_up_down_counter(
+ name=PROCESS_THREAD_COUNT,
+ description="Process threads count.",
+ unit="{thread}",
+ )
+
+
+PROCESS_UPTIME: Final = "process.uptime"
+"""
+The time the process has been running
+Instrument: gauge
+Unit: s
+Note: Instrumentations SHOULD use a gauge with type `double` and measure uptime in seconds as a floating point number with the highest precision available.
+The actual accuracy would depend on the instrumentation and operating system.
+"""
+
+
+def create_process_uptime(
+ meter: Meter, callbacks: Optional[Sequence[CallbackT]]
+) -> ObservableGauge:
+ """The time the process has been running"""
+ return meter.create_observable_gauge(
+ name=PROCESS_UPTIME,
+ callbacks=callbacks,
+ description="The time the process has been running.",
+ unit="s",
+ )
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/rpc_metrics.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/rpc_metrics.py
new file mode 100644
index 00000000..e3f4ad6e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/rpc_metrics.py
@@ -0,0 +1,211 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from typing import Final
+
+from opentelemetry.metrics import Histogram, Meter
+
+RPC_CLIENT_DURATION: Final = "rpc.client.duration"
+"""
+Measures the duration of outbound RPC
+Instrument: histogram
+Unit: ms
+Note: While streaming RPCs may record this metric as start-of-batch
+to end-of-batch, it's hard to interpret in practice.
+
+**Streaming**: N/A.
+"""
+
+
+def create_rpc_client_duration(meter: Meter) -> Histogram:
+ """Measures the duration of outbound RPC"""
+ return meter.create_histogram(
+ name=RPC_CLIENT_DURATION,
+ description="Measures the duration of outbound RPC.",
+ unit="ms",
+ )
+
+
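+# Editorial usage sketch (not part of the generated module): record one
+# duration sample per outbound call, in milliseconds as the unit above
+# requires. `stub.get_user` and the attribute values are hypothetical.
+def _example_time_outbound_rpc(meter: Meter, stub) -> None:
+    import time
+
+    duration = create_rpc_client_duration(meter)
+    attrs = {
+        "rpc.system": "grpc",
+        "rpc.service": "example.UserService",
+        "rpc.method": "GetUser",
+    }
+    start = time.monotonic()
+    stub.get_user()  # hypothetical RPC call
+    duration.record((time.monotonic() - start) * 1000.0, attrs)
+
+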
+RPC_CLIENT_REQUEST_SIZE: Final = "rpc.client.request.size"
+"""
+Measures the size of RPC request messages (uncompressed)
+Instrument: histogram
+Unit: By
+Note: **Streaming**: Recorded per message in a streaming batch.
+"""
+
+
+def create_rpc_client_request_size(meter: Meter) -> Histogram:
+ """Measures the size of RPC request messages (uncompressed)"""
+ return meter.create_histogram(
+ name=RPC_CLIENT_REQUEST_SIZE,
+ description="Measures the size of RPC request messages (uncompressed).",
+ unit="By",
+ )
+
+
+RPC_CLIENT_REQUESTS_PER_RPC: Final = "rpc.client.requests_per_rpc"
+"""
+Measures the number of messages received per RPC
+Instrument: histogram
+Unit: {count}
+Note: Should be 1 for all non-streaming RPCs.
+
+**Streaming**: This metric is required for server and client streaming RPCs.
+"""
+
+
+def create_rpc_client_requests_per_rpc(meter: Meter) -> Histogram:
+ """Measures the number of messages received per RPC"""
+ return meter.create_histogram(
+ name=RPC_CLIENT_REQUESTS_PER_RPC,
+ description="Measures the number of messages received per RPC.",
+ unit="{count}",
+ )
+
+
+RPC_CLIENT_RESPONSE_SIZE: Final = "rpc.client.response.size"
+"""
+Measures the size of RPC response messages (uncompressed)
+Instrument: histogram
+Unit: By
+Note: **Streaming**: Recorded per response in a streaming batch.
+"""
+
+
+def create_rpc_client_response_size(meter: Meter) -> Histogram:
+ """Measures the size of RPC response messages (uncompressed)"""
+ return meter.create_histogram(
+ name=RPC_CLIENT_RESPONSE_SIZE,
+ description="Measures the size of RPC response messages (uncompressed).",
+ unit="By",
+ )
+
+
+RPC_CLIENT_RESPONSES_PER_RPC: Final = "rpc.client.responses_per_rpc"
+"""
+Measures the number of messages sent per RPC
+Instrument: histogram
+Unit: {count}
+Note: Should be 1 for all non-streaming RPCs.
+
+**Streaming**: This metric is required for server and client streaming RPCs.
+"""
+
+
+def create_rpc_client_responses_per_rpc(meter: Meter) -> Histogram:
+ """Measures the number of messages sent per RPC"""
+ return meter.create_histogram(
+ name=RPC_CLIENT_RESPONSES_PER_RPC,
+ description="Measures the number of messages sent per RPC.",
+ unit="{count}",
+ )
+
+
+RPC_SERVER_DURATION: Final = "rpc.server.duration"
+"""
+Measures the duration of inbound RPC
+Instrument: histogram
+Unit: ms
+Note: While streaming RPCs may record this metric as start-of-batch
+to end-of-batch, it's hard to interpret in practice.
+
+**Streaming**: N/A.
+"""
+
+
+def create_rpc_server_duration(meter: Meter) -> Histogram:
+ """Measures the duration of inbound RPC"""
+ return meter.create_histogram(
+ name=RPC_SERVER_DURATION,
+ description="Measures the duration of inbound RPC.",
+ unit="ms",
+ )
+
+
+RPC_SERVER_REQUEST_SIZE: Final = "rpc.server.request.size"
+"""
+Measures the size of RPC request messages (uncompressed)
+Instrument: histogram
+Unit: By
+Note: **Streaming**: Recorded per message in a streaming batch.
+"""
+
+
+def create_rpc_server_request_size(meter: Meter) -> Histogram:
+ """Measures the size of RPC request messages (uncompressed)"""
+ return meter.create_histogram(
+ name=RPC_SERVER_REQUEST_SIZE,
+ description="Measures the size of RPC request messages (uncompressed).",
+ unit="By",
+ )
+
+
+RPC_SERVER_REQUESTS_PER_RPC: Final = "rpc.server.requests_per_rpc"
+"""
+Measures the number of messages received per RPC
+Instrument: histogram
+Unit: {count}
+Note: Should be 1 for all non-streaming RPCs.
+
+**Streaming**: This metric is required for server and client streaming RPCs.
+"""
+
+
+def create_rpc_server_requests_per_rpc(meter: Meter) -> Histogram:
+ """Measures the number of messages received per RPC"""
+ return meter.create_histogram(
+ name=RPC_SERVER_REQUESTS_PER_RPC,
+ description="Measures the number of messages received per RPC.",
+ unit="{count}",
+ )
+
+
+RPC_SERVER_RESPONSE_SIZE: Final = "rpc.server.response.size"
+"""
+Measures the size of RPC response messages (uncompressed)
+Instrument: histogram
+Unit: By
+Note: **Streaming**: Recorded per response in a streaming batch.
+"""
+
+
+def create_rpc_server_response_size(meter: Meter) -> Histogram:
+ """Measures the size of RPC response messages (uncompressed)"""
+ return meter.create_histogram(
+ name=RPC_SERVER_RESPONSE_SIZE,
+ description="Measures the size of RPC response messages (uncompressed).",
+ unit="By",
+ )
+
+
+RPC_SERVER_RESPONSES_PER_RPC: Final = "rpc.server.responses_per_rpc"
+"""
+Measures the number of messages sent per RPC
+Instrument: histogram
+Unit: {count}
+Note: Should be 1 for all non-streaming RPCs.
+
+**Streaming**: This metric is required for server and client streaming RPCs.
+"""
+
+
+def create_rpc_server_responses_per_rpc(meter: Meter) -> Histogram:
+ """Measures the number of messages sent per RPC"""
+ return meter.create_histogram(
+ name=RPC_SERVER_RESPONSES_PER_RPC,
+ description="Measures the number of messages sent per RPC.",
+ unit="{count}",
+ )
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/system_metrics.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/system_metrics.py
new file mode 100644
index 00000000..df2a6571
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/system_metrics.py
@@ -0,0 +1,611 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from typing import (
+ Callable,
+ Final,
+ Generator,
+ Iterable,
+ Optional,
+ Sequence,
+ Union,
+)
+
+from opentelemetry.metrics import (
+ CallbackOptions,
+ Counter,
+ Meter,
+ ObservableGauge,
+ Observation,
+ UpDownCounter,
+)
+
+# pylint: disable=invalid-name
+CallbackT = Union[
+ Callable[[CallbackOptions], Iterable[Observation]],
+ Generator[Iterable[Observation], CallbackOptions, None],
+]
+
+SYSTEM_CPU_FREQUENCY: Final = "system.cpu.frequency"
+"""
+Deprecated: Replaced by `cpu.frequency`.
+"""
+
+
+def create_system_cpu_frequency(
+ meter: Meter, callbacks: Optional[Sequence[CallbackT]]
+) -> ObservableGauge:
+ """Deprecated. Use `cpu.frequency` instead"""
+ return meter.create_observable_gauge(
+ name=SYSTEM_CPU_FREQUENCY,
+ callbacks=callbacks,
+ description="Deprecated. Use `cpu.frequency` instead.",
+ unit="{Hz}",
+ )
+
+
+SYSTEM_CPU_LOGICAL_COUNT: Final = "system.cpu.logical.count"
+"""
+Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking
+Instrument: updowncounter
+Unit: {cpu}
+Note: Calculated by multiplying the number of sockets by the number of cores per socket, and then by the number of threads per core.
+"""
+
+
+def create_system_cpu_logical_count(meter: Meter) -> UpDownCounter:
+ """Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking"""
+ return meter.create_up_down_counter(
+ name=SYSTEM_CPU_LOGICAL_COUNT,
+ description="Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking",
+ unit="{cpu}",
+ )
+
+
+SYSTEM_CPU_PHYSICAL_COUNT: Final = "system.cpu.physical.count"
+"""
+Reports the number of actual physical processor cores on the hardware
+Instrument: updowncounter
+Unit: {cpu}
+Note: Calculated by multiplying the number of sockets by the number of cores per socket.
+"""
+
+
+def create_system_cpu_physical_count(meter: Meter) -> UpDownCounter:
+ """Reports the number of actual physical processor cores on the hardware"""
+ return meter.create_up_down_counter(
+ name=SYSTEM_CPU_PHYSICAL_COUNT,
+ description="Reports the number of actual physical processor cores on the hardware",
+ unit="{cpu}",
+ )
+
+
+SYSTEM_CPU_TIME: Final = "system.cpu.time"
+"""
+Deprecated: Replaced by `cpu.time`.
+"""
+
+
+def create_system_cpu_time(meter: Meter) -> Counter:
+ """Deprecated. Use `cpu.time` instead"""
+ return meter.create_counter(
+ name=SYSTEM_CPU_TIME,
+ description="Deprecated. Use `cpu.time` instead.",
+ unit="s",
+ )
+
+
+SYSTEM_CPU_UTILIZATION: Final = "system.cpu.utilization"
+"""
+Deprecated: Replaced by `cpu.utilization`.
+"""
+
+
+def create_system_cpu_utilization(
+ meter: Meter, callbacks: Optional[Sequence[CallbackT]]
+) -> ObservableGauge:
+ """Deprecated. Use `cpu.utilization` instead"""
+ return meter.create_observable_gauge(
+ name=SYSTEM_CPU_UTILIZATION,
+ callbacks=callbacks,
+ description="Deprecated. Use `cpu.utilization` instead.",
+ unit="1",
+ )
+
+
+SYSTEM_DISK_IO: Final = "system.disk.io"
+"""
+Instrument: counter
+Unit: By
+"""
+
+
+def create_system_disk_io(meter: Meter) -> Counter:
+ return meter.create_counter(
+ name=SYSTEM_DISK_IO,
+ description="",
+ unit="By",
+ )
+
+
+SYSTEM_DISK_IO_TIME: Final = "system.disk.io_time"
+"""
+Time disk spent activated
+Instrument: counter
+Unit: s
+Note: The real elapsed time ("wall clock") used in the I/O path (time from operations running in parallel are not counted). Measured as:
+
+- Linux: Field 13 from [procfs-diskstats](https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats)
+- Windows: The complement of
+ ["Disk\\% Idle Time"](https://learn.microsoft.com/archive/blogs/askcore/windows-performance-monitor-disk-counters-explained#windows-performance-monitor-disk-counters-explained)
+ performance counter: `uptime * (100 - "Disk\\% Idle Time") / 100`.
+"""
+
+
+def create_system_disk_io_time(meter: Meter) -> Counter:
+ """Time disk spent activated"""
+ return meter.create_counter(
+ name=SYSTEM_DISK_IO_TIME,
+ description="Time disk spent activated",
+ unit="s",
+ )
+
+
+SYSTEM_DISK_LIMIT: Final = "system.disk.limit"
+"""
+The total storage capacity of the disk
+Instrument: updowncounter
+Unit: By
+"""
+
+
+def create_system_disk_limit(meter: Meter) -> UpDownCounter:
+ """The total storage capacity of the disk"""
+ return meter.create_up_down_counter(
+ name=SYSTEM_DISK_LIMIT,
+ description="The total storage capacity of the disk",
+ unit="By",
+ )
+
+
+SYSTEM_DISK_MERGED: Final = "system.disk.merged"
+"""
+Instrument: counter
+Unit: {operation}
+"""
+
+
+def create_system_disk_merged(meter: Meter) -> Counter:
+ return meter.create_counter(
+ name=SYSTEM_DISK_MERGED,
+ description="",
+ unit="{operation}",
+ )
+
+
+SYSTEM_DISK_OPERATION_TIME: Final = "system.disk.operation_time"
+"""
+Sum of the time each operation took to complete
+Instrument: counter
+Unit: s
+Note: Because it is the sum of time each request took, parallel-issued requests each contribute to make the count grow. Measured as:
+
+- Linux: Fields 7 & 11 from [procfs-diskstats](https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats)
+- Windows: "Avg. Disk sec/Read" perf counter multiplied by "Disk Reads/sec" perf counter (similar for Writes).
+"""
+
+
+def create_system_disk_operation_time(meter: Meter) -> Counter:
+ """Sum of the time each operation took to complete"""
+ return meter.create_counter(
+ name=SYSTEM_DISK_OPERATION_TIME,
+ description="Sum of the time each operation took to complete",
+ unit="s",
+ )
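+
+
+# --- Editorial usage sketch (not part of the generated module) ---
+# Illustrates the Windows derivation from the note above: read time over an
+# interval is approximated as "Avg. Disk sec/Read" multiplied by
+# "Disk Reads/sec" and by the interval length. All inputs and the direction
+# attribute are illustrative.
+def _example_record_disk_read_time(
+    meter: Meter,
+    avg_disk_sec_per_read: float,
+    disk_reads_per_sec: float,
+    interval_seconds: float,
+) -> None:
+    operation_time = create_system_disk_operation_time(meter)
+    read_seconds = avg_disk_sec_per_read * disk_reads_per_sec * interval_seconds
+    operation_time.add(read_seconds, {"disk.io.direction": "read"})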
+
+
+SYSTEM_DISK_OPERATIONS: Final = "system.disk.operations"
+"""
+Instrument: counter
+Unit: {operation}
+"""
+
+
+def create_system_disk_operations(meter: Meter) -> Counter:
+ return meter.create_counter(
+ name=SYSTEM_DISK_OPERATIONS,
+ description="",
+ unit="{operation}",
+ )
+
+
+SYSTEM_FILESYSTEM_LIMIT: Final = "system.filesystem.limit"
+"""
+The total storage capacity of the filesystem
+Instrument: updowncounter
+Unit: By
+"""
+
+
+def create_system_filesystem_limit(meter: Meter) -> UpDownCounter:
+ """The total storage capacity of the filesystem"""
+ return meter.create_up_down_counter(
+ name=SYSTEM_FILESYSTEM_LIMIT,
+ description="The total storage capacity of the filesystem",
+ unit="By",
+ )
+
+
+SYSTEM_FILESYSTEM_USAGE: Final = "system.filesystem.usage"
+"""
+Reports a filesystem's space usage across different states
+Instrument: updowncounter
+Unit: By
+Note: The sum of all `system.filesystem.usage` values over the different `system.filesystem.state` attributes
+SHOULD equal the total storage capacity of the filesystem, that is `system.filesystem.limit`.
+"""
+
+
+def create_system_filesystem_usage(meter: Meter) -> UpDownCounter:
+ """Reports a filesystem's space usage across different states"""
+ return meter.create_up_down_counter(
+ name=SYSTEM_FILESYSTEM_USAGE,
+ description="Reports a filesystem's space usage across different states.",
+ unit="By",
+ )
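+
+
+# --- Editorial usage sketch (not part of the generated module) ---
+# Demonstrates the consistency rule from the note above: per-state usage
+# measurements are reported separately and their sum matches the reported
+# system.filesystem.limit. The byte values and state names are illustrative.
+def _example_record_filesystem_usage(meter: Meter) -> None:
+    limit = create_system_filesystem_limit(meter)
+    usage = create_system_filesystem_usage(meter)
+    used, free, reserved = 400_000_000, 580_000_000, 20_000_000
+    limit.add(used + free + reserved)
+    usage.add(used, {"system.filesystem.state": "used"})
+    usage.add(free, {"system.filesystem.state": "free"})
+    usage.add(reserved, {"system.filesystem.state": "reserved"})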
+
+
+SYSTEM_FILESYSTEM_UTILIZATION: Final = "system.filesystem.utilization"
+"""
+Instrument: gauge
+Unit: 1
+"""
+
+
+def create_system_filesystem_utilization(
+ meter: Meter, callbacks: Optional[Sequence[CallbackT]]
+) -> ObservableGauge:
+ return meter.create_observable_gauge(
+ name=SYSTEM_FILESYSTEM_UTILIZATION,
+ callbacks=callbacks,
+ description="",
+ unit="1",
+ )
+
+
+SYSTEM_LINUX_MEMORY_AVAILABLE: Final = "system.linux.memory.available"
+"""
+An estimate of how much memory is available for starting new applications, without causing swapping
+Instrument: updowncounter
+Unit: By
+Note: This is an alternative to `system.memory.usage` metric with `state=free`.
+Linux kernels starting from 3.14 export "available" memory. It takes "free" memory as a baseline, and then factors in kernel-specific values.
+This is supposed to be more accurate than just "free" memory.
+For reference, see the calculations [here](https://superuser.com/a/980821).
+See also `MemAvailable` in [/proc/meminfo](https://man7.org/linux/man-pages/man5/proc.5.html).
+"""
+
+
+def create_system_linux_memory_available(meter: Meter) -> UpDownCounter:
+ """An estimate of how much memory is available for starting new applications, without causing swapping"""
+ return meter.create_up_down_counter(
+ name=SYSTEM_LINUX_MEMORY_AVAILABLE,
+ description="An estimate of how much memory is available for starting new applications, without causing swapping",
+ unit="By",
+ )
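+
+
+# --- Editorial usage sketch (not part of the generated module) ---
+# Reads `MemAvailable` from /proc/meminfo (see the note above) and records the
+# change since the previous collection, because an UpDownCounter accepts
+# deltas. Error handling and the previous-value bookkeeping are deliberately
+# minimal; the caller is assumed to persist the returned value.
+def _example_record_linux_memory_available(
+    meter: Meter, previous_bytes: int
+) -> int:
+    available = create_system_linux_memory_available(meter)
+    with open("/proc/meminfo", encoding="ascii") as meminfo:
+        for line in meminfo:
+            if line.startswith("MemAvailable:"):
+                # The value is reported in kB, e.g. "MemAvailable: 16253096 kB".
+                current_bytes = int(line.split()[1]) * 1024
+                available.add(current_bytes - previous_bytes)
+                return current_bytes
+    return previous_bytes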
+
+
+SYSTEM_LINUX_MEMORY_SLAB_USAGE: Final = "system.linux.memory.slab.usage"
+"""
+Reports the memory used by the Linux kernel for managing caches of frequently used objects
+Instrument: updowncounter
+Unit: By
+Note: The sum over the `reclaimable` and `unreclaimable` state values in `system.linux.memory.slab.usage` SHOULD be equal to the total slab memory available on the system.
+Note that the total slab memory is not constant and may vary over time.
+See also the [Slab allocator](https://blogs.oracle.com/linux/post/understanding-linux-kernel-memory-statistics) and `Slab` in [/proc/meminfo](https://man7.org/linux/man-pages/man5/proc.5.html).
+"""
+
+
+def create_system_linux_memory_slab_usage(meter: Meter) -> UpDownCounter:
+ """Reports the memory used by the Linux kernel for managing caches of frequently used objects"""
+ return meter.create_up_down_counter(
+ name=SYSTEM_LINUX_MEMORY_SLAB_USAGE,
+ description="Reports the memory used by the Linux kernel for managing caches of frequently used objects.",
+ unit="By",
+ )
+
+
+SYSTEM_MEMORY_LIMIT: Final = "system.memory.limit"
+"""
+Total memory available in the system
+Instrument: updowncounter
+Unit: By
+Note: Its value SHOULD equal the sum of `system.memory.usage` over all `system.memory.state` values.
+"""
+
+
+def create_system_memory_limit(meter: Meter) -> UpDownCounter:
+ """Total memory available in the system"""
+ return meter.create_up_down_counter(
+ name=SYSTEM_MEMORY_LIMIT,
+ description="Total memory available in the system.",
+ unit="By",
+ )
+
+
+SYSTEM_MEMORY_SHARED: Final = "system.memory.shared"
+"""
+Shared memory used (mostly by tmpfs)
+Instrument: updowncounter
+Unit: By
+Note: Equivalent of `shared` from [`free` command](https://man7.org/linux/man-pages/man1/free.1.html) or
+`Shmem` from [`/proc/meminfo`](https://man7.org/linux/man-pages/man5/proc.5.html).
+"""
+
+
+def create_system_memory_shared(meter: Meter) -> UpDownCounter:
+ """Shared memory used (mostly by tmpfs)"""
+ return meter.create_up_down_counter(
+ name=SYSTEM_MEMORY_SHARED,
+ description="Shared memory used (mostly by tmpfs).",
+ unit="By",
+ )
+
+
+SYSTEM_MEMORY_USAGE: Final = "system.memory.usage"
+"""
+Reports memory in use by state
+Instrument: updowncounter
+Unit: By
+Note: The sum over all `system.memory.state` values SHOULD equal the total memory
+available on the system, that is `system.memory.limit`.
+"""
+
+
+def create_system_memory_usage(meter: Meter) -> UpDownCounter:
+ """Reports memory in use by state"""
+ return meter.create_up_down_counter(
+ name=SYSTEM_MEMORY_USAGE,
+ description="Reports memory in use by state.",
+ unit="By",
+ )
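+
+
+# --- Editorial usage sketch (not part of the generated module) ---
+# Shows the relationship spelled out in the notes above: the per-state usage
+# measurements should sum to system.memory.limit. Byte values and state names
+# are illustrative.
+def _example_record_memory_usage(meter: Meter) -> None:
+    limit = create_system_memory_limit(meter)
+    usage = create_system_memory_usage(meter)
+    states = {"used": 6 * 2**30, "free": 1 * 2**30, "cached": 1 * 2**30}
+    limit.add(sum(states.values()))
+    for state, size_bytes in states.items():
+        usage.add(size_bytes, {"system.memory.state": state})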
+
+
+SYSTEM_MEMORY_UTILIZATION: Final = "system.memory.utilization"
+"""
+Instrument: gauge
+Unit: 1
+"""
+
+
+def create_system_memory_utilization(
+ meter: Meter, callbacks: Optional[Sequence[CallbackT]]
+) -> ObservableGauge:
+ return meter.create_observable_gauge(
+ name=SYSTEM_MEMORY_UTILIZATION,
+ callbacks=callbacks,
+ description="",
+ unit="1",
+ )
+
+
+SYSTEM_NETWORK_CONNECTIONS: Final = "system.network.connections"
+"""
+Instrument: updowncounter
+Unit: {connection}
+"""
+
+
+def create_system_network_connections(meter: Meter) -> UpDownCounter:
+ return meter.create_up_down_counter(
+ name=SYSTEM_NETWORK_CONNECTIONS,
+ description="",
+ unit="{connection}",
+ )
+
+
+SYSTEM_NETWORK_DROPPED: Final = "system.network.dropped"
+"""
+Count of packets that are dropped or discarded even though there was no error
+Instrument: counter
+Unit: {packet}
+Note: Measured as:
+
+- Linux: the `drop` column in `/proc/net/dev` ([source](https://web.archive.org/web/20180321091318/http://www.onlamp.com/pub/a/linux/2000/11/16/LinuxAdmin.html))
+- Windows: [`InDiscards`/`OutDiscards`](https://docs.microsoft.com/windows/win32/api/netioapi/ns-netioapi-mib_if_row2)
+ from [`GetIfEntry2`](https://docs.microsoft.com/windows/win32/api/netioapi/nf-netioapi-getifentry2).
+"""
+
+
+def create_system_network_dropped(meter: Meter) -> Counter:
+ """Count of packets that are dropped or discarded even though there was no error"""
+ return meter.create_counter(
+ name=SYSTEM_NETWORK_DROPPED,
+ description="Count of packets that are dropped or discarded even though there was no error",
+ unit="{packet}",
+ )
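+
+
+# --- Editorial usage sketch (not part of the generated module) ---
+# Parses the receive "drop" column of /proc/net/dev (see the note above) and
+# records it per interface. A real collector would report only the increase
+# since the previous poll; the attribute names are illustrative.
+def _example_record_network_dropped(meter: Meter) -> None:
+    dropped = create_system_network_dropped(meter)
+    with open("/proc/net/dev", encoding="ascii") as netdev:
+        for line in list(netdev)[2:]:  # the first two lines are headers
+            interface, counters = line.split(":", 1)
+            rx_dropped = int(counters.split()[3])  # 4th field: receive drops
+            dropped.add(
+                rx_dropped,
+                {
+                    "network.interface.name": interface.strip(),
+                    "network.io.direction": "receive",
+                },
+            )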
+
+
+SYSTEM_NETWORK_ERRORS: Final = "system.network.errors"
+"""
+Count of network errors detected
+Instrument: counter
+Unit: {error}
+Note: Measured as:
+
+- Linux: the `errs` column in `/proc/net/dev` ([source](https://web.archive.org/web/20180321091318/http://www.onlamp.com/pub/a/linux/2000/11/16/LinuxAdmin.html)).
+- Windows: [`InErrors`/`OutErrors`](https://docs.microsoft.com/windows/win32/api/netioapi/ns-netioapi-mib_if_row2)
+ from [`GetIfEntry2`](https://docs.microsoft.com/windows/win32/api/netioapi/nf-netioapi-getifentry2).
+"""
+
+
+def create_system_network_errors(meter: Meter) -> Counter:
+ """Count of network errors detected"""
+ return meter.create_counter(
+ name=SYSTEM_NETWORK_ERRORS,
+ description="Count of network errors detected",
+ unit="{error}",
+ )
+
+
+SYSTEM_NETWORK_IO: Final = "system.network.io"
+"""
+Instrument: counter
+Unit: By
+"""
+
+
+def create_system_network_io(meter: Meter) -> Counter:
+ return meter.create_counter(
+ name=SYSTEM_NETWORK_IO,
+ description="",
+ unit="By",
+ )
+
+
+SYSTEM_NETWORK_PACKETS: Final = "system.network.packets"
+"""
+Instrument: counter
+Unit: {packet}
+"""
+
+
+def create_system_network_packets(meter: Meter) -> Counter:
+ return meter.create_counter(
+ name=SYSTEM_NETWORK_PACKETS,
+ description="",
+ unit="{packet}",
+ )
+
+
+SYSTEM_PAGING_FAULTS: Final = "system.paging.faults"
+"""
+Instrument: counter
+Unit: {fault}
+"""
+
+
+def create_system_paging_faults(meter: Meter) -> Counter:
+ return meter.create_counter(
+ name=SYSTEM_PAGING_FAULTS,
+ description="",
+ unit="{fault}",
+ )
+
+
+SYSTEM_PAGING_OPERATIONS: Final = "system.paging.operations"
+"""
+Instrument: counter
+Unit: {operation}
+"""
+
+
+def create_system_paging_operations(meter: Meter) -> Counter:
+ return meter.create_counter(
+ name=SYSTEM_PAGING_OPERATIONS,
+ description="",
+ unit="{operation}",
+ )
+
+
+SYSTEM_PAGING_USAGE: Final = "system.paging.usage"
+"""
+Unix swap or Windows pagefile usage
+Instrument: updowncounter
+Unit: By
+"""
+
+
+def create_system_paging_usage(meter: Meter) -> UpDownCounter:
+ """Unix swap or windows pagefile usage"""
+ return meter.create_up_down_counter(
+ name=SYSTEM_PAGING_USAGE,
+ description="Unix swap or windows pagefile usage",
+ unit="By",
+ )
+
+
+SYSTEM_PAGING_UTILIZATION: Final = "system.paging.utilization"
+"""
+Instrument: gauge
+Unit: 1
+"""
+
+
+def create_system_paging_utilization(
+ meter: Meter, callbacks: Optional[Sequence[CallbackT]]
+) -> ObservableGauge:
+ return meter.create_observable_gauge(
+ name=SYSTEM_PAGING_UTILIZATION,
+ callbacks=callbacks,
+ description="",
+ unit="1",
+ )
+
+
+SYSTEM_PROCESS_COUNT: Final = "system.process.count"
+"""
+Total number of processes in each state
+Instrument: updowncounter
+Unit: {process}
+"""
+
+
+def create_system_process_count(meter: Meter) -> UpDownCounter:
+ """Total number of processes in each state"""
+ return meter.create_up_down_counter(
+ name=SYSTEM_PROCESS_COUNT,
+ description="Total number of processes in each state",
+ unit="{process}",
+ )
+
+
+SYSTEM_PROCESS_CREATED: Final = "system.process.created"
+"""
+Total number of processes created over uptime of the host
+Instrument: counter
+Unit: {process}
+"""
+
+
+def create_system_process_created(meter: Meter) -> Counter:
+ """Total number of processes created over uptime of the host"""
+ return meter.create_counter(
+ name=SYSTEM_PROCESS_CREATED,
+ description="Total number of processes created over uptime of the host",
+ unit="{process}",
+ )
+
+
+SYSTEM_UPTIME: Final = "system.uptime"
+"""
+The time the system has been running
+Instrument: gauge
+Unit: s
+Note: Instrumentations SHOULD use a gauge with type `double` and measure uptime in seconds as a floating point number with the highest precision available.
+The actual accuracy would depend on the instrumentation and operating system.
+"""
+
+
+def create_system_uptime(
+ meter: Meter, callbacks: Optional[Sequence[CallbackT]]
+) -> ObservableGauge:
+ """The time the system has been running"""
+ return meter.create_observable_gauge(
+ name=SYSTEM_UPTIME,
+ callbacks=callbacks,
+ description="The time the system has been running",
+ unit="s",
+ )
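+
+
+# --- Editorial usage sketch (not part of the generated module) ---
+# A callback for this asynchronous gauge: each time the SDK collects, it reads
+# the first field of /proc/uptime, which is the uptime in seconds as a float
+# (matching the double-precision guidance in the note above). Relies on the
+# imports at the top of this module; registration is shown in the comment below.
+def _example_observe_uptime(options: CallbackOptions) -> Iterable[Observation]:
+    with open("/proc/uptime", encoding="ascii") as f:
+        uptime_seconds = float(f.read().split()[0])
+    yield Observation(uptime_seconds)
+
+
+# e.g. create_system_uptime(meter, [_example_observe_uptime])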
diff --git a/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/vcs_metrics.py b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/vcs_metrics.py
new file mode 100644
index 00000000..c232751c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/opentelemetry/semconv/_incubating/metrics/vcs_metrics.py
@@ -0,0 +1,233 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from typing import (
+ Callable,
+ Final,
+ Generator,
+ Iterable,
+ Optional,
+ Sequence,
+ Union,
+)
+
+from opentelemetry.metrics import (
+ CallbackOptions,
+ Meter,
+ ObservableGauge,
+ Observation,
+ UpDownCounter,
+)
+
+# pylint: disable=invalid-name
+CallbackT = Union[
+ Callable[[CallbackOptions], Iterable[Observation]],
+ Generator[Iterable[Observation], CallbackOptions, None],
+]
+
+VCS_CHANGE_COUNT: Final = "vcs.change.count"
+"""
+The number of changes (pull requests/merge requests/changelists) in a repository, categorized by their state (e.g. open or merged)
+Instrument: updowncounter
+Unit: {change}
+"""
+
+
+def create_vcs_change_count(meter: Meter) -> UpDownCounter:
+ """The number of changes (pull requests/merge requests/changelists) in a repository, categorized by their state (e.g. open or merged)"""
+ return meter.create_up_down_counter(
+ name=VCS_CHANGE_COUNT,
+ description="The number of changes (pull requests/merge requests/changelists) in a repository, categorized by their state (e.g. open or merged)",
+ unit="{change}",
+ )
+
+
+VCS_CHANGE_DURATION: Final = "vcs.change.duration"
+"""
+The time duration a change (pull request/merge request/changelist) has been in a given state
+Instrument: gauge
+Unit: s
+"""
+
+
+def create_vcs_change_duration(
+ meter: Meter, callbacks: Optional[Sequence[CallbackT]]
+) -> ObservableGauge:
+ """The time duration a change (pull request/merge request/changelist) has been in a given state"""
+ return meter.create_observable_gauge(
+ name=VCS_CHANGE_DURATION,
+ callbacks=callbacks,
+ description="The time duration a change (pull request/merge request/changelist) has been in a given state.",
+ unit="s",
+ )
+
+
+VCS_CHANGE_TIME_TO_APPROVAL: Final = "vcs.change.time_to_approval"
+"""
+The amount of time since its creation it took a change (pull request/merge request/changelist) to get the first approval
+Instrument: gauge
+Unit: s
+"""
+
+
+def create_vcs_change_time_to_approval(
+ meter: Meter, callbacks: Optional[Sequence[CallbackT]]
+) -> ObservableGauge:
+ """The amount of time since its creation it took a change (pull request/merge request/changelist) to get the first approval"""
+ return meter.create_observable_gauge(
+ name=VCS_CHANGE_TIME_TO_APPROVAL,
+ callbacks=callbacks,
+ description="The amount of time since its creation it took a change (pull request/merge request/changelist) to get the first approval.",
+ unit="s",
+ )
+
+
+VCS_CHANGE_TIME_TO_MERGE: Final = "vcs.change.time_to_merge"
+"""
+The amount of time since its creation it took a change (pull request/merge request/changelist) to get merged into the target (base) ref
+Instrument: gauge
+Unit: s
+"""
+
+
+def create_vcs_change_time_to_merge(
+ meter: Meter, callbacks: Optional[Sequence[CallbackT]]
+) -> ObservableGauge:
+ """The amount of time since its creation it took a change (pull request/merge request/changelist) to get merged into the target(base) ref"""
+ return meter.create_observable_gauge(
+ name=VCS_CHANGE_TIME_TO_MERGE,
+ callbacks=callbacks,
+ description="The amount of time since its creation it took a change (pull request/merge request/changelist) to get merged into the target(base) ref.",
+ unit="s",
+ )
+
+
+VCS_CONTRIBUTOR_COUNT: Final = "vcs.contributor.count"
+"""
+The number of unique contributors to a repository
+Instrument: gauge
+Unit: {contributor}
+"""
+
+
+def create_vcs_contributor_count(
+ meter: Meter, callbacks: Optional[Sequence[CallbackT]]
+) -> ObservableGauge:
+ """The number of unique contributors to a repository"""
+ return meter.create_observable_gauge(
+ name=VCS_CONTRIBUTOR_COUNT,
+ callbacks=callbacks,
+ description="The number of unique contributors to a repository",
+ unit="{contributor}",
+ )
+
+
+VCS_REF_COUNT: Final = "vcs.ref.count"
+"""
+The number of refs of type branch or tag in a repository
+Instrument: updowncounter
+Unit: {ref}
+"""
+
+
+def create_vcs_ref_count(meter: Meter) -> UpDownCounter:
+ """The number of refs of type branch or tag in a repository"""
+ return meter.create_up_down_counter(
+ name=VCS_REF_COUNT,
+ description="The number of refs of type branch or tag in a repository.",
+ unit="{ref}",
+ )
+
+
+VCS_REF_LINES_DELTA: Final = "vcs.ref.lines_delta"
+"""
+The number of lines added/removed in a ref (branch) relative to the ref from the `vcs.ref.base.name` attribute
+Instrument: gauge
+Unit: {line}
+Note: This metric should be reported for each `vcs.line_change.type` value. For example, if a ref added 3 lines and removed 2 lines,
+instrumentation SHOULD report two measurements: 3 and 2 (both positive numbers).
+If the number of lines added/removed should be calculated from the start of time, then `vcs.ref.base.name` SHOULD be set to an empty string.
+"""
+
+
+def create_vcs_ref_lines_delta(
+ meter: Meter, callbacks: Optional[Sequence[CallbackT]]
+) -> ObservableGauge:
+ """The number of lines added/removed in a ref (branch) relative to the ref from the `vcs.ref.base.name` attribute"""
+ return meter.create_observable_gauge(
+ name=VCS_REF_LINES_DELTA,
+ callbacks=callbacks,
+ description="The number of lines added/removed in a ref (branch) relative to the ref from the `vcs.ref.base.name` attribute.",
+ unit="{line}",
+ )
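+
+
+# --- Editorial usage sketch (not part of the generated module) ---
+# Shows the reporting rule from the note above: one positive measurement per
+# `vcs.line_change.type` value. The added/removed counts are hypothetical; a
+# real callback would compute them by diffing against `vcs.ref.base.name`.
+def _example_observe_lines_delta(
+    options: CallbackOptions,
+) -> Iterable[Observation]:
+    added, removed = 3, 2
+    yield Observation(added, {"vcs.line_change.type": "added"})
+    yield Observation(removed, {"vcs.line_change.type": "removed"})
+
+
+# e.g. create_vcs_ref_lines_delta(meter, [_example_observe_lines_delta])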
+
+
+VCS_REF_REVISIONS_DELTA: Final = "vcs.ref.revisions_delta"
+"""
+The number of revisions (commits) a ref (branch) is ahead/behind the branch from the `vcs.ref.base.name` attribute
+Instrument: gauge
+Unit: {revision}
+Note: This metric should be reported for each `vcs.revision_delta.direction` value. For example, if branch `a` is 3 commits behind and 2 commits ahead of `trunk`,
+instrumentation SHOULD report two measurements: 3 and 2 (both positive numbers), with `vcs.ref.base.name` set to `trunk`.
+"""
+
+
+def create_vcs_ref_revisions_delta(
+ meter: Meter, callbacks: Optional[Sequence[CallbackT]]
+) -> ObservableGauge:
+ """The number of revisions (commits) a ref (branch) is ahead/behind the branch from the `vcs.ref.base.name` attribute"""
+ return meter.create_observable_gauge(
+ name=VCS_REF_REVISIONS_DELTA,
+ callbacks=callbacks,
+ description="The number of revisions (commits) a ref (branch) is ahead/behind the branch from the `vcs.ref.base.name` attribute",
+ unit="{revision}",
+ )
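+
+
+# --- Editorial usage sketch (not part of the generated module) ---
+# Mirrors the worked example in the note above: a branch that is 3 commits
+# behind and 2 commits ahead of `trunk` yields two positive measurements, one
+# per direction. All attribute values are illustrative.
+def _example_observe_revisions_delta(
+    options: CallbackOptions,
+) -> Iterable[Observation]:
+    base = {"vcs.ref.base.name": "trunk"}
+    yield Observation(3, {**base, "vcs.revision_delta.direction": "behind"})
+    yield Observation(2, {**base, "vcs.revision_delta.direction": "ahead"})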
+
+
+VCS_REF_TIME: Final = "vcs.ref.time"
+"""
+Time a ref (branch) created from the default branch (trunk) has existed. The `ref.type` attribute will always be `branch`
+Instrument: gauge
+Unit: s
+"""
+
+
+def create_vcs_ref_time(
+ meter: Meter, callbacks: Optional[Sequence[CallbackT]]
+) -> ObservableGauge:
+ """Time a ref (branch) created from the default branch (trunk) has existed. The `ref.type` attribute will always be `branch`"""
+ return meter.create_observable_gauge(
+ name=VCS_REF_TIME,
+ callbacks=callbacks,
+ description="Time a ref (branch) created from the default branch (trunk) has existed. The `ref.type` attribute will always be `branch`",
+ unit="s",
+ )
+
+
+VCS_REPOSITORY_COUNT: Final = "vcs.repository.count"
+"""
+The number of repositories in an organization
+Instrument: updowncounter
+Unit: {repository}
+"""
+
+
+def create_vcs_repository_count(meter: Meter) -> UpDownCounter:
+ """The number of repositories in an organization"""
+ return meter.create_up_down_counter(
+ name=VCS_REPOSITORY_COUNT,
+ description="The number of repositories in an organization.",
+ unit="{repository}",
+ )