diff --git a/.cspell.json b/.cspell.json
index fd5a476fb7..bc3a488c8d 100644
--- a/.cspell.json
+++ b/.cspell.json
@@ -26,6 +26,7 @@
     // workspace dictionary.
     "words": [
         "actix",
+        "anyvalue",
         "appender",
         "appenders",
         "Bhasin",
@@ -37,6 +38,7 @@
         "deque",
         "Dirkjan",
         "EPYC",
+        "flamegraph",
         "hasher",
         "Isobel",
         "jaegertracing",
@@ -53,6 +55,7 @@
         "OTELCOL",
         "OTLP",
         "periodicreader",
+        "pprof",
         "prost",
         "protoc",
         "quantile",
@@ -60,10 +63,13 @@
         "reqwest",
         "runtimes",
         "rustc",
+        "serde",
         "shoppingcart",
         "struct",
         "Tescher",
+        "testcontainers",
         "testresults",
+        "thiserror",
         "tracerprovider",
         "updown",
         "Zhongyang",
diff --git a/.github/workflows/integration_tests.yml b/.github/workflows/integration_tests.yml
index badc78fb10..e412f7163c 100644
--- a/.github/workflows/integration_tests.yml
+++ b/.github/workflows/integration_tests.yml
@@ -9,7 +9,6 @@ jobs:
   integration_tests:
     runs-on: ubuntu-latest
     timeout-minutes: 10
-    if: ${{ github.event.label.name == 'integration tests' || contains(github.event.pull_request.labels.*.name, 'integration tests') }}
     steps:
       - name: Free disk space
        run: |
diff --git a/Cargo.toml b/Cargo.toml
index 1445047825..3f119360d1 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -8,6 +8,9 @@ members = [
     "stress",
 ]
 resolver = "2"
+# Avoid applying the [patch] below, which forces use of workspace members,
+# to this not actively maintained crate
+exclude = ["opentelemetry-prometheus"]
 
 [profile.bench]
 # https://doc.rust-lang.org/cargo/reference/profiles.html#bench
@@ -50,3 +53,9 @@ tracing = { version = ">=0.1.40", default-features = false }
 tracing-core = { version = ">=0.1.33", default-features = false }
 tracing-subscriber = { version = "0.3", default-features = false }
 url = { version = "2.5", default-features = false }
+
+# Avoid use of the crates.io version of these crates through the tracing-opentelemetry dependencies
+[patch.crates-io]
+opentelemetry = { path = "opentelemetry" }
+opentelemetry_sdk = { path = "opentelemetry-sdk" }
+opentelemetry-stdout = { path = "opentelemetry-stdout" }
diff --git a/opentelemetry-appender-log/src/lib.rs b/opentelemetry-appender-log/src/lib.rs
index 81ec10d129..1cc75de47f 100644
--- a/opentelemetry-appender-log/src/lib.rs
+++ b/opentelemetry-appender-log/src/lib.rs
@@ -116,7 +116,9 @@ use opentelemetry::{
     InstrumentationScope, Key,
 };
 #[cfg(feature = "experimental_metadata_attributes")]
-use opentelemetry_semantic_conventions::attribute::{CODE_FILEPATH, CODE_LINENO, CODE_NAMESPACE};
+use opentelemetry_semantic_conventions::attribute::{
+    CODE_FILEPATH, CODE_LINE_NUMBER, CODE_NAMESPACE,
+};
 
 pub struct OpenTelemetryLogBridge<P, L>
 where
@@ -158,7 +160,7 @@ where
         }
 
         if let Some(line_no) = record.line() {
-            log_record.add_attribute(Key::new(CODE_LINENO), AnyValue::from(line_no));
+            log_record.add_attribute(Key::new(CODE_LINE_NUMBER), AnyValue::from(line_no));
         }
 
         if let Some(module) = record.module_path() {
@@ -769,7 +771,7 @@ mod tests {
     use super::OpenTelemetryLogBridge;
 
     use opentelemetry::{logs::AnyValue, StringValue};
-    use opentelemetry_sdk::{logs::LoggerProvider, testing::logs::InMemoryLogExporter};
+    use opentelemetry_sdk::{logs::InMemoryLogExporter, logs::LoggerProvider};
 
     use log::Log;
 
@@ -1171,7 +1173,7 @@ mod tests {
     #[test]
     fn logbridge_code_attributes() {
         use opentelemetry_semantic_conventions::attribute::{
-            CODE_FILEPATH, CODE_LINENO, CODE_NAMESPACE,
+            CODE_FILEPATH, CODE_LINE_NUMBER, CODE_NAMESPACE,
         };
 
         let exporter = InMemoryLogExporter::default();
@@ -1212,7 +1214,7 @@ mod tests {
             Some(AnyValue::String(StringValue::from("service"))),
             get(CODE_NAMESPACE)
         );
-        assert_eq!(Some(AnyValue::Int(101)), get(CODE_LINENO));
+        assert_eq!(Some(AnyValue::Int(101)), get(CODE_LINE_NUMBER));
     }
 
     #[test]
diff --git a/opentelemetry-appender-tracing/CHANGELOG.md b/opentelemetry-appender-tracing/CHANGELOG.md
index 9fac13a4b2..0bbeabb275 100644
--- a/opentelemetry-appender-tracing/CHANGELOG.md
+++ b/opentelemetry-appender-tracing/CHANGELOG.md
@@ -3,6 +3,7 @@
 ## vNext
 
 - Bump msrv to 1.75.0.
+- New experimental feature to use trace\_id & span\_id from spans created through the [tracing](https://crates.io/crates/tracing) crate (experimental_use_tracing_span_context) [#2438](https://github.com/open-telemetry/opentelemetry-rust/pull/2438)
 
 ## 0.27.0
 
diff --git a/opentelemetry-appender-tracing/Cargo.toml b/opentelemetry-appender-tracing/Cargo.toml
index 9e831eb38f..40cd98f801 100644
--- a/opentelemetry-appender-tracing/Cargo.toml
+++ b/opentelemetry-appender-tracing/Cargo.toml
@@ -17,6 +17,7 @@ tracing = { workspace = true, features = ["std"]}
 tracing-core = { workspace = true }
 tracing-log = { version = "0.2", optional = true }
 tracing-subscriber = { workspace = true, features = ["registry", "std"] }
+tracing-opentelemetry = { version = "0.28", optional = true }
 
 [dev-dependencies]
 log = { workspace = true }
@@ -28,11 +29,12 @@ criterion = { workspace = true }
 tokio = { workspace = true, features = ["full"]}
 
 [target.'cfg(not(target_os = "windows"))'.dev-dependencies]
-pprof = { version = "0.13", features = ["flamegraph", "criterion"] }
+pprof = { version = "0.14", features = ["flamegraph", "criterion"] }
 
 [features]
 experimental_metadata_attributes = ["dep:tracing-log"]
 spec_unstable_logs_enabled = ["opentelemetry/spec_unstable_logs_enabled"]
+experimental_use_tracing_span_context = ["tracing-opentelemetry"]
 
 [[bench]]
diff --git a/opentelemetry-appender-tracing/benches/logs.rs b/opentelemetry-appender-tracing/benches/logs.rs
index e5fb98273c..6c09f5c966 100644
--- a/opentelemetry-appender-tracing/benches/logs.rs
+++ b/opentelemetry-appender-tracing/benches/logs.rs
@@ -16,8 +16,8 @@ use criterion::{criterion_group, criterion_main, Criterion};
 use opentelemetry::InstrumentationScope;
 use opentelemetry_appender_tracing::layer as tracing_layer;
-use opentelemetry_sdk::export::logs::{LogBatch, LogExporter};
 use opentelemetry_sdk::logs::LogResult;
+use opentelemetry_sdk::logs::{LogBatch, LogExporter};
 use opentelemetry_sdk::logs::{LogProcessor, LogRecord, LoggerProvider};
 use opentelemetry_sdk::Resource;
 use pprof::criterion::{Output, PProfProfiler};
diff --git a/opentelemetry-appender-tracing/src/layer.rs b/opentelemetry-appender-tracing/src/layer.rs
index af752c5f8e..0b77cbd2f5 100644
--- a/opentelemetry-appender-tracing/src/layer.rs
+++ b/opentelemetry-appender-tracing/src/layer.rs
@@ -8,7 +8,7 @@ use tracing_core::Level;
 use tracing_core::Metadata;
 #[cfg(feature = "experimental_metadata_attributes")]
 use tracing_log::NormalizeEvent;
-use tracing_subscriber::Layer;
+use tracing_subscriber::{registry::LookupSpan, Layer};
 
 const INSTRUMENTATION_LIBRARY_NAME: &str = "opentelemetry-appender-tracing";
@@ -149,7 +149,7 @@
 impl<S, P, L> Layer<S> for OpenTelemetryTracingBridge<P, L>
 where
-    S: tracing::Subscriber,
+    S: tracing::Subscriber + for<'a> LookupSpan<'a>,
     P: LoggerProvider<Logger = L> + Send + Sync + 'static,
     L: Logger + Send + Sync + 'static,
 {
@@ -180,6 +180,26 @@ where
         // Visit fields.
         event.record(&mut visitor);
 
+        #[cfg(feature = "experimental_use_tracing_span_context")]
+        if let Some(span) = _ctx.event_span(event) {
+            use tracing_opentelemetry::OtelData;
+            let opt_span_id = span
+                .extensions()
+                .get::<OtelData>()
+                .and_then(|otd| otd.builder.span_id);
+
+            let opt_trace_id = span.scope().last().and_then(|root_span| {
+                root_span
+                    .extensions()
+                    .get::<OtelData>()
+                    .and_then(|otd| otd.builder.trace_id)
+            });
+
+            if let Some((trace_id, span_id)) = opt_trace_id.zip(opt_span_id) {
+                log_record.set_trace_context(trace_id, span_id, None);
+            }
+        }
+
         //emit record
         self.logger.emit(log_record);
     }
@@ -213,9 +233,9 @@ mod tests {
     use opentelemetry::trace::TracerProvider as _;
     use opentelemetry::trace::{TraceContextExt, TraceFlags, Tracer};
     use opentelemetry::{logs::AnyValue, Key};
-    use opentelemetry_sdk::export::logs::{LogBatch, LogExporter};
+    use opentelemetry_sdk::logs::InMemoryLogExporter;
+    use opentelemetry_sdk::logs::{LogBatch, LogExporter};
     use opentelemetry_sdk::logs::{LogRecord, LogResult, LoggerProvider};
-    use opentelemetry_sdk::testing::logs::InMemoryLogExporter;
     use opentelemetry_sdk::trace::{Sampler, TracerProvider};
     use tracing::{error, warn};
     use tracing_subscriber::prelude::__tracing_subscriber_SubscriberExt;
@@ -495,6 +515,67 @@ mod tests {
         }
     }
 
+    #[cfg(feature = "experimental_use_tracing_span_context")]
+    #[test]
+    fn tracing_appender_inside_tracing_crate_context() {
+        use opentelemetry_sdk::trace::InMemorySpanExporterBuilder;
+
+        // Arrange
+        let exporter: InMemoryLogExporter = InMemoryLogExporter::default();
+        let logger_provider = LoggerProvider::builder()
+            .with_simple_exporter(exporter.clone())
+            .build();
+
+        // setup tracing layer to compare trace/span IDs against
+        let span_exporter = InMemorySpanExporterBuilder::new().build();
+        let tracer_provider = TracerProvider::builder()
+            .with_simple_exporter(span_exporter.clone())
+            .build();
+        let tracer = tracer_provider.tracer("test-tracer");
+
+        let level_filter = tracing_subscriber::filter::LevelFilter::INFO;
+        let log_layer =
+            layer::OpenTelemetryTracingBridge::new(&logger_provider).with_filter(level_filter);
+
+        let subscriber = tracing_subscriber::registry()
+            .with(log_layer)
+            .with(tracing_opentelemetry::layer().with_tracer(tracer));
+
+        // Avoiding global subscriber.init() as that does not play well with unit tests.
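+        // (tracing::subscriber::set_default returns a DefaultGuard; the
+        // subscriber stays active only until the guard is dropped, keeping
+        // this test isolated from other tests in the same process.)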
+ let _guard = tracing::subscriber::set_default(subscriber); + + // Act + tracing::info_span!("outer-span").in_scope(|| { + error!("first-event"); + + tracing::info_span!("inner-span").in_scope(|| { + error!("second-event"); + }); + }); + + logger_provider.force_flush(); + + let logs = exporter.get_emitted_logs().expect("No emitted logs"); + assert_eq!(logs.len(), 2); + + let spans = span_exporter.get_finished_spans().unwrap(); + assert_eq!(spans.len(), 2); + + let trace_id = spans[0].span_context.trace_id(); + assert_eq!(trace_id, spans[1].span_context.trace_id()); + let inner_span_id = spans[0].span_context.span_id(); + let outer_span_id = spans[1].span_context.span_id(); + assert_eq!(outer_span_id, spans[0].parent_span_id); + + let trace_ctx0 = logs[0].record.trace_context().unwrap(); + let trace_ctx1 = logs[1].record.trace_context().unwrap(); + + assert_eq!(trace_ctx0.trace_id, trace_id); + assert_eq!(trace_ctx1.trace_id, trace_id); + assert_eq!(trace_ctx0.span_id, outer_span_id); + assert_eq!(trace_ctx1.span_id, inner_span_id); + } + #[test] fn tracing_appender_standalone_with_tracing_log() { // Arrange diff --git a/opentelemetry-otlp/examples/basic-otlp-http/Cargo.toml b/opentelemetry-otlp/examples/basic-otlp-http/Cargo.toml index 242ea0c1f8..ef1595b0ae 100644 --- a/opentelemetry-otlp/examples/basic-otlp-http/Cargo.toml +++ b/opentelemetry-otlp/examples/basic-otlp-http/Cargo.toml @@ -6,14 +6,14 @@ license = "Apache-2.0" publish = false [features] +default = ["reqwest-blocking"] reqwest-blocking = ["opentelemetry-otlp/reqwest-blocking-client"] -hyper = ["opentelemetry-otlp/hyper-client"] [dependencies] once_cell = { workspace = true } opentelemetry = { path = "../../../opentelemetry" } -opentelemetry_sdk = { path = "../../../opentelemetry-sdk", features = ["rt-tokio", "experimental_metrics_periodicreader_with_async_runtime"]} -opentelemetry-otlp = { path = "../..", features = ["http-proto", "http-json", "logs", "internal-logs"]} +opentelemetry_sdk = { path = "../../../opentelemetry-sdk" } +opentelemetry-otlp = { path = "../..", features = ["http-proto", "http-json", "logs", "internal-logs"], default-features = false} opentelemetry-appender-tracing = { path = "../../../opentelemetry-appender-tracing", default-features = false} tokio = { workspace = true, features = ["full"] } diff --git a/opentelemetry-otlp/examples/basic-otlp-http/Dockerfile b/opentelemetry-otlp/examples/basic-otlp-http/Dockerfile deleted file mode 100644 index f88c276a55..0000000000 --- a/opentelemetry-otlp/examples/basic-otlp-http/Dockerfile +++ /dev/null @@ -1,6 +0,0 @@ -FROM rust:1.51 -COPY . /usr/src/basic-otlp-http/ -WORKDIR /usr/src/basic-otlp-http/ -RUN cargo build --release -RUN cargo install --path . -CMD ["/usr/local/cargo/bin/basic-otlp-http"] diff --git a/opentelemetry-otlp/examples/basic-otlp-http/README.md b/opentelemetry-otlp/examples/basic-otlp-http/README.md index 2d06e6a8fe..78ff779a66 100644 --- a/opentelemetry-otlp/examples/basic-otlp-http/README.md +++ b/opentelemetry-otlp/examples/basic-otlp-http/README.md @@ -16,46 +16,25 @@ recommended approach when using OTLP exporters. While it can be modified to use a `SimpleExporter`, this requires making the main function a regular main and *not* tokio main. -// TODO: Document `hyper` feature flag when using SimpleProcessor. +// TODO: Document how to use hyper client. ## Usage -### `docker-compose` - -By default runs against the `otel/opentelemetry-collector:latest` image, and uses `reqwest-client` -as the http client, using http as the transport. 
-
-```shell
-docker-compose up
-```
-
-In another terminal run the application `cargo run`
-
-The docker-compose terminal will display logs, traces, metrics.
-
-Press Ctrl+C to stop the collector, and then tear it down:
-
-```shell
-docker-compose down
-```
-
-### Manual
-
-If you don't want to use `docker-compose`, you can manually run the `otel/opentelemetry-collector` container
-and inspect the logs to see traces being transferred.
+Run the `otel/opentelemetry-collector` container using docker
+and inspect the logs to see the exported telemetry.
 
 On Unix based systems use:
 
 ```shell
 # From the current directory, run `opentelemetry-collector`
-docker run --rm -it -p 4318:4318 -v $(pwd):/cfg otel/opentelemetry-collector:latest --config=/cfg/otel-collector-config.yaml
+docker run --rm -it -p 4317:4317 -p 4318:4318 -v $(pwd):/cfg otel/opentelemetry-collector:latest --config=/cfg/otel-collector-config.yaml
 ```
 
 On Windows use:
 
 ```shell
 # From the current directory, run `opentelemetry-collector`
-docker run --rm -it -p 4318:4318 -v "%cd%":/cfg otel/opentelemetry-collector:latest --config=/cfg/otel-collector-config.yaml
+docker run --rm -it -p 4317:4317 -p 4318:4318 -v "%cd%":/cfg otel/opentelemetry-collector:latest --config=/cfg/otel-collector-config.yaml
 ```
 
 Run the app which exports logs, metrics and traces via OTLP to the collector
@@ -64,11 +43,7 @@ Run the app which exports logs, metrics and traces via OTLP to the collector
 cargo run
 ```
 
-By default the app will use a `reqwest` client to send. A hyper 0.14 client can be used with the `hyper` feature enabled
-
-```shell
-cargo run --no-default-features --features=hyper
-```
+The app will use a `reqwest-blocking` client to send telemetry.
 
 ## View results
 
diff --git a/opentelemetry-otlp/examples/basic-otlp-http/docker-compose.yaml b/opentelemetry-otlp/examples/basic-otlp-http/docker-compose.yaml
deleted file mode 100644
index dc9d1e7a5d..0000000000
--- a/opentelemetry-otlp/examples/basic-otlp-http/docker-compose.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-version: "2"
-services:
-
-  # Collector
-  otel-collector:
-    image: otel/opentelemetry-collector:latest
-    command: ["--config=/etc/otel-collector-config.yaml", "${OTELCOL_ARGS}"]
-    volumes:
-      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
-    ports:
-      - "4318:4318" # OTLP HTTP receiver
-
-
-
-
diff --git a/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs b/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs
index bf33828091..763add8b50 100644
--- a/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs
+++ b/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs
@@ -69,9 +69,8 @@ fn init_metrics() -> Result
-fn main() -> Result<(), Box> {
+#[tokio::main]
+async fn main() -> Result<(), Box> {
     let logger_provider = init_logs()?;
 
     // Create a new OpenTelemetryTracingBridge using the above LoggerProvider.
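For reference, a minimal sketch of what this updated example boils down to. This is a sketch under assumptions, not the example's full code: it assumes the default `http-proto` and `reqwest-blocking-client` features of `opentelemetry-otlp`, covers only the trace signal (the real `main.rs` also wires logs and metrics), and the tracer name is illustrative.

```rust
use opentelemetry::global;
use opentelemetry::trace::Tracer;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // OTLP span exporter over HTTP/protobuf; the blocking reqwest client
    // does not need an async runtime of its own.
    let exporter = opentelemetry_otlp::SpanExporter::builder()
        .with_http()
        .build()?;

    // The batch exporter now runs on a dedicated background thread, so no
    // runtime argument is passed to the builder.
    let tracer_provider = opentelemetry_sdk::trace::TracerProvider::builder()
        .with_batch_exporter(exporter)
        .build();
    global::set_tracer_provider(tracer_provider.clone());

    let tracer = global::tracer("basic-otlp-http-example");
    tracer.in_span("doing_work", |_cx| {
        // Traced app logic here...
    });

    // Flush and shut down before exit so the batch thread can export.
    tracer_provider.shutdown()?;
    Ok(())
}
```

The same code also works under `#[tokio::main]`, as in the example above, since per the changelog notes later in this diff the blocking client and the dedicated exporter thread work with either a regular `main` or `tokio::main`.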
diff --git a/opentelemetry-otlp/examples/basic-otlp/Cargo.toml b/opentelemetry-otlp/examples/basic-otlp/Cargo.toml index 735a9470d7..f841ae5374 100644 --- a/opentelemetry-otlp/examples/basic-otlp/Cargo.toml +++ b/opentelemetry-otlp/examples/basic-otlp/Cargo.toml @@ -8,7 +8,7 @@ publish = false [dependencies] once_cell = { workspace = true } opentelemetry = { path = "../../../opentelemetry" } -opentelemetry_sdk = { path = "../../../opentelemetry-sdk", features = ["rt-tokio"] } +opentelemetry_sdk = { path = "../../../opentelemetry-sdk" } opentelemetry-otlp = { path = "../../../opentelemetry-otlp", features = ["grpc-tonic"] } tokio = { version = "1.0", features = ["full"] } opentelemetry-appender-tracing = { path = "../../../opentelemetry-appender-tracing", default-features = false} diff --git a/opentelemetry-otlp/examples/basic-otlp/Dockerfile b/opentelemetry-otlp/examples/basic-otlp/Dockerfile deleted file mode 100644 index b63241e283..0000000000 --- a/opentelemetry-otlp/examples/basic-otlp/Dockerfile +++ /dev/null @@ -1,6 +0,0 @@ -FROM rust:1.51 -COPY . /usr/src/basic-otlp/ -WORKDIR /usr/src/basic-otlp/ -RUN cargo build --release -RUN cargo install --path . -CMD ["/usr/local/cargo/bin/basic-otlp"] diff --git a/opentelemetry-otlp/examples/basic-otlp/README.md b/opentelemetry-otlp/examples/basic-otlp/README.md index ca02018ad5..f4ebe150fb 100644 --- a/opentelemetry-otlp/examples/basic-otlp/README.md +++ b/opentelemetry-otlp/examples/basic-otlp/README.md @@ -49,42 +49,21 @@ fn main() -> Result<(), Box> { ## Usage -### `docker-compose` - -By default runs against the `otel/opentelemetry-collector:latest` image, and uses the `tonic`'s -`grpc` example as the transport. - -```shell -docker-compose up -``` - -In another terminal run the application `cargo run` - -The docker-compose terminal will display logs, traces, metrics. - -Press Ctrl+C to stop the collector, and then tear it down: - -```shell -docker-compose down -``` - -### Manual - -If you don't want to use `docker-compose`, you can manually run the `otel/opentelemetry-collector` container -and inspect the logs to see traces being transferred. +Run the `otel/opentelemetry-collector` container using docker +and inspect the logs to see the exported telemetry. 
On Unix based systems use: ```shell # From the current directory, run `opentelemetry-collector` -docker run --rm -it -p 4317:4317 -v $(pwd):/cfg otel/opentelemetry-collector:latest --config=/cfg/otel-collector-config.yaml +docker run --rm -it -p 4317:4317 -p 4318:4318 -v $(pwd):/cfg otel/opentelemetry-collector:latest --config=/cfg/otel-collector-config.yaml ``` On Windows use: ```shell # From the current directory, run `opentelemetry-collector` -docker run --rm -it -p 4317:4317 -v "%cd%":/cfg otel/opentelemetry-collector:latest --config=/cfg/otel-collector-config.yaml +docker run --rm -it -p 4317:4317 -p 4318:4318 -v "%cd%":/cfg otel/opentelemetry-collector:latest --config=/cfg/otel-collector-config.yaml ``` Run the app which exports logs, metrics and traces via OTLP to the collector diff --git a/opentelemetry-otlp/examples/basic-otlp/docker-compose.yaml b/opentelemetry-otlp/examples/basic-otlp/docker-compose.yaml deleted file mode 100644 index fc9b3f1948..0000000000 --- a/opentelemetry-otlp/examples/basic-otlp/docker-compose.yaml +++ /dev/null @@ -1,15 +0,0 @@ -version: "2" -services: - - # Collector - otel-collector: - image: otel/opentelemetry-collector:latest - command: ["--config=/etc/otel-collector-config.yaml", "${OTELCOL_ARGS}"] - volumes: - - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml - ports: - - "4317:4317" # OTLP gRPC receiver - - - - diff --git a/opentelemetry-otlp/src/exporter/http/logs.rs b/opentelemetry-otlp/src/exporter/http/logs.rs index 9d00602eed..8b828730cd 100644 --- a/opentelemetry-otlp/src/exporter/http/logs.rs +++ b/opentelemetry-otlp/src/exporter/http/logs.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use http::{header::CONTENT_TYPE, Method}; use opentelemetry::otel_debug; -use opentelemetry_sdk::export::logs::{LogBatch, LogExporter}; +use opentelemetry_sdk::logs::{LogBatch, LogExporter}; use opentelemetry_sdk::logs::{LogError, LogResult}; use super::OtlpHttpClient; diff --git a/opentelemetry-otlp/src/exporter/http/mod.rs b/opentelemetry-otlp/src/exporter/http/mod.rs index 4d1af8c880..07a70bf57d 100644 --- a/opentelemetry-otlp/src/exporter/http/mod.rs +++ b/opentelemetry-otlp/src/exporter/http/mod.rs @@ -14,9 +14,9 @@ use opentelemetry_proto::transform::logs::tonic::group_logs_by_resource_and_scop #[cfg(feature = "trace")] use opentelemetry_proto::transform::trace::tonic::group_spans_by_resource_and_scope; #[cfg(feature = "logs")] -use opentelemetry_sdk::export::logs::LogBatch; +use opentelemetry_sdk::logs::LogBatch; #[cfg(feature = "trace")] -use opentelemetry_sdk::export::trace::SpanData; +use opentelemetry_sdk::trace::SpanData; use prost::Message; use std::collections::HashMap; use std::env; @@ -27,6 +27,9 @@ use std::time::Duration; #[cfg(feature = "metrics")] mod metrics; +#[cfg(feature = "metrics")] +use opentelemetry_sdk::metrics::data::ResourceMetrics; + #[cfg(feature = "logs")] pub(crate) mod logs; @@ -336,7 +339,7 @@ impl OtlpHttpClient { #[cfg(feature = "metrics")] fn build_metrics_export_body( &self, - metrics: &mut opentelemetry_sdk::metrics::data::ResourceMetrics, + metrics: &mut ResourceMetrics, ) -> opentelemetry_sdk::metrics::MetricResult<(Vec, &'static str)> { use opentelemetry_proto::tonic::collector::metrics::v1::ExportMetricsServiceRequest; diff --git a/opentelemetry-otlp/src/exporter/http/trace.rs b/opentelemetry-otlp/src/exporter/http/trace.rs index d188dc8911..cf3411e2e0 100644 --- a/opentelemetry-otlp/src/exporter/http/trace.rs +++ b/opentelemetry-otlp/src/exporter/http/trace.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use 
futures_core::future::BoxFuture; use http::{header::CONTENT_TYPE, Method}; use opentelemetry::{otel_debug, trace::TraceError}; -use opentelemetry_sdk::export::trace::{ExportResult, SpanData, SpanExporter}; +use opentelemetry_sdk::trace::{ExportResult, SpanData, SpanExporter}; use super::OtlpHttpClient; diff --git a/opentelemetry-otlp/src/exporter/tonic/logs.rs b/opentelemetry-otlp/src/exporter/tonic/logs.rs index 053331b428..a4d276fd8f 100644 --- a/opentelemetry-otlp/src/exporter/tonic/logs.rs +++ b/opentelemetry-otlp/src/exporter/tonic/logs.rs @@ -3,7 +3,7 @@ use opentelemetry::otel_debug; use opentelemetry_proto::tonic::collector::logs::v1::{ logs_service_client::LogsServiceClient, ExportLogsServiceRequest, }; -use opentelemetry_sdk::export::logs::{LogBatch, LogExporter}; +use opentelemetry_sdk::logs::{LogBatch, LogExporter}; use opentelemetry_sdk::logs::{LogError, LogResult}; use tonic::{codegen::CompressionEncoding, service::Interceptor, transport::Channel, Request}; diff --git a/opentelemetry-otlp/src/exporter/tonic/trace.rs b/opentelemetry-otlp/src/exporter/tonic/trace.rs index 998acafad5..fb72cccf4e 100644 --- a/opentelemetry-otlp/src/exporter/tonic/trace.rs +++ b/opentelemetry-otlp/src/exporter/tonic/trace.rs @@ -5,7 +5,7 @@ use opentelemetry::{otel_debug, trace::TraceError}; use opentelemetry_proto::tonic::collector::trace::v1::{ trace_service_client::TraceServiceClient, ExportTraceServiceRequest, }; -use opentelemetry_sdk::export::trace::{ExportResult, SpanData, SpanExporter}; +use opentelemetry_sdk::trace::{ExportResult, SpanData, SpanExporter}; use tonic::{codegen::CompressionEncoding, service::Interceptor, transport::Channel, Request}; use opentelemetry_proto::transform::trace::tonic::group_spans_by_resource_and_scope; diff --git a/opentelemetry-otlp/src/lib.rs b/opentelemetry-otlp/src/lib.rs index 3e02ef16a8..60b9d4c44c 100644 --- a/opentelemetry-otlp/src/lib.rs +++ b/opentelemetry-otlp/src/lib.rs @@ -7,10 +7,8 @@ //! order to support open-source telemetry data formats (e.g. Jaeger, //! Prometheus, etc.) sending to multiple open-source or commercial back-ends. //! -//! Currently, this crate only support sending telemetry in OTLP -//! via grpc and http (in binary format). Supports for other format and protocol -//! will be added in the future. The details of what's currently offering in this -//! crate can be found in this doc. +//! Currently, this crate supports sending telemetry in OTLP +//! via gRPC and http (binary and json). //! //! # Quickstart //! @@ -56,34 +54,36 @@ //! //! ## Performance //! -//! For optimal performance, a batch exporter is recommended as the simple -//! exporter will export each span synchronously on dropping. You can enable the -//! [`rt-tokio`], [`rt-tokio-current-thread`] or [`rt-async-std`] features and -//! specify a runtime on the pipeline builder to have a batch exporter -//! configured for you automatically. +//! For optimal performance, a batch exporting processor is recommended as the simple +//! processor will export each span synchronously on dropping, and is only good +//! for test/debug purposes. //! //! ```toml //! [dependencies] -//! opentelemetry_sdk = { version = "*", features = ["async-std"] } //! opentelemetry-otlp = { version = "*", features = ["grpc-tonic"] } //! ``` //! //! ```no_run //! # #[cfg(all(feature = "trace", feature = "grpc-tonic"))] //! # { -//! # fn main() -> Result<(), opentelemetry::trace::TraceError> { -//! let tracer = opentelemetry_sdk::trace::TracerProvider::builder() -//! .with_batch_exporter( -//! 
opentelemetry_otlp::SpanExporter::builder() -//! .with_tonic() -//! .build()?, -//! opentelemetry_sdk::runtime::Tokio, -//! ) -//! .build(); +//! use opentelemetry::global; +//! use opentelemetry::trace::Tracer; //! -//! # Ok(()) -//! # } -//! # } +//! fn main() -> Result<(), Box> { +//! // First, create a OTLP exporter builder. Configure it as you need. +//! let otlp_exporter = opentelemetry_otlp::SpanExporter::builder().with_tonic().build()?; +//! // Then pass it into provider builder +//! let _ = opentelemetry_sdk::trace::TracerProvider::builder() +//! .with_batch_exporter(otlp_exporter) +//! .build(); +//! let tracer = global::tracer("my_tracer"); +//! tracer.in_span("doing_work", |cx| { +//! // Traced app logic here... +//! }); +//! +//! Ok(()) +//! # } +//! } //! ``` //! //! [`tokio`]: https://tokio.rs @@ -92,7 +92,7 @@ //! # Feature Flags //! The following feature flags can enable exporters for different telemetry signals: //! -//! * `trace`: Includes the trace exporters (enabled by default). +//! * `trace`: Includes the trace exporters. //! * `metrics`: Includes the metrics exporters. //! * `logs`: Includes the logs exporters. //! @@ -101,8 +101,8 @@ //! //! The following feature flags offer additional configurations on gRPC: //! -//! For users uses `tonic` as grpc layer: -//! * `grpc-tonic`: Use `tonic` as grpc layer. This is enabled by default. +//! For users using `tonic` as grpc layer: +//! * `grpc-tonic`: Use `tonic` as grpc layer. //! * `gzip-tonic`: Use gzip compression for `tonic` grpc layer. //! * `zstd-tonic`: Use zstd compression for `tonic` grpc layer. //! * `tls-roots`: Adds system trust roots to rustls-based gRPC clients using the rustls-native-certs crate @@ -110,8 +110,8 @@ //! //! The following feature flags offer additional configurations on http: //! -//! * `http-proto`: Use http as transport layer, protobuf as body format. -//! * `reqwest-blocking-client`: Use reqwest blocking http client. +//! * `http-proto`: Use http as transport layer, protobuf as body format. This feature is enabled by default. +//! * `reqwest-blocking-client`: Use reqwest blocking http client. This feature is enabled by default. //! * `reqwest-client`: Use reqwest http client. //! * `reqwest-rustls`: Use reqwest with TLS with system trust roots via `rustls-native-certs` crate. //! * `reqwest-rustls-webpki-roots`: Use reqwest with TLS with Mozilla's trust roots via `webpki-roots` crate. @@ -152,7 +152,7 @@ //! .build()?; //! //! let tracer_provider = opentelemetry_sdk::trace::TracerProvider::builder() -//! .with_batch_exporter(exporter, opentelemetry_sdk::runtime::Tokio) +//! .with_batch_exporter(exporter) //! .with_config( //! trace::Config::default() //! .with_sampler(Sampler::AlwaysOn) @@ -162,7 +162,7 @@ //! .with_max_events_per_span(16) //! .with_resource(Resource::builder_empty().with_attributes([KeyValue::new("service.name", "example")]).build()), //! ).build(); -//! global::set_tracer_provider(tracer_provider); +//! global::set_tracer_provider(tracer_provider.clone()); //! let tracer = global::tracer("tracer-name"); //! # tracer //! # }; @@ -179,7 +179,7 @@ //! //! let reader = opentelemetry_sdk::metrics::PeriodicReader::builder(exporter) //! .with_interval(std::time::Duration::from_secs(3)) -//! .with_timeout(Duration::from_secs(10)) +//! .with_timeout(Duration::from_secs(10)) //! .build(); //! //! 
let provider = opentelemetry_sdk::metrics::SdkMeterProvider::builder()
@@ -262,7 +262,7 @@ pub use crate::exporter::{
     OTEL_EXPORTER_OTLP_TIMEOUT_DEFAULT,
 };
 
-use opentelemetry_sdk::export::ExportError;
+use opentelemetry_sdk::ExportError;
 
 /// Type to indicate the builder does not have a client set.
 #[derive(Debug, Default, Clone)]
diff --git a/opentelemetry-otlp/src/logs.rs b/opentelemetry-otlp/src/logs.rs
index aa4ea8fa07..3b17c30feb 100644
--- a/opentelemetry-otlp/src/logs.rs
+++ b/opentelemetry-otlp/src/logs.rs
@@ -8,7 +8,7 @@ use std::fmt::Debug;
 
 use opentelemetry_sdk::logs::LogResult;
 
-use opentelemetry_sdk::export::logs::LogBatch;
+use opentelemetry_sdk::logs::LogBatch;
 
 use crate::{HasExportConfig, NoExporterBuilderSet};
 
@@ -140,7 +140,7 @@ impl LogExporter {
     }
 }
 
-impl opentelemetry_sdk::export::logs::LogExporter for LogExporter {
+impl opentelemetry_sdk::logs::LogExporter for LogExporter {
     #[allow(clippy::manual_async_fn)]
     fn export(
         &self,
diff --git a/opentelemetry-otlp/src/span.rs b/opentelemetry-otlp/src/span.rs
index 190e3fdfce..b8c013f5d4 100644
--- a/opentelemetry-otlp/src/span.rs
+++ b/opentelemetry-otlp/src/span.rs
@@ -5,7 +5,7 @@ use std::fmt::Debug;
 
 use futures_core::future::BoxFuture;
 
-use opentelemetry_sdk::export::trace::{ExportResult, SpanData};
+use opentelemetry_sdk::trace::{ExportResult, SpanData};
 
 #[cfg(feature = "grpc-tonic")]
 use crate::{
@@ -107,7 +107,7 @@ impl HasHttpConfig for SpanExporterBuilder {
 
 /// OTLP exporter that sends tracing information
 #[derive(Debug)]
-pub struct SpanExporter(Box<dyn opentelemetry_sdk::export::trace::SpanExporter>);
+pub struct SpanExporter(Box<dyn opentelemetry_sdk::trace::SpanExporter>);
 
 impl SpanExporter {
     /// Obtain a builder to configure a [SpanExporter].
@@ -116,12 +116,12 @@ impl SpanExporter {
     }
 
     /// Build a new span exporter from a client
-    pub fn new(client: impl opentelemetry_sdk::export::trace::SpanExporter + 'static) -> Self {
+    pub fn new(client: impl opentelemetry_sdk::trace::SpanExporter + 'static) -> Self {
         SpanExporter(Box::new(client))
     }
 }
 
-impl opentelemetry_sdk::export::trace::SpanExporter for SpanExporter {
+impl opentelemetry_sdk::trace::SpanExporter for SpanExporter {
     fn export(&mut self, batch: Vec<SpanData>) -> BoxFuture<'static, ExportResult> {
         self.0.export(batch)
     }
diff --git a/opentelemetry-otlp/tests/integration_test/Cargo.toml b/opentelemetry-otlp/tests/integration_test/Cargo.toml
index 5314c1fe61..dc58d5d44b 100644
--- a/opentelemetry-otlp/tests/integration_test/Cargo.toml
+++ b/opentelemetry-otlp/tests/integration_test/Cargo.toml
@@ -14,6 +14,7 @@ testcontainers = { version = "0.23.1", features = ["http_wait"]}
 once_cell.workspace = true
 anyhow = "1.0.94"
 ctor = "0.2.9"
+uuid = { version = "1.3", features = ["v4"] }
 tracing-subscriber = { workspace = true, features = ["env-filter","registry", "std", "fmt"] }
 tracing = {workspace = true}
 
diff --git a/opentelemetry-otlp/tests/integration_test/src/test_utils.rs b/opentelemetry-otlp/tests/integration_test/src/test_utils.rs
index bd62674868..d5662407f9 100644
--- a/opentelemetry-otlp/tests/integration_test/src/test_utils.rs
+++ b/opentelemetry-otlp/tests/integration_test/src/test_utils.rs
@@ -20,8 +20,7 @@
 use anyhow::Result;
 use opentelemetry::{otel_debug, otel_info};
-use std::fs;
-use std::fs::File;
+use std::fs::{self, File, OpenOptions};
 use std::os::unix::fs::PermissionsExt;
 use std::sync::{Arc, Mutex, Once, OnceLock};
 use testcontainers::core::wait::HttpWaitStrategy;
@@ -52,7 +51,7 @@ fn init_tracing() {
         // Initialize the tracing subscriber with the OpenTelemetry layer and the
         // Fmt layer.
        tracing_subscriber::registry().with(fmt_layer).init();
-        otel_info!(name: "tracing initializing completed!");
+        otel_info!(name: "tracing::fmt initializing completed! SDK internal logs will be printed to stdout.");
     });
 }
 
@@ -125,6 +124,17 @@ fn upsert_empty_file(path: &str) -> File {
     file
 }
 
+/// Cleans up the file specified as argument by truncating its content.
+///
+/// This function is meant to cleanup the generated json file before a test starts,
+/// preventing entries from previous tests from interfering with the current test's results.
+pub fn cleanup_file(file_path: &str) {
+    let _ = OpenOptions::new()
+        .write(true)
+        .truncate(true)
+        .open(file_path); // ignore result, as file may not exist
+}
+
 ///
 /// Shuts down our collector container. This should be run as part of each test
 /// suite shutting down!
diff --git a/opentelemetry-otlp/tests/integration_test/tests/logs.rs b/opentelemetry-otlp/tests/integration_test/tests/logs.rs
index af78ee9005..26ad95c995 100644
--- a/opentelemetry-otlp/tests/integration_test/tests/logs.rs
+++ b/opentelemetry-otlp/tests/integration_test/tests/logs.rs
@@ -2,15 +2,19 @@
 use anyhow::Result;
 use ctor::dtor;
-use integration_test_runner::logs_asserter::{read_logs_from_json, LogsAsserter};
 use integration_test_runner::test_utils;
+use opentelemetry_appender_tracing::layer::OpenTelemetryTracingBridge;
 use opentelemetry_otlp::LogExporter;
 use opentelemetry_sdk::logs::LoggerProvider;
 use opentelemetry_sdk::{logs as sdklogs, Resource};
 use std::fs::File;
-use std::os::unix::fs::MetadataExt;
+use std::io::Read;
+use std::time::Duration;
+use tracing::info;
+use tracing_subscriber::layer::SubscriberExt;
+use uuid::Uuid;
 
-fn init_logs() -> Result<sdklogs::LoggerProvider> {
+fn init_logs(is_simple: bool) -> Result<sdklogs::LoggerProvider> {
     let exporter_builder = LogExporter::builder();
     #[cfg(feature = "tonic-client")]
     let exporter_builder = exporter_builder.with_tonic();
@@ -24,24 +28,119 @@
 
     let exporter = exporter_builder.build()?;
 
-    Ok(LoggerProvider::builder()
-        .with_batch_exporter(exporter)
+    let mut logger_provider_builder = LoggerProvider::builder();
+    if is_simple {
+        logger_provider_builder = logger_provider_builder.with_simple_exporter(exporter)
+    } else {
+        logger_provider_builder = logger_provider_builder.with_batch_exporter(exporter)
+    };
+
+    let logger_provider = logger_provider_builder
         .with_resource(
             Resource::builder_empty()
                 .with_service_name("logs-integration-test")
                 .build(),
         )
-        .build())
+        .build();
+
+    Ok(logger_provider)
+}
+
+async fn logs_tokio_helper(is_simple: bool, log_send_outside_rt: bool) -> Result<()> {
+    use crate::{assert_logs_results_contains, init_logs};
+    test_utils::start_collector_container().await?;
+
+    let logger_provider = init_logs(is_simple).unwrap();
+    let layer = OpenTelemetryTracingBridge::new(&logger_provider);
+    // generate a random uuid and store it as the expected uuid
+    let expected_uuid = std::sync::Arc::new(Uuid::new_v4().to_string());
+    {
+        let clone_uuid = expected_uuid.clone();
+        if log_send_outside_rt {
+            std::thread::spawn(move || {
+                let subscriber = tracing_subscriber::registry().with(layer);
+                let _guard = tracing::subscriber::set_default(subscriber);
+                info!(
+                    target: "my-target",
+                    uuid = clone_uuid.as_str(),
+                    "hello from {}. My price is {}.",
+                    "banana",
+                    2.99
+                );
+            })
+            .join()
+            .unwrap();
+        } else {
+            let subscriber = tracing_subscriber::registry().with(layer);
+            let _guard = tracing::subscriber::set_default(subscriber);
+            info!(target: "my-target", uuid = expected_uuid.as_str(), "hello from {}. My price is {}.", "banana", 2.99);
+        }
+    }
+    let _ = logger_provider.shutdown();
+    tokio::time::sleep(Duration::from_secs(5)).await;
+    assert_logs_results_contains(test_utils::LOGS_FILE, expected_uuid.as_str())?;
+    Ok(())
+}
+
+fn logs_non_tokio_helper(is_simple: bool, init_logs_inside_rt: bool) -> Result<()> {
+    let rt = tokio::runtime::Runtime::new()?;
+    let logger_provider = if init_logs_inside_rt {
+        // Initialize the logger provider inside the Tokio runtime
+        rt.block_on(async {
+            // Setup the collector container inside Tokio runtime
+            test_utils::start_collector_container().await?;
+            init_logs(is_simple)
+        })?
+    } else {
+        // Initialize the logger provider outside the Tokio runtime
+        rt.block_on(async {
+            let _ = test_utils::start_collector_container().await;
+        });
+        init_logs(is_simple)?
+    };
+
+    let layer = OpenTelemetryTracingBridge::new(&logger_provider);
+    let subscriber = tracing_subscriber::registry().with(layer);
+
+    // Generate a random UUID and store it as the expected uuid
+    let expected_uuid = Uuid::new_v4().to_string();
+    {
+        let _guard = tracing::subscriber::set_default(subscriber);
+        info!(
+            target: "my-target",
+            uuid = expected_uuid,
+            "hello from {}. My price is {}.",
+            "banana",
+            2.99
+        );
+    }
+
+    let _ = logger_provider.shutdown();
+    std::thread::sleep(Duration::from_secs(5));
+    assert_logs_results_contains(test_utils::LOGS_FILE, expected_uuid.as_str())?;
+    Ok(())
+}
+
+fn assert_logs_results_contains(result: &str, expected_content: &str) -> Result<()> {
+    let file = File::open(result)?;
+    let mut contents = String::new();
+    let mut reader = std::io::BufReader::new(&file);
+    reader.read_to_string(&mut contents)?;
+    assert!(contents.contains(expected_content));
+    Ok(())
+}
 
 #[cfg(test)]
 mod logtests {
+    // The tests in this mod work like below: Emit a log with a UUID,
+    // then read the logs from the file and check if the UUID is present in the
+    // logs. This makes it easy to validate with a single collector and its
+    // output. This is a very simple test but good enough to validate that the
+    // OTLP Exporter did work!
+
     use super::*;
     use integration_test_runner::logs_asserter::{read_logs_from_json, LogsAsserter};
-    use opentelemetry_appender_tracing::layer::OpenTelemetryTracingBridge;
-    use std::{fs::File, time::Duration};
-    use tracing::info;
-    use tracing_subscriber::layer::SubscriberExt;
+    use std::fs::File;
 
     #[test]
     #[should_panic(expected = "assertion `left == right` failed: body does not match")]
@@ -67,78 +166,196 @@ mod logtests {
         Ok(())
     }
 
+    // Batch Processor
+
+    // logger initialization - Inside RT
+    // log emission - Inside RT
+    // Client - Tonic, Reqwest-blocking
+    // Worker threads - 4
     #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
-    #[cfg(not(feature = "hyper-client"))]
-    #[cfg(not(feature = "reqwest-client"))]
-    pub async fn test_logs() -> Result<()> {
-        // Make sure the container is running
-
-        use integration_test_runner::test_utils;
-        use opentelemetry_appender_tracing::layer;
-        use tracing::info;
-        use tracing_subscriber::layer::SubscriberExt;
-
-        use crate::{assert_logs_results, init_logs};
-        test_utils::start_collector_container().await?;
-
-        let logger_provider = init_logs().unwrap();
-        let layer = layer::OpenTelemetryTracingBridge::new(&logger_provider);
-        let subscriber = tracing_subscriber::registry().with(layer);
-        {
-            let _guard = tracing::subscriber::set_default(subscriber);
-            info!(target: "my-target", "hello from {}. 
My price is {}.", "banana", 2.99); - } - // TODO: remove below wait before calling logger_provider.shutdown() - // tokio::time::sleep(Duration::from_secs(10)).await; - let _ = logger_provider.shutdown(); + #[cfg(any(feature = "tonic-client", feature = "reqwest-blocking-client"))] + pub async fn logs_batch_tokio_multi_thread() -> Result<()> { + logs_tokio_helper(false, false).await + } - tokio::time::sleep(Duration::from_secs(10)).await; + // logger initialization - Inside RT + // log emission - Inside RT + // Client - Tonic, Reqwest-blocking + // Worker threads - 1 + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] + #[cfg(any(feature = "tonic-client", feature = "reqwest-blocking-client"))] + pub async fn logs_batch_tokio_multi_with_one_worker() -> Result<()> { + logs_tokio_helper(false, false).await + } - assert_logs_results(test_utils::LOGS_FILE, "expected/logs.json")?; + // logger initialization - Inside RT + // log emission - Inside RT + // Client - Tonic, Reqwest-blocking + // current thread + #[tokio::test(flavor = "current_thread")] + #[cfg(any(feature = "tonic-client", feature = "reqwest-blocking-client"))] + pub async fn logs_batch_tokio_current() -> Result<()> { + logs_tokio_helper(false, false).await + } - Ok(()) + // logger initialization - Inside RT + // Log emission - Outside RT + // Client - Tonic, Reqwest-blocking + // Worker threads - 4 + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + #[cfg(any(feature = "tonic-client", feature = "reqwest-blocking-client"))] + pub async fn logs_batch_tokio_log_outside_rt_multi_thread() -> Result<()> { + logs_tokio_helper(false, true).await + } + + // logger initialization - Inside RT + // log emission - Outside RT + // Client - Tonic, Reqwest-blocking + // Worker threads - 1 + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] + #[cfg(any(feature = "tonic-client", feature = "reqwest-blocking-client"))] + pub async fn logs_batch_tokio_log_outside_rt_multi_with_one_worker() -> Result<()> { + logs_tokio_helper(false, true).await + } + + // logger initialization - Inside RT + // log emission - Outside RT + // Client - Tonic, Reqwest-blocking + // current thread + #[tokio::test(flavor = "current_thread")] + #[cfg(any(feature = "tonic-client", feature = "reqwest-blocking-client"))] + pub async fn logs_batch_tokio_log_outside_rt_current_thread() -> Result<()> { + logs_tokio_helper(false, true).await } - #[ignore = "TODO: [Fix Me] Failing on CI. Needs to be investigated and resolved."] + // logger initialization - Inside RT + // Log emission - Inside RT + // Client - Tonic, Reqwest-blocking + // current thread #[test] #[cfg(any(feature = "tonic-client", feature = "reqwest-blocking-client"))] - pub fn logs_batch_non_tokio_main() -> Result<()> { - // Initialize the logger provider inside a tokio runtime - // as this allows tonic client to capture the runtime, - // but actual export occurs from the dedicated std::thread - // created by BatchLogProcessor. 
-        let rt = tokio::runtime::Runtime::new()?;
-        let logger_provider = rt.block_on(async {
-            // While we're here setup our collector container too, as this needs tokio to run
-            test_utils::start_collector_container().await?;
-            init_logs()
-        })?;
+    pub fn logs_batch_non_tokio_main_init_logs_inside_rt() -> Result<()> {
+        logs_non_tokio_helper(false, true)
+    }
 
-        info!("LoggerProvider created");
-        let layer = OpenTelemetryTracingBridge::new(&logger_provider);
-        let subscriber = tracing_subscriber::registry().with(layer);
-        {
-            let _guard = tracing::subscriber::set_default(subscriber);
-            info!(target: "my-target", "hello from {}. My price is {}.", "banana", 2.99);
-        }
-        let _ = logger_provider.shutdown();
-        // tokio::time::sleep(Duration::from_secs(10)).await;
-        assert_logs_results(test_utils::LOGS_FILE, "expected/logs.json")?;
+    // logger initialization - Outside RT
+    // log emission - Outside RT
+    // Client - Tonic, Reqwest-blocking
+    // current thread
+    #[test]
+    #[cfg(feature = "reqwest-blocking-client")]
+    pub fn logs_batch_non_tokio_main_with_init_logs_outside_rt() -> Result<()> {
+        logs_non_tokio_helper(false, false)
+    }
 
-        Ok(())
+    // logger initialization - Inside RT
+    // log emission - Outside RT
+    // Client - Tonic, Reqwest-blocking
+    // current thread
+    #[test]
+    #[cfg(feature = "reqwest-blocking-client")]
+    pub fn logs_batch_non_tokio_main_with_init_logs_inside_rt() -> Result<()> {
+        logs_non_tokio_helper(false, true)
     }
-}
 
-pub fn assert_logs_results(result: &str, expected: &str) -> Result<()> {
-    let left = read_logs_from_json(File::open(expected)?)?;
-    let right = read_logs_from_json(File::open(result)?)?;
+    // **Simple Processor**
 
-    LogsAsserter::new(left, right).assert();
+    // logger initialization - Inside RT
+    // log emission - Outside RT
+    // Client - Tonic, Reqwest-blocking
+    #[test]
+    #[cfg(any(feature = "tonic-client", feature = "reqwest-blocking-client"))]
+    pub fn logs_simple_non_tokio_main_with_init_logs_inside_rt() -> Result<()> {
+        logs_non_tokio_helper(true, true)
+    }
 
-    assert!(File::open(result).unwrap().metadata().unwrap().size() > 0);
-    Ok(())
-}
+    // logger initialization - Inside RT
+    // log emission - Outside RT
+    // Client - reqwest, hyper
+    #[ignore] // reqwest and hyper clients do not work without a tokio runtime
+    #[test]
+    #[cfg(any(feature = "reqwest-client", feature = "hyper-client"))]
+    pub fn logs_simple_non_tokio_main_with_init_logs_inside_rt() -> Result<()> {
+        logs_non_tokio_helper(true, true)
+    }
+
+    // logger initialization - Outside RT
+    // log emission - Outside RT
+    // Client - Reqwest-blocking
+    #[test]
+    #[cfg(feature = "reqwest-blocking-client")]
+    pub fn logs_simple_non_tokio_main_with_init_logs_outside_rt() -> Result<()> {
+        logs_non_tokio_helper(true, false)
+    }
+
+    // logger initialization - Outside RT
+    // log emission - Outside RT
+    // Client - hyper, tonic, reqwest
+    #[ignore] // reqwest, tonic and hyper clients do not work without a tokio runtime
+    #[test]
+    #[cfg(any(
+        feature = "hyper-client",
+        feature = "tonic-client",
+        feature = "reqwest-client"
+    ))]
+    pub fn logs_simple_non_tokio_main_with_init_logs_outside_rt() -> Result<()> {
+        logs_non_tokio_helper(true, false)
+    }
+
+    // logger initialization - Inside RT
+    // log emission - Inside RT
+    // Client - reqwest-blocking
+    // Worker threads - 4
+    #[ignore] // reqwest-blocking client does not work with tokio
+    #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+    #[cfg(feature = "reqwest-blocking-client")]
+    pub async fn logs_simple_tokio_multi_thread() -> Result<()> {
logs_tokio_helper(true, false).await + } + + // logger initialization - Inside RT + // log emission - Inside RT + // Client - Tonic, Reqwest, hyper + // Worker threads - 4 + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + #[cfg(any( + feature = "tonic-client", + feature = "reqwest-client", + feature = "hyper-client" + ))] + pub async fn logs_simple_tokio_multi_thread() -> Result<()> { + logs_tokio_helper(true, false).await + } + + // logger initialization - Inside RT + // log emission - Inside RT + // Client - Tonic, Reqwest, hyper + // Worker threads - 1 + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] + #[cfg(any( + feature = "tonic-client", + feature = "reqwest-client", + feature = "hyper-client" + ))] + pub async fn logs_simple_tokio_multi_with_one_worker() -> Result<()> { + logs_tokio_helper(true, false).await + } + + // logger initialization - Inside RT + // log emission - Inside RT + // Client - Tonic, Reqwest, hyper + // Current thread + #[ignore] // https://github.com/open-telemetry/opentelemetry-rust/issues/2539 + #[tokio::test(flavor = "current_thread")] + #[cfg(any( + feature = "tonic-client", + feature = "reqwest-client", + feature = "hyper-client" + ))] + pub async fn logs_simple_tokio_current() -> Result<()> { + logs_tokio_helper(true, false).await + } +} /// /// Make sure we stop the collector container, otherwise it will sit around hogging our /// ports and subsequent test runs will fail. diff --git a/opentelemetry-otlp/tests/integration_test/tests/logs_serialize_deserialize.rs b/opentelemetry-otlp/tests/integration_test/tests/logs_serialize_deserialize.rs new file mode 100644 index 0000000000..37854ba397 --- /dev/null +++ b/opentelemetry-otlp/tests/integration_test/tests/logs_serialize_deserialize.rs @@ -0,0 +1,64 @@ +#![cfg(unix)] + +use anyhow::Result; +use ctor::dtor; +use integration_test_runner::logs_asserter::{read_logs_from_json, LogsAsserter}; +use integration_test_runner::test_utils; +use opentelemetry_appender_tracing::layer; +use opentelemetry_otlp::LogExporter; +use opentelemetry_sdk::logs::LoggerProvider; +use opentelemetry_sdk::Resource; +use std::fs::File; +use std::os::unix::fs::MetadataExt; +use tracing::info; +use tracing_subscriber::layer::SubscriberExt; + +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[cfg(feature = "tonic-client")] +pub async fn test_logs() -> Result<()> { + test_utils::start_collector_container().await?; + test_utils::cleanup_file("./actual/logs.json"); // Ensure logs.json is empty before the test + let exporter_builder = LogExporter::builder().with_tonic(); + let exporter = exporter_builder.build()?; + let mut logger_provider_builder = LoggerProvider::builder(); + logger_provider_builder = logger_provider_builder.with_batch_exporter(exporter); + let logger_provider = logger_provider_builder + .with_resource( + Resource::builder_empty() + .with_service_name("logs-integration-test") + .build(), + ) + .build(); + let layer = layer::OpenTelemetryTracingBridge::new(&logger_provider); + let subscriber = tracing_subscriber::registry().with(layer); + + { + let _guard = tracing::subscriber::set_default(subscriber); + info!(target: "my-target", "hello from {}. 
My price is {}.", "banana", 2.99);
+    }
+
+    let _ = logger_provider.shutdown();
+    tokio::time::sleep(std::time::Duration::from_secs(5)).await;
+    assert_logs_results(test_utils::LOGS_FILE, "expected/logs.json")?;
+    Ok(())
+}
+
+fn assert_logs_results(result: &str, expected: &str) -> Result<()> {
+    let left = read_logs_from_json(File::open(expected)?)?;
+    let right = read_logs_from_json(File::open(result)?)?;
+
+    LogsAsserter::new(left, right).assert();
+
+    assert!(File::open(result).unwrap().metadata().unwrap().size() > 0);
+    Ok(())
+}
+
+///
+/// Make sure we stop the collector container, otherwise it will sit around hogging our
+/// ports and subsequent test runs will fail.
+///
+#[dtor]
+fn shutdown() {
+    println!("logs::shutdown");
+    test_utils::stop_collector_container();
+}
diff --git a/opentelemetry-otlp/tests/integration_test/tests/metrics.rs b/opentelemetry-otlp/tests/integration_test/tests/metrics.rs
index 125c501e14..311ddbfae7 100644
--- a/opentelemetry-otlp/tests/integration_test/tests/metrics.rs
+++ b/opentelemetry-otlp/tests/integration_test/tests/metrics.rs
@@ -189,9 +189,8 @@ pub fn validate_metrics_against_results(scope_name: &str) -> Result<()> {
 /// TODO - fix this asynchronously.
 ///
 #[cfg(test)]
-#[cfg(not(feature = "hyper-client"))]
-#[cfg(not(feature = "reqwest-client"))]
-mod tests {
+#[cfg(any(feature = "tonic-client", feature = "reqwest-blocking-client"))]
+mod metrictests {
     use super::*;
 
     use opentelemetry::metrics::MeterProvider;
@@ -246,7 +245,6 @@
     }
 
     #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
-    // #[ignore] // skip when running unit test
     async fn test_histogram() -> Result<()> {
         _ = setup_metrics_test().await;
         const METER_NAME: &str = "test_histogram_meter";
@@ -263,7 +261,6 @@
     }
 
     #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
-    // #[ignore] // skip when running unit test
     async fn test_up_down_counter() -> Result<()> {
         _ = setup_metrics_test().await;
         const METER_NAME: &str = "test_up_down_meter";
diff --git a/opentelemetry-otlp/tests/integration_test/tests/traces.rs b/opentelemetry-otlp/tests/integration_test/tests/traces.rs
index e137fa1cad..65f42402e8 100644
--- a/opentelemetry-otlp/tests/integration_test/tests/traces.rs
+++ b/opentelemetry-otlp/tests/integration_test/tests/traces.rs
@@ -48,6 +48,7 @@ const LEMONS_KEY: Key = Key::from_static_str("lemons");
 const ANOTHER_KEY: Key = Key::from_static_str("ex.com/another");
 
 #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+#[cfg(any(feature = "tonic-client", feature = "reqwest-blocking-client"))]
 pub async fn traces() -> Result<()> {
     test_utils::start_collector_container().await?;
 
diff --git a/opentelemetry-prometheus/Cargo.toml b/opentelemetry-prometheus/Cargo.toml
index fc6c2221d6..4ace0c2a84 100644
--- a/opentelemetry-prometheus/Cargo.toml
+++ b/opentelemetry-prometheus/Cargo.toml
@@ -20,19 +20,19 @@ all-features = true
 rustdoc-args = ["--cfg", "docsrs"]
 
 [dependencies]
-once_cell = { workspace = true }
+once_cell = { version = "1.13" }
 opentelemetry = { version = "0.27", default-features = false, features = ["metrics"] }
 opentelemetry_sdk = { version = "0.27", default-features = false, features = ["metrics"] }
 prometheus = "0.13"
 protobuf = "2.14"
-tracing = {workspace = true, optional = true} # optional for opentelemetry internal logging
+tracing = {version = ">=0.1.40", default-features = false, optional = true} # optional for opentelemetry internal logging
 
 [dev-dependencies]
 opentelemetry-semantic-conventions = { version = "0.27" }
-http-body-util = { workspace = true } -hyper = { workspace = true, features = ["full"] } -hyper-util = { workspace = true, features = ["full"] } -tokio = { workspace = true, features = ["full"] } +http-body-util = { version = "0.1" } +hyper = { version = "1.3", features = ["full"] } +hyper-util = { version = "0.1", features = ["full"] } +tokio = { version = "1", features = ["full"] } [features] default = ["internal-logs"] diff --git a/opentelemetry-proto/src/transform/logs.rs b/opentelemetry-proto/src/transform/logs.rs index b6f28490d7..8bbefe0c65 100644 --- a/opentelemetry-proto/src/transform/logs.rs +++ b/opentelemetry-proto/src/transform/logs.rs @@ -12,7 +12,7 @@ pub mod tonic { transform::common::{to_nanos, tonic::ResourceAttributesWithSchema}, }; use opentelemetry::logs::{AnyValue as LogsAnyValue, Severity}; - use opentelemetry_sdk::export::logs::LogBatch; + use opentelemetry_sdk::logs::LogBatch; use std::borrow::Cow; use std::collections::HashMap; @@ -221,15 +221,39 @@ pub mod tonic { mod tests { use crate::transform::common::tonic::ResourceAttributesWithSchema; use opentelemetry::logs::LogRecord as _; + use opentelemetry::logs::Logger as _; + use opentelemetry::logs::LoggerProvider as _; use opentelemetry::InstrumentationScope; - use opentelemetry_sdk::{export::logs::LogBatch, logs::LogRecord, Resource}; + use opentelemetry_sdk::logs::LogProcessor; + use opentelemetry_sdk::logs::{LogResult, LoggerProvider}; + use opentelemetry_sdk::{logs::LogBatch, logs::LogRecord, Resource}; use std::time::SystemTime; + #[derive(Debug)] + struct MockProcessor; + + impl LogProcessor for MockProcessor { + fn emit(&self, _record: &mut LogRecord, _instrumentation: &InstrumentationScope) {} + + fn force_flush(&self) -> LogResult<()> { + Ok(()) + } + + fn shutdown(&self) -> LogResult<()> { + Ok(()) + } + } + fn create_test_log_data( instrumentation_name: &str, _message: &str, ) -> (LogRecord, InstrumentationScope) { - let mut logrecord = LogRecord::default(); + let processor = MockProcessor {}; + let logger = LoggerProvider::builder() + .with_log_processor(processor) + .build() + .logger("test"); + let mut logrecord = logger.create_log_record(); logrecord.set_timestamp(SystemTime::now()); logrecord.set_observed_timestamp(SystemTime::now()); let instrumentation = diff --git a/opentelemetry-proto/src/transform/metrics.rs b/opentelemetry-proto/src/transform/metrics.rs index cb135ebf83..680da03b3f 100644 --- a/opentelemetry-proto/src/transform/metrics.rs +++ b/opentelemetry-proto/src/transform/metrics.rs @@ -10,8 +10,8 @@ pub mod tonic { use opentelemetry::{otel_debug, Key, Value}; use opentelemetry_sdk::metrics::data::{ - self, Exemplar as SdkExemplar, ExponentialHistogram as SdkExponentialHistogram, - Gauge as SdkGauge, Histogram as SdkHistogram, Metric as SdkMetric, + Exemplar as SdkExemplar, ExponentialHistogram as SdkExponentialHistogram, + Gauge as SdkGauge, Histogram as SdkHistogram, Metric as SdkMetric, ResourceMetrics, ScopeMetrics as SdkScopeMetrics, Sum as SdkSum, }; use opentelemetry_sdk::metrics::Temporality; @@ -110,8 +110,8 @@ pub mod tonic { } } - impl From<&data::ResourceMetrics> for ExportMetricsServiceRequest { - fn from(rm: &data::ResourceMetrics) -> Self { + impl From<&ResourceMetrics> for ExportMetricsServiceRequest { + fn from(rm: &ResourceMetrics) -> Self { ExportMetricsServiceRequest { resource_metrics: vec![TonicResourceMetrics { resource: Some((&rm.resource).into()), diff --git a/opentelemetry-proto/src/transform/trace.rs b/opentelemetry-proto/src/transform/trace.rs index 
8806af41c9..ab70bedd1e 100644
--- a/opentelemetry-proto/src/transform/trace.rs
+++ b/opentelemetry-proto/src/transform/trace.rs
@@ -8,7 +8,7 @@ pub mod tonic {
     };
     use opentelemetry::trace;
     use opentelemetry::trace::{Link, SpanId, SpanKind};
-    use opentelemetry_sdk::export::trace::SpanData;
+    use opentelemetry_sdk::trace::SpanData;
     use std::collections::HashMap;
 
     impl From<SpanKind> for span::SpanKind {
@@ -45,8 +45,8 @@
             }
         }
     }
 
-    impl From<opentelemetry_sdk::export::trace::SpanData> for Span {
-        fn from(source_span: opentelemetry_sdk::export::trace::SpanData) -> Self {
+    impl From<opentelemetry_sdk::trace::SpanData> for Span {
+        fn from(source_span: opentelemetry_sdk::trace::SpanData) -> Self {
             let span_kind: span::SpanKind = source_span.span_kind.into();
             Span {
                 trace_id: source_span.span_context.trace_id().to_bytes().to_vec(),
@@ -198,8 +198,8 @@ mod tests {
     };
     use opentelemetry::InstrumentationScope;
     use opentelemetry::KeyValue;
-    use opentelemetry_sdk::export::trace::SpanData;
     use opentelemetry_sdk::resource::Resource;
+    use opentelemetry_sdk::trace::SpanData;
     use opentelemetry_sdk::trace::{SpanEvents, SpanLinks};
     use std::borrow::Cow;
     use std::time::{Duration, SystemTime};
diff --git a/opentelemetry-proto/src/transform/tracez.rs b/opentelemetry-proto/src/transform/tracez.rs
index 525064efc9..3147a82cbb 100644
--- a/opentelemetry-proto/src/transform/tracez.rs
+++ b/opentelemetry-proto/src/transform/tracez.rs
@@ -1,7 +1,7 @@
 #[cfg(all(feature = "gen-tonic-messages", feature = "zpages"))]
 mod tonic {
     use opentelemetry::trace::{Event, Status};
-    use opentelemetry_sdk::export::trace::SpanData;
+    use opentelemetry_sdk::trace::SpanData;
 
     use crate::proto::tonic::{
         trace::v1::{span::Event as SpanEvent, Status as SpanStatus},
diff --git a/opentelemetry-sdk/CHANGELOG.md b/opentelemetry-sdk/CHANGELOG.md
index b7a3f2a0da..3d27d1ef1c 100644
--- a/opentelemetry-sdk/CHANGELOG.md
+++ b/opentelemetry-sdk/CHANGELOG.md
@@ -17,6 +17,7 @@
 - *Feature*: Add `ResourceBuilder` for an easy way to create new `Resource`s
 - *Breaking*: Remove `Resource::{new,empty,from_detectors,new_with_defaults,from_schema_url,merge,default}` from public api. To create Resources you should only use `Resource::builder()` or `Resource::builder_empty()`. See [#2322](https://github.com/open-telemetry/opentelemetry-rust/pull/2322) for a migration guide. Example Usage:
+
   ```rust
   // old
   Resource::default().with_attributes([
@@ -30,6 +31,7 @@
     .with_attribute(KeyValue::new("key", "value"))
     .build();
   ```
+
 - *Breaking* The LogExporter::export() method no longer requires a mutable reference to self:
   Before:
     async fn export(&mut self, _batch: LogBatch<'_>) -> LogResult<()>
   After:
@@ -44,7 +46,7 @@
 - *Breaking* Removed the following deprecated methods:
   - `Logger::provider()` : Previously deprecated in version 0.27.1
   - `Logger::instrumentation_scope()` : Previously deprecated in version 0.27.1.
-  Migration Guidance: 
+  Migration Guidance:
     - These methods were intended for log appenders. Keep the clone of the provider handle, instead of depending on above methods.
 
 - *Breaking* - `PeriodicReader` Updates
@@ -58,38 +60,50 @@
   **`experimental_metrics_periodicreader_with_async_runtime`**.
 
   Migration Guide:
-
-  1. *Default Implementation, requires no async runtime* (**Recommended**) The
+  1. *Default Implementation, requires no async runtime* (**Recommended**) The
      new default implementation does not require a runtime argument.
Replace the builder method accordingly: - - *Before:* - ```rust - let reader = opentelemetry_sdk::metrics::PeriodicReader::builder(exporter, runtime::Tokio).build(); - ``` - - *After:* - ```rust - let reader = opentelemetry_sdk::metrics::PeriodicReader::builder(exporter).build(); + *Before:* + + ```rust + let reader = opentelemetry_sdk::metrics::PeriodicReader::builder(exporter, runtime::Tokio).build(); ``` + *After:* + + ```rust + let reader = opentelemetry_sdk::metrics::PeriodicReader::builder(exporter).build(); + ``` + + The new PeriodicReader can be used with the OTLP Exporter, and supports + the following exporter features: + - `grpc-tonic`: This requires `MeterProvider` to be created within a tokio + runtime. + - `reqwest-blocking-client`: Works with a regular `main` or `tokio::main`. + + In other words, other clients like `reqwest` and `hyper` are not supported. + 2. *Async Runtime Support* If your application cannot spin up new threads or you prefer using async runtimes, enable the "experimental_metrics_periodicreader_with_async_runtime" feature flag and - adjust code as below. + adjust code as below. - *Before:* + ```rust let reader = opentelemetry_sdk::metrics::PeriodicReader::builder(exporter, runtime::Tokio).build(); ``` - *After:* + ```rust let reader = opentelemetry_sdk::metrics::periodic_reader_with_async_runtime::PeriodicReader::builder(exporter, runtime::Tokio).build(); - ``` *Requirements:* - Enable the feature flag: - `experimental_metrics_periodicreader_with_async_runtime`. + `experimental_metrics_periodicreader_with_async_runtime`. - Continue enabling one of the async runtime feature flags: `rt-tokio`, `rt-tokio-current-thread`, or `rt-async-std`. @@ -104,11 +118,10 @@ - Getter methods have been introduced to access field values. This change impacts custom exporter and processor developers by requiring updates to code that directly accessed LogRecord fields. They must now use the provided getter methods (e.g., `log_record.event_name()` instead of `log_record.event_name`). -- Upgrade the tracing crate used for internal logging to version 0.1.40 or later. This is necessary because the internal logging macros utilize the name field as +- Upgrade the tracing crate used for internal logging to version 0.1.40 or later. This is necessary because the internal logging macros utilize the name field as metadata, a feature introduced in version 0.1.40. [#2418](https://github.com/open-telemetry/opentelemetry-rust/pull/2418) -- **Breaking** [#2436](https://github.com/open-telemetry/opentelemetry-rust/pull/2436) - +- *Breaking* - `BatchLogProcessor` Updates [#2436](https://github.com/open-telemetry/opentelemetry-rust/pull/2436) `BatchLogProcessor` no longer requires an async runtime by default. Instead, a dedicated background thread is created to do the batch processing and exporting. @@ -120,6 +133,7 @@ metadata, a feature introduced in version 0.1.40. [#2418](https://github.com/ope new default implementation does not require a runtime argument. Replace the builder method accordingly: - *Before:* + ```rust let logger_provider = LoggerProvider::builder() .with_log_processor(BatchLogProcessor::builder(exporter, runtime::Tokio).build()) @@ -127,12 +141,21 @@ metadata, a feature introduced in version 0.1.40.
[#2418](https://github.com/ope ``` - *After:* + ```rust let logger_provider = LoggerProvider::builder() .with_log_processor(BatchLogProcessor::builder(exporter).build()) .build(); ``` + The new BatchLogProcessor can be used with the OTLP Exporter, and supports + the following exporter features: + - `grpc-tonic`: This requires `LoggerProvider` to be created within a tokio + runtime. + - `reqwest-blocking-client`: Works with a regular `main` or `tokio::main`. + + In other words, other clients like `reqwest` and `hyper` are not supported. + 2. *Async Runtime Support* If your application cannot spin up new threads or you prefer using async runtimes, enable the @@ -140,6 +163,7 @@ metadata, a feature introduced in version 0.1.40. [#2418](https://github.com/ope adjust code as below. - *Before:* + ```rust let logger_provider = LoggerProvider::builder() .with_log_processor(BatchLogProcessor::builder(exporter, runtime::Tokio).build()) @@ -147,6 +171,7 @@ metadata, a feature introduced in version 0.1.40. [#2418](https://github.com/ope ``` - *After:* + ```rust let logger_provider = LoggerProvider::builder() .with_log_processor(log_processor_with_async_runtime::BatchLogProcessor::builder(exporter, runtime::Tokio).build()) @@ -155,11 +180,11 @@ metadata, a feature introduced in version 0.1.40. [#2418](https://github.com/ope *Requirements:* - Enable the feature flag: - `experimental_logs_batch_log_processor_with_async_runtime`. + `experimental_logs_batch_log_processor_with_async_runtime`. - Continue enabling one of the async runtime feature flags: `rt-tokio`, `rt-tokio-current-thread`, or `rt-async-std`. -- **Breaking** [#2456](https://github.com/open-telemetry/opentelemetry-rust/pull/2456) +- *Breaking* - `BatchSpanProcessor` Updates [#2456](https://github.com/open-telemetry/opentelemetry-rust/pull/2456) `BatchSpanProcessor` no longer requires an async runtime by default. Instead, a dedicated background thread is created to do the batch processing and exporting. @@ -172,6 +197,7 @@ metadata, a feature introduced in version 0.1.40. [#2418](https://github.com/ope new default implementation does not require a runtime argument. Replace the builder method accordingly: - *Before:* + ```rust let tracer_provider = TracerProvider::builder() .with_span_processor(BatchSpanProcessor::builder(exporter, runtime::Tokio).build()) @@ -179,12 +205,24 @@ metadata, a feature introduced in version 0.1.40. [#2418](https://github.com/ope - *After:* + ```rust let tracer_provider = TracerProvider::builder() .with_span_processor(BatchSpanProcessor::builder(exporter).build()) .build(); ``` + This implementation does not support multiple concurrent exports + (`with_max_concurrent_exports` is not supported). + + The new BatchSpanProcessor can be used with the OTLP Exporter, and supports + the following exporter features: + - `grpc-tonic`: This requires `TracerProvider` to be created within a tokio + runtime. + - `reqwest-blocking-client`: Works with a regular `main` or `tokio::main`. + + In other words, other clients like `reqwest` and `hyper` are not supported. + 2. *Async Runtime Support* If your application cannot spin up new threads or you prefer using async runtimes, enable the @@ -192,6 +230,7 @@ metadata, a feature introduced in version 0.1.40. [#2418](https://github.com/ope adjust code as below. - *Before:* + ```rust let tracer_provider = TracerProvider::builder() .with_span_processor(BatchSpanProcessor::builder(exporter, runtime::Tokio).build()) @@ -199,6 +238,7 @@ metadata, a feature introduced in version 0.1.40.
[#2418](https://github.com/ope ``` - *After:* + ```rust let tracer_provider = TracerProvider::builder() .with_span_processor(span_processor_with_async_runtime::BatchSpanProcessor::builder(exporter, runtime::Tokio).build()) @@ -207,13 +247,62 @@ metadata, a feature introduced in version 0.1.40. [#2418](https://github.com/ope *Requirements:* - Enable the feature flag: - `experimental_trace_batch_span_processor_with_async_runtime`. + `experimental_trace_batch_span_processor_with_async_runtime`. - Continue enabling one of the async runtime feature flags: `rt-tokio`, `rt-tokio-current-thread`, or `rt-async-std`. - Bug fix: Empty Tracer names are retained as-is instead of being replaced with "rust.opentelemetry.io/sdk/tracer" [#2486](https://github.com/open-telemetry/opentelemetry-rust/pull/2486) +- Update `EnvResourceDetector` to allow resource attribute values containing + equal signs (`"="`). [#2120](https://github.com/open-telemetry/opentelemetry-rust/pull/2120) + +- **Breaking** Introduced `experimental_async_runtime` feature for runtime-specific traits. - Runtime-specific features (`rt-tokio`, `rt-tokio-current-thread`, and `rt-async-std`) + now depend on the `experimental_async_runtime` feature. + - For most users, no action is required. Enabling runtime features such as `rt-tokio`, `rt-tokio-current-thread`, + or `rt-async-std` will automatically enable the `experimental_async_runtime` feature. + - If you're implementing a custom runtime, you must explicitly enable the `experimental_async_runtime` feature in your + Cargo.toml and implement the required `Runtime` traits. + +- Removed Metrics Cardinality Limit feature. This was originally introduced in +[#1066](https://github.com/open-telemetry/opentelemetry-rust/pull/1066) with a +hardcoded limit of 2000 and no ability to change it. This feature will be +re-introduced at a future date, along with the ability to change the cardinality +limit. + +- *Breaking* Removed unused `opentelemetry_sdk::Error` enum. +- *Breaking* `Resource::get()` modified to require a reference to `Key` instead of an owned `Key`. + Replace `get(Key::from_static_str("key"))` with `get(&Key::from_static_str("key"))` +- *Breaking* (Affects custom Exporter authors only) Moved `ExportError` trait from `opentelemetry::export::ExportError` to `opentelemetry_sdk::ExportError` +- *Breaking (Affects custom SpanExporter, SpanProcessor authors only)*: Rename namespaces for Span exporter structs/traits. + before: + `opentelemetry_sdk::export::trace::{ExportResult, SpanData, SpanExporter};` + now: + `opentelemetry_sdk::trace::{ExportResult, SpanData, SpanExporter};` + +- *Breaking (Affects custom LogExporter, LogProcessor authors only)*: Rename namespaces for Log exporter structs/traits. + before: + `opentelemetry_sdk::export::logs::{ExportResult, LogBatch, LogExporter};` + now: + `opentelemetry_sdk::logs::{ExportResult, LogBatch, LogExporter};` + +- *Breaking* `opentelemetry_sdk::LogRecord::default()` method is removed. + The only way to create a log record outside the opentelemetry_sdk crate is via the + `Logger::create_log_record()` method. + +- Rename `opentelemetry_sdk::logs::Builder` to `opentelemetry_sdk::logs::LoggerProviderBuilder`. +- Rename `opentelemetry_sdk::trace::Builder` to `opentelemetry_sdk::trace::TracerProviderBuilder`. + +- *Breaking*: Rename namespaces for InMemoryExporters. (The module is still under the "testing" feature flag) + before: + `opentelemetry_sdk::testing::logs::{InMemoryLogExporter, InMemoryLogExporterBuilder};` + `opentelemetry_sdk::testing::trace::{InMemorySpanExporter, InMemorySpanExporterBuilder};` + `opentelemetry_sdk::testing::metrics::{InMemoryMetricExporter, InMemoryMetricExporterBuilder};` + now: + `opentelemetry_sdk::logs::{InMemoryLogExporter, InMemoryLogExporterBuilder};` + `opentelemetry_sdk::trace::{InMemorySpanExporter, InMemorySpanExporterBuilder};` + `opentelemetry_sdk::metrics::{InMemoryMetricExporter, InMemoryMetricExporterBuilder};`
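As a quick illustration of the renamed import paths above, here is a minimal sketch (assuming the `testing` feature is enabled; the bindings themselves are illustrative only):

```rust
// Hypothetical snippet showing only the new import locations after this
// rename; each exporter still offers a Default constructor, as the doc
// examples elsewhere in this diff show.
use opentelemetry_sdk::logs::InMemoryLogExporter;
use opentelemetry_sdk::metrics::InMemoryMetricExporter;
use opentelemetry_sdk::trace::InMemorySpanExporter;

fn main() {
    let _logs = InMemoryLogExporter::default();
    let _metrics = InMemoryMetricExporter::default();
    let _spans = InMemorySpanExporter::default();
}
```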
## 0.27.1 @@ -223,6 +312,7 @@ Released 2024-Nov-27 - `trace::Config` methods are moving onto `TracerProvider` Builder to be consistent with other signals. See https://github.com/open-telemetry/opentelemetry-rust/pull/2303 for the migration guide. `trace::Config` is scheduled to be removed from public API in `v0.28.0`. example: + ```rust // old let tracer_provider: TracerProvider = TracerProvider::builder() @@ -234,6 +324,7 @@ Released 2024-Nov-27 .with_resource(Resource::empty()) .build(); ``` + - `logs::LogData` struct is deprecated, and scheduled to be removed from public API in `v0.28.0`. - Bug fix: Empty Meter names are retained as-is instead of being replaced with "rust.opentelemetry.io/sdk/meter" @@ -242,10 +333,10 @@ Released 2024-Nov-27 - Bug fix: Empty Logger names are retained as-is instead of being replaced with "rust.opentelemetry.io/sdk/logger" [#2316](https://github.com/open-telemetry/opentelemetry-rust/pull/2316) - + - `Logger::provider`: This method is deprecated as of version `0.27.1`. To be removed in `0.28.0`. - `Logger::instrumentation_scope`: This method is deprecated as of version `0.27.1`. To be removed in `0.28.0`. - Migration Guidance: + Migration Guidance: - These methods are intended for log appenders. Keep the clone of the provider handle, instead of depending on the above methods. @@ -271,7 +362,7 @@ Released 2024-Nov-11 - **Replaced** - ([#2217](https://github.com/open-telemetry/opentelemetry-rust/pull/2217)): Removed `{Delta,Cumulative}TemporalitySelector::new()` in favor of directly using `Temporality` enum to simplify the configuration of MetricsExporterBuilder with different temporalities. - **Renamed** - - ([#2232](https://github.com/open-telemetry/opentelemetry-rust/pull/2232)): The `init` method used to create instruments has been renamed to `build`. + - ([#2232](https://github.com/open-telemetry/opentelemetry-rust/pull/2232)): The `init` method used to create instruments has been renamed to `build`. Before: ```rust let counter = meter.u64_counter("my_counter").init();
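The diff context cuts the example off here; per the entry's own wording, the completed pair would look like the following sketch (`meter` is assumed to be an `opentelemetry::metrics::Meter`):

```rust
// Before (as shown above):
let counter = meter.u64_counter("my_counter").init();
// After the rename described in this entry:
let counter = meter.u64_counter("my_counter").build();
```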
diff --git a/opentelemetry-sdk/Cargo.toml b/opentelemetry-sdk/Cargo.toml index 167846dce1..8d4d6da1ed 100644 --- a/opentelemetry-sdk/Cargo.toml +++ b/opentelemetry-sdk/Cargo.toml @@ -39,7 +39,7 @@ rstest = "0.23.0" temp-env = { workspace = true } [target.'cfg(not(target_os = "windows"))'.dev-dependencies] -pprof = { version = "0.13", features = ["flamegraph", "criterion"] } +pprof = { version = "0.14", features = ["flamegraph", "criterion"] } [features] default = ["trace", "metrics", "logs", "internal-logs"] @@ -49,9 +49,10 @@ logs = ["opentelemetry/logs", "serde_json"] spec_unstable_logs_enabled = ["logs", "opentelemetry/spec_unstable_logs_enabled"] metrics = ["opentelemetry/metrics", "glob", "async-trait"] testing = ["opentelemetry/testing", "trace", "metrics", "logs", "rt-async-std", "rt-tokio", "rt-tokio-current-thread", "tokio/macros", "tokio/rt-multi-thread"] -rt-tokio = ["tokio", "tokio-stream"] -rt-tokio-current-thread = ["tokio", "tokio-stream"] -rt-async-std = ["async-std"] +experimental_async_runtime = [] +rt-tokio = ["tokio", "tokio-stream", "experimental_async_runtime"] +rt-tokio-current-thread = ["tokio", "tokio-stream", "experimental_async_runtime"] +rt-async-std = ["async-std", "experimental_async_runtime"] internal-logs = ["tracing"] experimental_metrics_periodicreader_with_async_runtime = ["metrics"] spec_unstable_metrics_views = ["metrics"] diff --git a/opentelemetry-sdk/benches/batch_span_processor.rs b/opentelemetry-sdk/benches/batch_span_processor.rs index d57ef26157..7cd9ff5c1c 100644 --- a/opentelemetry-sdk/benches/batch_span_processor.rs +++ b/opentelemetry-sdk/benches/batch_span_processor.rs @@ -2,8 +2,8 @@ use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; use opentelemetry::trace::{ SpanContext, SpanId, SpanKind, Status, TraceFlags, TraceId, TraceState, }; -use opentelemetry_sdk::export::trace::SpanData; use opentelemetry_sdk::testing::trace::NoopSpanExporter; +use opentelemetry_sdk::trace::SpanData; use opentelemetry_sdk::trace::{ BatchConfigBuilder, BatchSpanProcessor, SpanEvents, SpanLinks, SpanProcessor, }; diff --git a/opentelemetry-sdk/benches/context.rs b/opentelemetry-sdk/benches/context.rs index 87f49942a6..8a9108639a 100644 --- a/opentelemetry-sdk/benches/context.rs +++ b/opentelemetry-sdk/benches/context.rs @@ -9,7 +9,7 @@ use opentelemetry::{ Context, ContextGuard, }; use opentelemetry_sdk::{ - export::trace::{ExportResult, SpanData, SpanExporter}, + trace::{ExportResult, SpanData, SpanExporter}, trace::{Sampler, TracerProvider}, }; #[cfg(not(target_os = "windows"))] diff --git a/opentelemetry-sdk/benches/log_exporter.rs b/opentelemetry-sdk/benches/log_exporter.rs index c2ecb78ce9..523725fc7e 100644 --- a/opentelemetry-sdk/benches/log_exporter.rs +++ b/opentelemetry-sdk/benches/log_exporter.rs @@ -20,7 +20,7 @@ use opentelemetry::logs::{LogRecord as _, Logger as _, LoggerProvider as _, Seve use opentelemetry_sdk::logs::LogResult; use opentelemetry::InstrumentationScope; -use opentelemetry_sdk::export::logs::LogBatch; +use opentelemetry_sdk::logs::LogBatch; use opentelemetry_sdk::logs::LogProcessor; use opentelemetry_sdk::logs::LogRecord; use opentelemetry_sdk::logs::LoggerProvider; diff --git a/opentelemetry-sdk/benches/span_builder.rs b/opentelemetry-sdk/benches/span_builder.rs index 6f0c828b07..7f78b738f9 100644 --- a/opentelemetry-sdk/benches/span_builder.rs +++ b/opentelemetry-sdk/benches/span_builder.rs @@ -5,8 +5,8 @@ use opentelemetry::{ KeyValue, };
use opentelemetry_sdk::{ - export::trace::{ExportResult, SpanData, SpanExporter}, trace as sdktrace, + trace::{ExportResult, SpanData, SpanExporter}, }; #[cfg(not(target_os = "windows"))] use pprof::criterion::{Output, PProfProfiler}; diff --git a/opentelemetry-sdk/benches/trace.rs b/opentelemetry-sdk/benches/trace.rs index 36f6acec4a..bdbd4a5ee3 100644 --- a/opentelemetry-sdk/benches/trace.rs +++ b/opentelemetry-sdk/benches/trace.rs @@ -5,8 +5,8 @@ use opentelemetry::{ KeyValue, }; use opentelemetry_sdk::{ - export::trace::{ExportResult, SpanData, SpanExporter}, trace as sdktrace, + trace::{ExportResult, SpanData, SpanExporter}, }; #[cfg(not(target_os = "windows"))] use pprof::criterion::{Output, PProfProfiler}; diff --git a/opentelemetry-sdk/src/error.rs b/opentelemetry-sdk/src/error.rs index 115da17b78..6a108f0cc9 100644 --- a/opentelemetry-sdk/src/error.rs +++ b/opentelemetry-sdk/src/error.rs @@ -1,46 +1,7 @@ //! Wrapper for error from trace, logs and metrics part of open telemetry. -use std::sync::PoisonError; -#[cfg(feature = "logs")] -use crate::logs::LogError; -#[cfg(feature = "metrics")] -use crate::metrics::MetricError; -use opentelemetry::propagation::PropagationError; -#[cfg(feature = "trace")] -use opentelemetry::trace::TraceError; - -/// Wrapper for error from both tracing and metrics part of open telemetry. -#[derive(thiserror::Error, Debug)] -#[non_exhaustive] -pub enum Error { - #[cfg(feature = "trace")] - #[cfg_attr(docsrs, doc(cfg(feature = "trace")))] - #[error(transparent)] - /// Failed to export traces. - Trace(#[from] TraceError), - #[cfg(feature = "metrics")] - #[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] - #[error(transparent)] - /// An issue raised by the metrics module. - Metric(#[from] MetricError), - - #[cfg(feature = "logs")] - #[cfg_attr(docsrs, doc(cfg(feature = "logs")))] - #[error(transparent)] - /// Failed to export logs. - Log(#[from] LogError), - - #[error(transparent)] - /// Error happens when injecting and extracting information using propagators. - Propagation(#[from] PropagationError), - - #[error("{0}")] - /// Other types of failures not covered by the variants above. - Other(String), -} - -impl<T> From<PoisonError<T>> for Error { - fn from(err: PoisonError<T>) -> Self { - Error::Other(err.to_string()) - } +/// Trait for errors returned by exporters +pub trait ExportError: std::error::Error + Send + Sync + 'static { + /// The name of the exporter that returned this error + fn exporter_name(&self) -> &'static str; } diff --git a/opentelemetry-sdk/src/export/mod.rs b/opentelemetry-sdk/src/export/mod.rs deleted file mode 100644 index 21dc2b570c..0000000000 --- a/opentelemetry-sdk/src/export/mod.rs +++ /dev/null @@ -1,15 +0,0 @@ -//! Telemetry Export - -#[cfg(feature = "logs")] -#[cfg_attr(docsrs, doc(cfg(feature = "logs")))] -pub mod logs; - -#[cfg(feature = "trace")] -#[cfg_attr(docsrs, doc(cfg(feature = "trace")))] -pub mod trace; - -/// Trait for errors returned by exporters -pub trait ExportError: std::error::Error + Send + Sync + 'static { - /// The name of exporter that returned this error - fn exporter_name(&self) -> &'static str; -} diff --git a/opentelemetry-sdk/src/lib.rs b/opentelemetry-sdk/src/lib.rs index 4afda7deb7..bc57acff9f 100644 --- a/opentelemetry-sdk/src/lib.rs +++ b/opentelemetry-sdk/src/lib.rs @@ -16,7 +16,7 @@ //! //! fn main() { //! // Choose an exporter like `opentelemetry_stdout::SpanExporter` -//! # fn example<T: opentelemetry_sdk::export::trace::SpanExporter>(new_exporter: impl Fn() -> T) { +//! # fn example<T: opentelemetry_sdk::trace::SpanExporter>(new_exporter: impl Fn() -> T) { //! let exporter = new_exporter(); //! //!
// Create a new trace pipeline that prints to stdout @@ -44,10 +44,7 @@ //! [examples]: https://github.com/open-telemetry/opentelemetry-rust/tree/main/examples //! [`trace`]: https://docs.rs/opentelemetry/latest/opentelemetry/trace/index.html //! -//! # Metrics (Alpha) -//! -//! Note: the metrics implementation is **still in progress** and **subject to major -//! changes**. +//! # Metrics //! //! ### Creating instruments and recording measurements //! @@ -94,6 +91,7 @@ //! Support for recording and exporting telemetry asynchronously and performing //! metrics aggregation can be added via the following flags: //! +//! * `experimental_async_runtime`: Enables the experimental `Runtime` trait and related functionality. //! * `rt-tokio`: Spawn telemetry tasks using [tokio]'s multi-thread runtime. //! * `rt-tokio-current-thread`: Spawn telemetry tasks on a separate runtime so that the main runtime won't be blocked. //! * `rt-async-std`: Spawn telemetry tasks using [async-std]'s runtime. @@ -120,7 +118,6 @@ )] #![cfg_attr(test, deny(warnings))] -pub mod export; pub(crate) mod growable_array; #[cfg(feature = "logs")] @@ -133,6 +130,7 @@ pub mod metrics; #[cfg_attr(docsrs, doc(cfg(feature = "trace")))] pub mod propagation; pub mod resource; +#[cfg(feature = "experimental_async_runtime")] pub mod runtime; #[cfg(any(feature = "testing", test))] #[cfg_attr(docsrs, doc(cfg(any(feature = "testing", test))))] @@ -150,3 +148,4 @@ pub mod util; pub use resource::Resource; pub mod error; +pub use error::ExportError; diff --git a/opentelemetry-sdk/src/logs/error.rs b/opentelemetry-sdk/src/logs/error.rs index 4f33ba6dbf..d9b1b42157 100644 --- a/opentelemetry-sdk/src/logs/error.rs +++ b/opentelemetry-sdk/src/logs/error.rs @@ -1,4 +1,4 @@ -use crate::export::ExportError; +use crate::ExportError; use std::{sync::PoisonError, time::Duration}; use thiserror::Error; diff --git a/opentelemetry-sdk/src/export/logs/mod.rs b/opentelemetry-sdk/src/logs/export.rs similarity index 100% rename from opentelemetry-sdk/src/export/logs/mod.rs rename to opentelemetry-sdk/src/logs/export.rs diff --git a/opentelemetry-sdk/src/testing/logs/in_memory_exporter.rs b/opentelemetry-sdk/src/logs/in_memory_exporter.rs similarity index 93% rename from opentelemetry-sdk/src/testing/logs/in_memory_exporter.rs rename to opentelemetry-sdk/src/logs/in_memory_exporter.rs index dff6d93c7e..0df3a0caeb 100644 --- a/opentelemetry-sdk/src/testing/logs/in_memory_exporter.rs +++ b/opentelemetry-sdk/src/logs/in_memory_exporter.rs @@ -1,6 +1,5 @@ -use crate::export::logs::{LogBatch, LogExporter}; use crate::logs::LogRecord; -use crate::logs::{LogError, LogResult}; +use crate::logs::{LogBatch, LogError, LogExporter, LogResult}; use crate::Resource; use opentelemetry::InstrumentationScope; use std::borrow::Cow; @@ -16,7 +15,7 @@ use std::sync::{Arc, Mutex}; /// ```no_run ///# use opentelemetry_sdk::logs::{BatchLogProcessor, LoggerProvider}; ///# use opentelemetry_sdk::runtime; -///# use opentelemetry_sdk::testing::logs::InMemoryLogExporter; +///# use opentelemetry_sdk::logs::InMemoryLogExporter; /// ///# #[tokio::main] ///# async fn main() { @@ -73,7 +72,7 @@ pub struct LogDataWithResource { /// # Example /// /// ```no_run -///# use opentelemetry_sdk::testing::logs::{InMemoryLogExporter, InMemoryLogExporterBuilder}; +///# use opentelemetry_sdk::logs::{InMemoryLogExporter, InMemoryLogExporterBuilder}; ///# use opentelemetry_sdk::logs::{BatchLogProcessor, LoggerProvider}; ///# use opentelemetry_sdk::runtime; /// @@ -140,7 +139,7 @@ impl InMemoryLogExporter {
/// # Example /// /// ``` - /// use opentelemetry_sdk::testing::logs::{InMemoryLogExporter, InMemoryLogExporterBuilder}; + /// use opentelemetry_sdk::logs::{InMemoryLogExporter, InMemoryLogExporterBuilder}; /// /// let exporter = InMemoryLogExporterBuilder::default().build(); /// let emitted_logs = exporter.get_emitted_logs().unwrap(); @@ -165,7 +164,7 @@ impl InMemoryLogExporter { /// # Example /// /// ``` - /// use opentelemetry_sdk::testing::logs::{InMemoryLogExporter, InMemoryLogExporterBuilder}; + /// use opentelemetry_sdk::logs::{InMemoryLogExporter, InMemoryLogExporterBuilder}; /// /// let exporter = InMemoryLogExporterBuilder::default().build(); /// exporter.reset(); diff --git a/opentelemetry-sdk/src/logs/log_processor.rs b/opentelemetry-sdk/src/logs/log_processor.rs index c70654ef56..222c9e186b 100644 --- a/opentelemetry-sdk/src/logs/log_processor.rs +++ b/opentelemetry-sdk/src/logs/log_processor.rs @@ -32,8 +32,7 @@ //! ``` use crate::{ - export::logs::{ExportResult, LogBatch, LogExporter}, - logs::{LogError, LogRecord, LogResult}, + logs::{ExportResult, LogBatch, LogError, LogExporter, LogRecord, LogResult}, Resource, }; use std::sync::mpsc::{self, RecvTimeoutError, SyncSender}; @@ -104,16 +103,26 @@ pub trait LogProcessor: Send + Sync + Debug { } /// A [`LogProcessor`] designed for testing and debugging purposes, that immediately -/// exports log records as they are emitted. +/// exports log records as they are emitted. Log records are exported synchronously +/// in the same thread that emits the log record. +/// When using this processor with the OTLP Exporter, the following exporter +/// features are supported: +/// - `grpc-tonic`: This requires LoggerProvider to be created within a tokio +/// runtime. Logs can be emitted from any thread, including tokio runtime +/// threads. +/// - `reqwest-blocking-client`: LoggerProvider may be created anywhere, but +/// logs must be emitted from a non-tokio runtime thread. +/// - `reqwest-client`: LoggerProvider may be created anywhere, but logs must be +/// emitted from a tokio runtime thread. +/// /// ## Example /// /// ### Using a SimpleLogProcessor /// /// ```rust -/// use opentelemetry_sdk::logs::{SimpleLogProcessor, LoggerProvider}; +/// use opentelemetry_sdk::logs::{SimpleLogProcessor, LoggerProvider, LogExporter}; /// use opentelemetry::global; -/// use opentelemetry_sdk::export::logs::LogExporter; -/// use opentelemetry_sdk::testing::logs::InMemoryLogExporter; +/// use opentelemetry_sdk::logs::InMemoryLogExporter; /// /// let exporter = InMemoryLogExporter::default(); // Replace with an actual exporter /// let provider = LoggerProvider::builder() @@ -223,13 +232,21 @@ type LogsData = Box<(LogRecord, InstrumentationScope)>; /// - **Export timeout**: Maximum duration allowed for an export operation. /// - **Scheduled delay**: Frequency at which the batch is exported. /// +/// When using this processor with the OTLP Exporter, the following exporter +/// features are supported: +/// - `grpc-tonic`: This requires `LoggerProvider` to be created within a tokio +/// runtime. +/// - `reqwest-blocking-client`: Works with a regular `main` or `tokio::main`. +/// +/// In other words, other clients like `reqwest` and `hyper` are not supported.
+/// /// ### Using a BatchLogProcessor: /// /// ```rust /// use opentelemetry_sdk::logs::{BatchLogProcessor, BatchConfigBuilder, LoggerProvider}; /// use opentelemetry::global; /// use std::time::Duration; -/// use opentelemetry_sdk::testing::logs::InMemoryLogExporter; +/// use opentelemetry_sdk::logs::InMemoryLogExporter; /// /// let exporter = InMemoryLogExporter::default(); // Replace with an actual exporter /// let processor = BatchLogProcessor::builder(exporter) @@ -799,19 +816,17 @@ mod tests { BatchLogProcessor, OTEL_BLRP_EXPORT_TIMEOUT, OTEL_BLRP_MAX_EXPORT_BATCH_SIZE, OTEL_BLRP_MAX_QUEUE_SIZE, OTEL_BLRP_SCHEDULE_DELAY, }; - use crate::export::logs::{LogBatch, LogExporter}; - use crate::logs::LogRecord; use crate::logs::LogResult; - use crate::testing::logs::InMemoryLogExporterBuilder; + use crate::logs::{LogBatch, LogExporter, LogRecord}; use crate::{ logs::{ log_processor::{ OTEL_BLRP_EXPORT_TIMEOUT_DEFAULT, OTEL_BLRP_MAX_EXPORT_BATCH_SIZE_DEFAULT, OTEL_BLRP_MAX_QUEUE_SIZE_DEFAULT, OTEL_BLRP_SCHEDULE_DELAY_DEFAULT, }, - BatchConfig, BatchConfigBuilder, LogProcessor, LoggerProvider, SimpleLogProcessor, + BatchConfig, BatchConfigBuilder, InMemoryLogExporter, InMemoryLogExporterBuilder, + LogProcessor, LoggerProvider, SimpleLogProcessor, }, - testing::logs::InMemoryLogExporter, Resource, }; use opentelemetry::logs::AnyValue; @@ -1064,7 +1079,7 @@ mod tests { .build(); let processor = BatchLogProcessor::new(exporter.clone(), BatchConfig::default()); - let mut record = LogRecord::default(); + let mut record = LogRecord::new(); let instrumentation = InstrumentationScope::default(); processor.emit(&mut record, &instrumentation); @@ -1082,7 +1097,7 @@ mod tests { .build(); let processor = SimpleLogProcessor::new(exporter.clone()); - let mut record: LogRecord = Default::default(); + let mut record: LogRecord = LogRecord::new(); let instrumentation: InstrumentationScope = Default::default(); processor.emit(&mut record, &instrumentation); @@ -1240,7 +1255,7 @@ mod tests { let exporter = InMemoryLogExporterBuilder::default().build(); let processor = SimpleLogProcessor::new(exporter.clone()); - let mut record: LogRecord = Default::default(); + let mut record: LogRecord = LogRecord::new(); let instrumentation: InstrumentationScope = Default::default(); processor.emit(&mut record, &instrumentation); @@ -1253,7 +1268,7 @@ mod tests { let exporter = InMemoryLogExporterBuilder::default().build(); let processor = SimpleLogProcessor::new(exporter.clone()); - let mut record: LogRecord = Default::default(); + let mut record: LogRecord = LogRecord::new(); let instrumentation: InstrumentationScope = Default::default(); processor.emit(&mut record, &instrumentation); @@ -1270,7 +1285,7 @@ mod tests { for _ in 0..10 { let processor_clone = Arc::clone(&processor); let handle = tokio::spawn(async move { - let mut record: LogRecord = Default::default(); + let mut record: LogRecord = LogRecord::new(); let instrumentation: InstrumentationScope = Default::default(); processor_clone.emit(&mut record, &instrumentation); }); @@ -1289,7 +1304,7 @@ mod tests { let exporter = InMemoryLogExporterBuilder::default().build(); let processor = SimpleLogProcessor::new(exporter.clone()); - let mut record: LogRecord = Default::default(); + let mut record: LogRecord = LogRecord::new(); let instrumentation: InstrumentationScope = Default::default(); processor.emit(&mut record, &instrumentation); @@ -1341,7 +1356,7 @@ mod tests { let exporter = LogExporterThatRequiresTokio::new(); let processor = 
SimpleLogProcessor::new(exporter.clone()); - let mut record: LogRecord = Default::default(); + let mut record: LogRecord = LogRecord::new(); let instrumentation: InstrumentationScope = Default::default(); // This will panic because a tokio async operation runs within the exporter without a runtime. @@ -1397,7 +1412,7 @@ for _ in 0..concurrent_emit { let processor_clone = Arc::clone(&processor); let handle = tokio::spawn(async move { - let mut record: LogRecord = Default::default(); + let mut record: LogRecord = LogRecord::new(); let instrumentation: InstrumentationScope = Default::default(); processor_clone.emit(&mut record, &instrumentation); }); @@ -1421,7 +1436,7 @@ let exporter = LogExporterThatRequiresTokio::new(); let processor = SimpleLogProcessor::new(exporter.clone()); - let mut record: LogRecord = Default::default(); + let mut record: LogRecord = LogRecord::new(); let instrumentation: InstrumentationScope = Default::default(); processor.emit(&mut record, &instrumentation); @@ -1440,7 +1455,7 @@ let processor = SimpleLogProcessor::new(exporter.clone()); - let mut record: LogRecord = Default::default(); + let mut record: LogRecord = LogRecord::new(); let instrumentation: InstrumentationScope = Default::default(); processor.emit(&mut record, &instrumentation); @@ -1460,7 +1475,7 @@ let processor = SimpleLogProcessor::new(exporter.clone()); - let mut record: LogRecord = Default::default(); + let mut record: LogRecord = LogRecord::new(); let instrumentation: InstrumentationScope = Default::default(); processor.emit(&mut record, &instrumentation); diff --git a/opentelemetry-sdk/src/logs/log_processor_with_async_runtime.rs b/opentelemetry-sdk/src/logs/log_processor_with_async_runtime.rs index b5a3df2197..765d1ebd98 100644 --- a/opentelemetry-sdk/src/logs/log_processor_with_async_runtime.rs +++ b/opentelemetry-sdk/src/logs/log_processor_with_async_runtime.rs @@ -1,6 +1,5 @@ use crate::{ - export::logs::{ExportResult, LogBatch, LogExporter}, - logs::{LogError, LogRecord, LogResult}, + logs::{ExportResult, LogBatch, LogError, LogExporter, LogRecord, LogResult}, Resource, }; @@ -282,25 +281,25 @@ where #[cfg(all(test, feature = "testing", feature = "logs"))] mod tests { - use crate::export::logs::{LogBatch, LogExporter}; use crate::logs::log_processor::{ OTEL_BLRP_EXPORT_TIMEOUT, OTEL_BLRP_MAX_EXPORT_BATCH_SIZE, OTEL_BLRP_MAX_QUEUE_SIZE, OTEL_BLRP_SCHEDULE_DELAY, }; use crate::logs::log_processor_with_async_runtime::BatchLogProcessor; + use crate::logs::InMemoryLogExporterBuilder; use crate::logs::LogRecord; use crate::logs::LogResult; + use crate::logs::{LogBatch, LogExporter}; use crate::runtime; - use crate::testing::logs::InMemoryLogExporterBuilder; use crate::{ logs::{ log_processor::{ OTEL_BLRP_EXPORT_TIMEOUT_DEFAULT, OTEL_BLRP_MAX_EXPORT_BATCH_SIZE_DEFAULT, OTEL_BLRP_MAX_QUEUE_SIZE_DEFAULT, OTEL_BLRP_SCHEDULE_DELAY_DEFAULT, }, - BatchConfig, BatchConfigBuilder, LogProcessor, LoggerProvider, SimpleLogProcessor, + BatchConfig, BatchConfigBuilder, InMemoryLogExporter, LogProcessor, LoggerProvider, + SimpleLogProcessor, }, - testing::logs::InMemoryLogExporter, Resource, }; use opentelemetry::logs::AnyValue; @@ -556,7 +555,7 @@ mod tests { let processor = BatchLogProcessor::new(exporter.clone(), BatchConfig::default(), runtime::Tokio); - let mut record = LogRecord::default(); + let mut record = LogRecord::new(); let instrumentation = InstrumentationScope::default(); processor.emit(&mut record, &instrumentation); @@ -818,7 +817,7 @@ let
processor = BatchLogProcessor::new(exporter.clone(), BatchConfig::default(), runtime::Tokio); - let mut record = LogRecord::default(); + let mut record = LogRecord::new(); let instrumentation = InstrumentationScope::default(); processor.emit(&mut record, &instrumentation); diff --git a/opentelemetry-sdk/src/logs/log_emitter.rs b/opentelemetry-sdk/src/logs/logger_provider.rs similarity index 93% rename from opentelemetry-sdk/src/logs/log_emitter.rs rename to opentelemetry-sdk/src/logs/logger_provider.rs index ba63896277..6287387d1b 100644 --- a/opentelemetry-sdk/src/logs/log_emitter.rs +++ b/opentelemetry-sdk/src/logs/logger_provider.rs @@ -1,12 +1,12 @@ use super::{BatchLogProcessor, LogProcessor, LogRecord, SimpleLogProcessor, TraceContext}; -use crate::{export::logs::LogExporter, Resource}; -use crate::{logs::LogError, logs::LogResult}; +use crate::logs::{LogError, LogExporter, LogResult}; +use crate::Resource; use opentelemetry::{otel_debug, otel_info, trace::TraceContextExt, Context, InstrumentationScope}; #[cfg(feature = "spec_unstable_logs_enabled")] use opentelemetry::logs::Severity; -use std::time::SystemTime; +use opentelemetry::time::now; use std::{ borrow::Cow, sync::{ @@ -78,8 +78,8 @@ impl opentelemetry::logs::LoggerProvider for LoggerProvider { impl LoggerProvider { /// Create a new `LoggerProvider` builder. - pub fn builder() -> Builder { - Builder::default() + pub fn builder() -> LoggerProviderBuilder { + LoggerProviderBuilder::default() } pub(crate) fn log_processors(&self) -> &[Box<dyn LogProcessor>] { @@ -179,37 +179,67 @@ impl Drop for LoggerProviderInner { #[derive(Debug, Default)] /// Builder for provider attributes. -pub struct Builder { +pub struct LoggerProviderBuilder { processors: Vec<Box<dyn LogProcessor>>, resource: Option<Resource>, } -impl Builder { - /// The `LogExporter` that this provider should use. +impl LoggerProviderBuilder { + /// Adds a [SimpleLogProcessor] with the configured exporter to the pipeline. + /// + /// # Arguments + /// + /// * `exporter` - The exporter to be used by the SimpleLogProcessor. + /// + /// # Returns + /// + /// A new `LoggerProviderBuilder` instance with the SimpleLogProcessor added to the pipeline. + /// + /// Processors are invoked in the order they are added. pub fn with_simple_exporter<T: LogExporter + 'static>(self, exporter: T) -> Self { let mut processors = self.processors; processors.push(Box::new(SimpleLogProcessor::new(exporter))); - Builder { processors, ..self } + LoggerProviderBuilder { processors, ..self } } - /// The `LogExporter` setup using a default `BatchLogProcessor` that this provider should use. + /// Adds a [BatchLogProcessor] with the configured exporter to the pipeline. + /// + /// # Arguments + /// + /// * `exporter` - The exporter to be used by the BatchLogProcessor. + /// + /// # Returns + /// + /// A new `LoggerProviderBuilder` instance with the BatchLogProcessor added to the pipeline. + /// + /// Processors are invoked in the order they are added. pub fn with_batch_exporter<T: LogExporter + 'static>(self, exporter: T) -> Self { let batch = BatchLogProcessor::builder(exporter).build(); self.with_log_processor(batch) } - /// The `LogProcessor` that this provider should use. + /// Adds a custom [LogProcessor] to the pipeline. + /// + /// # Arguments + /// + /// * `processor` - The `LogProcessor` to be added. + /// + /// # Returns + /// + /// A new `LoggerProviderBuilder` instance with the custom `LogProcessor` added to the pipeline. + /// + /// Processors are invoked in the order they are added.
pub fn with_log_processor<T: LogProcessor + 'static>(self, processor: T) -> Self { let mut processors = self.processors; processors.push(Box::new(processor)); - Builder { processors, ..self } + LoggerProviderBuilder { processors, ..self } } /// The `Resource` to be associated with this Provider. pub fn with_resource(self, resource: Resource) -> Self { - Builder { + LoggerProviderBuilder { resource: Some(resource), ..self } @@ -263,7 +293,7 @@ impl opentelemetry::logs::Logger for Logger { type LogRecord = LogRecord; fn create_log_record(&self) -> Self::LogRecord { - LogRecord::default() + LogRecord::new() } /// Emit a `LogRecord`. @@ -283,7 +313,7 @@ } } if record.observed_timestamp.is_none() { - record.observed_timestamp = Some(SystemTime::now()); + record.observed_timestamp = Some(now()); } for p in processors { @@ -293,23 +323,20 @@ } #[cfg(feature = "spec_unstable_logs_enabled")] fn event_enabled(&self, level: Severity, target: &str) -> bool { - let provider = &self.provider; - - let mut enabled = false; - for processor in provider.log_processors() { - enabled = enabled || processor.event_enabled(level, target, self.scope.name().as_ref()); - } - enabled + self.provider + .log_processors() + .iter() + .any(|processor| processor.event_enabled(level, target, self.scope.name().as_ref())) } } #[cfg(test)] mod tests { use crate::{ + logs::InMemoryLogExporter, resource::{ SERVICE_NAME, TELEMETRY_SDK_LANGUAGE, TELEMETRY_SDK_NAME, TELEMETRY_SDK_VERSION, }, - testing::logs::InMemoryLogExporter, trace::TracerProvider, Resource, }; @@ -376,22 +403,22 @@ mod tests { assert_eq!( provider .resource() - .get(Key::from_static_str(resource_key)) + .get(&Key::from_static_str(resource_key)) .map(|v| v.to_string()), expect.map(|s| s.to_string()) ); }; let assert_telemetry_resource = |provider: &super::LoggerProvider| { assert_eq!( - provider.resource().get(TELEMETRY_SDK_LANGUAGE.into()), + provider.resource().get(&TELEMETRY_SDK_LANGUAGE.into()), Some(Value::from("rust")) ); assert_eq!( - provider.resource().get(TELEMETRY_SDK_NAME.into()), + provider.resource().get(&TELEMETRY_SDK_NAME.into()), Some(Value::from("opentelemetry")) ); assert_eq!( - provider.resource().get(TELEMETRY_SDK_VERSION.into()), + provider.resource().get(&TELEMETRY_SDK_VERSION.into()), Some(Value::from(env!("CARGO_PKG_VERSION"))) ); }; diff --git a/opentelemetry-sdk/src/logs/mod.rs b/opentelemetry-sdk/src/logs/mod.rs index 97ae74ee85..ce546fc938 100644 --- a/opentelemetry-sdk/src/logs/mod.rs +++ b/opentelemetry-sdk/src/logs/mod.rs @@ -1,15 +1,25 @@ //! # OpenTelemetry Log SDK mod error; -mod log_emitter; +mod export; mod log_processor; +mod logger_provider; pub(crate) mod record; +/// In-Memory log exporter for testing purposes.
+#[cfg(any(feature = "testing", test))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "testing", test))))] +pub mod in_memory_exporter; +#[cfg(any(feature = "testing", test))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "testing", test))))] +pub use in_memory_exporter::{InMemoryLogExporter, InMemoryLogExporterBuilder}; + pub use error::{LogError, LogResult}; -pub use log_emitter::{Builder, Logger, LoggerProvider}; +pub use export::{ExportResult, LogBatch, LogExporter}; pub use log_processor::{ BatchConfig, BatchConfigBuilder, BatchLogProcessor, BatchLogProcessorBuilder, LogProcessor, SimpleLogProcessor, }; +pub use logger_provider::{Logger, LoggerProvider, LoggerProviderBuilder}; pub use record::{LogRecord, TraceContext}; #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")] @@ -19,7 +29,6 @@ pub mod log_processor_with_async_runtime; #[cfg(all(test, feature = "testing"))] mod tests { use super::*; - use crate::testing::logs::InMemoryLogExporter; use crate::Resource; use opentelemetry::logs::LogRecord; use opentelemetry::logs::{Logger, LoggerProvider as _, Severity}; diff --git a/opentelemetry-sdk/src/logs/record.rs b/opentelemetry-sdk/src/logs/record.rs index 3e4f5c8c18..740ee14c97 100644 --- a/opentelemetry-sdk/src/logs/record.rs +++ b/opentelemetry-sdk/src/logs/record.rs @@ -18,7 +18,7 @@ const PREALLOCATED_ATTRIBUTE_CAPACITY: usize = 5; pub(crate) type LogRecordAttributes = GrowableArray, PREALLOCATED_ATTRIBUTE_CAPACITY>; -#[derive(Debug, Default, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq)] #[non_exhaustive] /// LogRecord represents all data carried by a log record, and /// is provided to `LogExporter`s as input. @@ -118,6 +118,21 @@ impl opentelemetry::logs::LogRecord for LogRecord { } impl LogRecord { + /// Crate only default constructor + pub(crate) fn new() -> Self { + LogRecord { + event_name: None, + target: None, + timestamp: None, + observed_timestamp: None, + trace_context: None, + severity_text: None, + severity_number: None, + body: None, + attributes: LogRecordAttributes::default(), + } + } + /// Returns the event name #[inline] pub fn event_name(&self) -> Option<&'static str> { @@ -220,21 +235,21 @@ mod tests { #[test] fn test_set_eventname() { - let mut log_record = LogRecord::default(); + let mut log_record = LogRecord::new(); log_record.set_event_name("test_event"); assert_eq!(log_record.event_name, Some("test_event")); } #[test] fn test_set_target() { - let mut log_record = LogRecord::default(); + let mut log_record = LogRecord::new(); log_record.set_target("foo::bar"); assert_eq!(log_record.target, Some(Cow::Borrowed("foo::bar"))); } #[test] fn test_set_timestamp() { - let mut log_record = LogRecord::default(); + let mut log_record = LogRecord::new(); let now = SystemTime::now(); log_record.set_timestamp(now); assert_eq!(log_record.timestamp, Some(now)); @@ -242,7 +257,7 @@ mod tests { #[test] fn test_set_observed_timestamp() { - let mut log_record = LogRecord::default(); + let mut log_record = LogRecord::new(); let now = SystemTime::now(); log_record.set_observed_timestamp(now); assert_eq!(log_record.observed_timestamp, Some(now)); @@ -250,14 +265,14 @@ mod tests { #[test] fn test_set_severity_text() { - let mut log_record = LogRecord::default(); + let mut log_record = LogRecord::new(); log_record.set_severity_text("ERROR"); assert_eq!(log_record.severity_text, Some("ERROR")); } #[test] fn test_set_severity_number() { - let mut log_record = LogRecord::default(); + let mut log_record = LogRecord::new(); let severity_number = Severity::Error; 
log_record.set_severity_number(severity_number); assert_eq!(log_record.severity_number, Some(Severity::Error)); @@ -265,7 +280,7 @@ mod tests { #[test] fn test_set_body() { - let mut log_record = LogRecord::default(); + let mut log_record = LogRecord::new(); let body = AnyValue::String("Test body".into()); log_record.set_body(body.clone()); assert_eq!(log_record.body, Some(body)); @@ -273,7 +288,7 @@ mod tests { #[test] fn test_set_attributes() { - let mut log_record = LogRecord::default(); + let mut log_record = LogRecord::new(); let attributes = vec![(Key::new("key"), AnyValue::String("value".into()))]; log_record.add_attributes(attributes.clone()); for (key, value) in attributes { @@ -283,7 +298,7 @@ mod tests { #[test] fn test_set_attribute() { - let mut log_record = LogRecord::default(); + let mut log_record = LogRecord::new(); log_record.add_attribute("key", "value"); let key = Key::new("key"); let value = AnyValue::String("value".into()); @@ -344,12 +359,12 @@ mod tests { fn compare_log_record_target_borrowed_eq_owned() { let log_record_borrowed = LogRecord { event_name: Some("test_event"), - ..Default::default() + ..LogRecord::new() }; let log_record_owned = LogRecord { event_name: Some("test_event"), - ..Default::default() + ..LogRecord::new() }; assert_eq!(log_record_borrowed, log_record_owned); diff --git a/opentelemetry-sdk/src/metrics/error.rs b/opentelemetry-sdk/src/metrics/error.rs index cb8afcab0e..9bf9e1d99d 100644 --- a/opentelemetry-sdk/src/metrics/error.rs +++ b/opentelemetry-sdk/src/metrics/error.rs @@ -2,7 +2,7 @@ use std::result; use std::sync::PoisonError; use thiserror::Error; -use crate::export::ExportError; +use crate::ExportError; /// A specialized `Result` type for metric operations. pub type MetricResult = result::Result; diff --git a/opentelemetry-sdk/src/testing/metrics/in_memory_exporter.rs b/opentelemetry-sdk/src/metrics/in_memory_exporter.rs similarity index 93% rename from opentelemetry-sdk/src/testing/metrics/in_memory_exporter.rs rename to opentelemetry-sdk/src/metrics/in_memory_exporter.rs index 1d6f9c2754..2c5d988cf3 100644 --- a/opentelemetry-sdk/src/testing/metrics/in_memory_exporter.rs +++ b/opentelemetry-sdk/src/metrics/in_memory_exporter.rs @@ -1,4 +1,4 @@ -use crate::metrics::data; +use crate::metrics::data::{self, Gauge, Sum}; use crate::metrics::data::{Histogram, Metric, ResourceMetrics, ScopeMetrics}; use crate::metrics::exporter::PushMetricExporter; use crate::metrics::MetricError; @@ -27,7 +27,7 @@ use std::sync::{Arc, Mutex}; ///# use opentelemetry_sdk::metrics; ///# use opentelemetry::{KeyValue}; ///# use opentelemetry::metrics::MeterProvider; -///# use opentelemetry_sdk::testing::metrics::InMemoryMetricExporter; +///# use opentelemetry_sdk::metrics::InMemoryMetricExporter; ///# use opentelemetry_sdk::metrics::PeriodicReader; /// ///# #[tokio::main] @@ -86,7 +86,7 @@ impl Default for InMemoryMetricExporter { /// # Example /// /// ``` -/// # use opentelemetry_sdk::testing::metrics::{InMemoryMetricExporter, InMemoryMetricExporterBuilder}; +/// # use opentelemetry_sdk::metrics::{InMemoryMetricExporter, InMemoryMetricExporterBuilder}; /// /// let exporter = InMemoryMetricExporterBuilder::new().build(); /// ``` @@ -138,7 +138,7 @@ impl InMemoryMetricExporter { /// # Example /// /// ``` - /// # use opentelemetry_sdk::testing::metrics::InMemoryMetricExporter; + /// # use opentelemetry_sdk::metrics::InMemoryMetricExporter; /// /// let exporter = InMemoryMetricExporter::default(); /// let finished_metrics = 
exporter.get_finished_metrics().unwrap(); @@ -155,7 +155,7 @@ impl InMemoryMetricExporter { /// # Example /// /// ``` - /// # use opentelemetry_sdk::testing::metrics::InMemoryMetricExporter; + /// # use opentelemetry_sdk::metrics::InMemoryMetricExporter; /// /// let exporter = InMemoryMetricExporter::default(); /// exporter.reset(); @@ -213,7 +213,7 @@ impl InMemoryMetricExporter { time: hist.time, temporality: hist.temporality, })) - } else if let Some(sum) = data.as_any().downcast_ref::<data::Sum<u64>>() { + } else if let Some(sum) = data.as_any().downcast_ref::<Sum<u64>>() { Some(Box::new(data::Sum { data_points: sum.data_points.clone(), start_time: sum.start_time, @@ -221,7 +221,7 @@ temporality: sum.temporality, is_monotonic: sum.is_monotonic, })) - } else if let Some(sum) = data.as_any().downcast_ref::<data::Sum<i64>>() { + } else if let Some(sum) = data.as_any().downcast_ref::<Sum<i64>>() { Some(Box::new(data::Sum { data_points: sum.data_points.clone(), start_time: sum.start_time, @@ -229,7 +229,7 @@ temporality: sum.temporality, is_monotonic: sum.is_monotonic, })) - } else if let Some(sum) = data.as_any().downcast_ref::<data::Sum<f64>>() { + } else if let Some(sum) = data.as_any().downcast_ref::<Sum<f64>>() { Some(Box::new(data::Sum { data_points: sum.data_points.clone(), start_time: sum.start_time, @@ -237,19 +237,19 @@ temporality: sum.temporality, is_monotonic: sum.is_monotonic, })) - } else if let Some(gauge) = data.as_any().downcast_ref::<data::Gauge<u64>>() { + } else if let Some(gauge) = data.as_any().downcast_ref::<Gauge<u64>>() { Some(Box::new(data::Gauge { data_points: gauge.data_points.clone(), start_time: gauge.start_time, time: gauge.time, })) - } else if let Some(gauge) = data.as_any().downcast_ref::<data::Gauge<i64>>() { + } else if let Some(gauge) = data.as_any().downcast_ref::<Gauge<i64>>() { Some(Box::new(data::Gauge { data_points: gauge.data_points.clone(), start_time: gauge.start_time, time: gauge.time, })) - } else if let Some(gauge) = data.as_any().downcast_ref::<data::Gauge<f64>>() { + } else if let Some(gauge) = data.as_any().downcast_ref::<Gauge<f64>>() { Some(Box::new(data::Gauge { data_points: gauge.data_points.clone(), start_time: gauge.start_time, diff --git a/opentelemetry-sdk/src/metrics/internal/aggregate.rs b/opentelemetry-sdk/src/metrics/internal/aggregate.rs index fc9d5975c3..8713bce3c4 100644 --- a/opentelemetry-sdk/src/metrics/internal/aggregate.rs +++ b/opentelemetry-sdk/src/metrics/internal/aggregate.rs @@ -18,8 +18,11 @@ use super::{ pub(crate) const STREAM_CARDINALITY_LIMIT: usize = 2000; /// Checks whether aggregator has hit cardinality limit for metric streams -pub(crate) fn is_under_cardinality_limit(size: usize) -> bool { - size < STREAM_CARDINALITY_LIMIT +pub(crate) fn is_under_cardinality_limit(_size: usize) -> bool { + true + + // TODO: Implement this feature, after allowing the ability to customize the cardinality limit. + // size < STREAM_CARDINALITY_LIMIT } /// Receives measurements to be aggregated.
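The in-memory exporter hunks above recover concrete aggregation types from `dyn Aggregation` trait objects via `as_any().downcast_ref`. A self-contained sketch of that pattern, using simplified stand-in types rather than the SDK's real ones:

```rust
use std::any::Any;

// Simplified stand-ins for the SDK's aggregation types; illustrative only,
// not the actual opentelemetry_sdk definitions.
trait Aggregation: Any {
    fn as_any(&self) -> &dyn Any;
}

struct Sum<T> {
    data_points: Vec<T>,
    is_monotonic: bool,
}

impl<T: 'static> Aggregation for Sum<T> {
    fn as_any(&self) -> &dyn Any {
        self
    }
}

fn main() {
    let agg: Box<dyn Aggregation> = Box::new(Sum::<u64> {
        data_points: vec![42],
        is_monotonic: true,
    });

    // Recover the concrete type behind the trait object, the same shape as
    // the exporter's `data.as_any().downcast_ref::<Sum<u64>>()` calls above.
    if let Some(sum) = agg.as_any().downcast_ref::<Sum<u64>>() {
        println!("monotonic={} points={:?}", sum.is_monotonic, sum.data_points);
    }
}
```

Because `downcast_ref` returns `None` on a type mismatch, the exporter can probe each supported numeric type (`u64`, `i64`, `f64`) in turn without panicking.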
diff --git a/opentelemetry-sdk/src/metrics/internal/exponential_histogram.rs b/opentelemetry-sdk/src/metrics/internal/exponential_histogram.rs index 170f4a068d..995bc156e7 100644 --- a/opentelemetry-sdk/src/metrics/internal/exponential_histogram.rs +++ b/opentelemetry-sdk/src/metrics/internal/exponential_histogram.rs @@ -4,7 +4,7 @@ use opentelemetry::{otel_debug, KeyValue}; use std::sync::OnceLock; use crate::metrics::{ - data::{self, Aggregation}, + data::{self, Aggregation, ExponentialHistogram}, Temporality, }; @@ -386,7 +386,7 @@ impl ExpoHistogram { fn delta(&self, dest: Option<&mut dyn Aggregation>) -> (usize, Option<Box<dyn Aggregation>>) { let time = self.init_time.delta(); - let h = dest.and_then(|d| d.as_mut().downcast_mut::<data::ExponentialHistogram<T>>()); + let h = dest.and_then(|d| d.as_mut().downcast_mut::<ExponentialHistogram<T>>()); let mut new_agg = if h.is_none() { Some(data::ExponentialHistogram { data_points: vec![], @@ -443,7 +443,7 @@ ) -> (usize, Option<Box<dyn Aggregation>>) { let time = self.init_time.cumulative(); - let h = dest.and_then(|d| d.as_mut().downcast_mut::<data::ExponentialHistogram<T>>()); + let h = dest.and_then(|d| d.as_mut().downcast_mut::<ExponentialHistogram<T>>()); let mut new_agg = if h.is_none() { Some(data::ExponentialHistogram { data_points: vec![], @@ -528,6 +528,7 @@ where mod tests { use std::{ops::Neg, time::SystemTime}; + use data::{ExponentialHistogram, Gauge, Histogram, Sum}; use tests::internal::AggregateFns; use crate::metrics::internal::{self, AggregateBuilder}; @@ -1468,8 +1469,8 @@ test_name ); - if let Some(a) = a.as_any().downcast_ref::<data::Gauge<T>>() { - let b = b.as_any().downcast_ref::<data::Gauge<T>>().unwrap(); + if let Some(a) = a.as_any().downcast_ref::<Gauge<T>>() { + let b = b.as_any().downcast_ref::<Gauge<T>>().unwrap(); assert_eq!( a.data_points.len(), b.data_points.len(), @@ -1479,8 +1480,8 @@ for (a, b) in a.data_points.iter().zip(b.data_points.iter()) { assert_gauge_data_points_eq(a, b, "mismatching gauge data points", test_name); } - } else if let Some(a) = a.as_any().downcast_ref::<data::Sum<T>>() { - let b = b.as_any().downcast_ref::<data::Sum<T>>().unwrap(); + } else if let Some(a) = a.as_any().downcast_ref::<Sum<T>>() { + let b = b.as_any().downcast_ref::<Sum<T>>().unwrap(); assert_eq!( a.temporality, b.temporality, "{} mismatching sum temporality", @@ -1500,8 +1501,8 @@ for (a, b) in a.data_points.iter().zip(b.data_points.iter()) { assert_sum_data_points_eq(a, b, "mismatching sum data points", test_name); } - } else if let Some(a) = a.as_any().downcast_ref::<data::Histogram<T>>() { - let b = b.as_any().downcast_ref::<data::Histogram<T>>().unwrap(); + } else if let Some(a) = a.as_any().downcast_ref::<Histogram<T>>() { + let b = b.as_any().downcast_ref::<Histogram<T>>().unwrap(); assert_eq!( a.temporality, b.temporality, "{}: mismatching hist temporality", @@ -1516,10 +1517,10 @@ for (a, b) in a.data_points.iter().zip(b.data_points.iter()) { assert_hist_data_points_eq(a, b, "mismatching hist data points", test_name); } - } else if let Some(a) = a.as_any().downcast_ref::<data::ExponentialHistogram<T>>() { + } else if let Some(a) = a.as_any().downcast_ref::<ExponentialHistogram<T>>() { let b = b .as_any() - .downcast_ref::<data::ExponentialHistogram<T>>() + .downcast_ref::<ExponentialHistogram<T>>() .unwrap(); assert_eq!( a.temporality, b.temporality, diff --git a/opentelemetry-sdk/src/metrics/internal/last_value.rs b/opentelemetry-sdk/src/metrics/internal/last_value.rs index cc2176b897..b14c86047e 100644 --- a/opentelemetry-sdk/src/metrics/internal/last_value.rs +++ b/opentelemetry-sdk/src/metrics/internal/last_value.rs @@ -1,5 +1,5 @@ use crate::metrics::{ - data::{self, Aggregation, GaugeDataPoint}, + data::{self, Aggregation, Gauge, GaugeDataPoint}, Temporality, }; use opentelemetry::KeyValue; @@ -65,7 +65,7 @@ impl LastValue { ) -> (usize, Option<Box<dyn Aggregation>>) {
let time = self.init_time.delta(); - let s_data = dest.and_then(|d| d.as_mut().downcast_mut::<data::Gauge<T>>()); + let s_data = dest.and_then(|d| d.as_mut().downcast_mut::<Gauge<T>>()); let mut new_agg = if s_data.is_none() { Some(data::Gauge { data_points: vec![], @@ -97,7 +97,7 @@ dest: Option<&mut dyn Aggregation>, ) -> (usize, Option<Box<dyn Aggregation>>) { let time = self.init_time.cumulative(); - let s_data = dest.and_then(|d| d.as_mut().downcast_mut::<data::Gauge<T>>()); + let s_data = dest.and_then(|d| d.as_mut().downcast_mut::<Gauge<T>>()); let mut new_agg = if s_data.is_none() { Some(data::Gauge { data_points: vec![], diff --git a/opentelemetry-sdk/src/metrics/internal/precomputed_sum.rs b/opentelemetry-sdk/src/metrics/internal/precomputed_sum.rs index b2f478e078..c035dbe696 100644 --- a/opentelemetry-sdk/src/metrics/internal/precomputed_sum.rs +++ b/opentelemetry-sdk/src/metrics/internal/precomputed_sum.rs @@ -1,6 +1,6 @@ use opentelemetry::KeyValue; -use crate::metrics::data::{self, Aggregation, SumDataPoint}; +use crate::metrics::data::{self, Aggregation, Sum, SumDataPoint}; use crate::metrics::Temporality; use super::aggregate::{AggregateTimeInitiator, AttributeSetFilter}; @@ -40,7 +40,7 @@ impl PrecomputedSum { ) -> (usize, Option<Box<dyn Aggregation>>) { let time = self.init_time.delta(); - let s_data = dest.and_then(|d| d.as_mut().downcast_mut::<data::Sum<T>>()); + let s_data = dest.and_then(|d| d.as_mut().downcast_mut::<Sum<T>>()); let mut new_agg = if s_data.is_none() { Some(data::Sum { data_points: vec![], @@ -91,7 +91,7 @@ ) -> (usize, Option<Box<dyn Aggregation>>) { let time = self.init_time.cumulative(); - let s_data = dest.and_then(|d| d.as_mut().downcast_mut::<data::Sum<T>>()); + let s_data = dest.and_then(|d| d.as_mut().downcast_mut::<Sum<T>>()); let mut new_agg = if s_data.is_none() { Some(data::Sum { data_points: vec![], diff --git a/opentelemetry-sdk/src/metrics/meter_provider.rs b/opentelemetry-sdk/src/metrics/meter_provider.rs index 011de1f41c..881082714f 100644 --- a/opentelemetry-sdk/src/metrics/meter_provider.rs +++ b/opentelemetry-sdk/src/metrics/meter_provider.rs @@ -319,7 +319,7 @@ mod tests { assert_eq!( provider.inner.pipes.0[0] .resource - .get(Key::from_static_str(resource_key)) + .get(&Key::from_static_str(resource_key)) .map(|v| v.to_string()), expect.map(|s| s.to_string()) ); @@ -328,19 +328,19 @@ assert_eq!( provider.inner.pipes.0[0] .resource - .get(TELEMETRY_SDK_LANGUAGE.into()), + .get(&TELEMETRY_SDK_LANGUAGE.into()), Some(Value::from("rust")) ); assert_eq!( provider.inner.pipes.0[0] .resource - .get(TELEMETRY_SDK_NAME.into()), + .get(&TELEMETRY_SDK_NAME.into()), Some(Value::from("opentelemetry")) ); assert_eq!( provider.inner.pipes.0[0] .resource - .get(TELEMETRY_SDK_VERSION.into()), + .get(&TELEMETRY_SDK_VERSION.into()), Some(Value::from(env!("CARGO_PKG_VERSION"))) ); }; diff --git a/opentelemetry-sdk/src/metrics/mod.rs b/opentelemetry-sdk/src/metrics/mod.rs index 5faeba724a..798db18acb 100644 --- a/opentelemetry-sdk/src/metrics/mod.rs +++ b/opentelemetry-sdk/src/metrics/mod.rs @@ -57,6 +57,14 @@ pub(crate) mod pipeline; pub mod reader; pub(crate) mod view; +/// In-Memory metric exporter for testing purposes.
+#[cfg(any(feature = "testing", test))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "testing", test))))] +pub mod in_memory_exporter; +#[cfg(any(feature = "testing", test))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "testing", test))))] +pub use in_memory_exporter::{InMemoryMetricExporter, InMemoryMetricExporterBuilder}; + pub use aggregation::*; pub use error::{MetricError, MetricResult}; pub use manual_reader::*; @@ -105,10 +113,14 @@ pub enum Temporality { mod tests { use self::data::{HistogramDataPoint, ScopeMetrics, SumDataPoint}; use super::*; + use crate::metrics::data::Aggregation; use crate::metrics::data::ResourceMetrics; - use crate::testing::metrics::InMemoryMetricExporter; - use crate::testing::metrics::InMemoryMetricExporterBuilder; + use crate::metrics::InMemoryMetricExporter; + use crate::metrics::InMemoryMetricExporterBuilder; + use data::Gauge; use data::GaugeDataPoint; + use data::Histogram; + use data::Sum; use opentelemetry::metrics::{Counter, Meter, UpDownCounter}; use opentelemetry::InstrumentationScope; use opentelemetry::{metrics::MeterProvider as _, KeyValue}; @@ -225,7 +237,7 @@ mod tests { counter.add(50, &[]); test_context.flush_metrics(); - let sum = test_context.get_aggregation::>("my_counter", None); + let sum = test_context.get_aggregation::>("my_counter", None); assert_eq!(sum.data_points.len(), 1, "Expected only one data point"); assert!(sum.is_monotonic, "Should produce monotonic."); @@ -248,7 +260,7 @@ mod tests { counter.add(50, &[]); test_context.flush_metrics(); - let sum = test_context.get_aggregation::>("my_counter", None); + let sum = test_context.get_aggregation::>("my_counter", None); assert_eq!(sum.data_points.len(), 1, "Expected only one data point"); assert!(sum.is_monotonic, "Should produce monotonic."); @@ -259,11 +271,13 @@ mod tests { assert_eq!(data_point.value, 50, "Unexpected data point value"); } + #[ignore = "https://github.com/open-telemetry/opentelemetry-rust/issues/1065"] #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn counter_aggregation_overflow_delta() { counter_aggregation_overflow_helper(Temporality::Delta); } + #[ignore = "https://github.com/open-telemetry/opentelemetry-rust/issues/1065"] #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn counter_aggregation_overflow_cumulative() { counter_aggregation_overflow_helper(Temporality::Cumulative); @@ -448,7 +462,7 @@ mod tests { for (iter, v) in values_clone.iter().enumerate() { test_context.flush_metrics(); - let sum = test_context.get_aggregation::>("my_observable_counter", None); + let sum = test_context.get_aggregation::>("my_observable_counter", None); assert_eq!(sum.data_points.len(), 1); assert!(sum.is_monotonic, "Counter should produce monotonic."); if let Temporality::Cumulative = temporality { @@ -565,7 +579,7 @@ mod tests { let sum = metric .data .as_any() - .downcast_ref::>() + .downcast_ref::>() .expect("Sum aggregation expected for Counter instruments by default"); // Expecting 1 time-series. @@ -631,7 +645,7 @@ mod tests { let sum1 = metric1 .data .as_any() - .downcast_ref::>() + .downcast_ref::>() .expect("Sum aggregation expected for Counter instruments by default"); // Expecting 1 time-series. @@ -651,7 +665,7 @@ mod tests { let sum2 = metric2 .data .as_any() - .downcast_ref::>() + .downcast_ref::>() .expect("Sum aggregation expected for Counter instruments by default"); // Expecting 1 time-series. 
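These test hunks all follow the same flush-then-downcast pattern against the in-memory exporter. As an end-to-end illustration, a hedged sketch of that flow (assuming the `testing` feature plus the `SdkMeterProvider`/`PeriodicReader` builder APIs referenced elsewhere in this diff; error handling trimmed):

```rust
use opentelemetry::metrics::MeterProvider as _;
use opentelemetry_sdk::metrics::{InMemoryMetricExporter, PeriodicReader, SdkMeterProvider};

fn main() {
    // Wire an in-memory exporter into a periodic reader and a meter provider.
    let exporter = InMemoryMetricExporter::default();
    let reader = PeriodicReader::builder(exporter.clone()).build();
    let provider = SdkMeterProvider::builder().with_reader(reader).build();

    // Record a measurement with the `build()`-style instrument constructor.
    let meter = provider.meter("test");
    let counter = meter.u64_counter("my_counter").build();
    counter.add(50, &[]);

    // Flush, then inspect what the exporter captured, as the tests above do.
    provider.force_flush().expect("flush should succeed");
    let finished = exporter.get_finished_metrics().unwrap();
    assert!(!finished.is_empty());
}
```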
@@ -735,7 +749,7 @@ mod tests { let sum = metric .data .as_any() - .downcast_ref::>() + .downcast_ref::>() .expect("Sum aggregation expected for Counter instruments by default"); // Expecting 1 time-series. @@ -755,7 +769,7 @@ mod tests { let reader = PeriodicReader::builder(exporter.clone()).build(); let criteria = Instrument::new().name("test_histogram"); let stream_invalid_aggregation = Stream::new() - .aggregation(Aggregation::ExplicitBucketHistogram { + .aggregation(aggregation::Aggregation::ExplicitBucketHistogram { boundaries: vec![0.9, 1.9, 1.2, 1.3, 1.4, 1.5], // invalid boundaries record_min_max: false, }) @@ -858,7 +872,7 @@ mod tests { let sum = metric .data .as_any() - .downcast_ref::>() + .downcast_ref::>() .expect("Sum aggregation expected for ObservableCounter instruments by default"); // Expecting 1 time-series only, as the view drops all attributes resulting @@ -935,7 +949,7 @@ mod tests { let sum = metric .data .as_any() - .downcast_ref::>() + .downcast_ref::>() .expect("Sum aggregation expected for Counter instruments by default"); // Expecting 1 time-series only, as the view drops all attributes resulting @@ -955,7 +969,7 @@ mod tests { counter.add(50, &[]); test_context.flush_metrics(); - let sum = test_context.get_aggregation::>("my_counter", Some("my_unit")); + let sum = test_context.get_aggregation::>("my_counter", Some("my_unit")); assert_eq!(sum.data_points.len(), 1, "Expected only one data point"); assert!(!sum.is_monotonic, "Should not produce monotonic."); @@ -978,7 +992,7 @@ mod tests { counter.add(50, &[]); test_context.flush_metrics(); - let sum = test_context.get_aggregation::>("my_counter", Some("my_unit")); + let sum = test_context.get_aggregation::>("my_counter", Some("my_unit")); assert_eq!(sum.data_points.len(), 1, "Expected only one data point"); assert!(!sum.is_monotonic, "Should not produce monotonic."); @@ -1000,12 +1014,12 @@ mod tests { counter.add(50, &[]); test_context.flush_metrics(); - let _ = test_context.get_aggregation::>("my_counter", None); + let _ = test_context.get_aggregation::>("my_counter", None); test_context.reset_metrics(); counter.add(5, &[]); test_context.flush_metrics(); - let sum = test_context.get_aggregation::>("my_counter", None); + let sum = test_context.get_aggregation::>("my_counter", None); assert_eq!(sum.data_points.len(), 1, "Expected only one data point"); assert!(sum.is_monotonic, "Should produce monotonic."); @@ -1027,12 +1041,12 @@ mod tests { counter.add(50, &[]); test_context.flush_metrics(); - let _ = test_context.get_aggregation::>("my_counter", None); + let _ = test_context.get_aggregation::>("my_counter", None); test_context.reset_metrics(); counter.add(5, &[]); test_context.flush_metrics(); - let sum = test_context.get_aggregation::>("my_counter", None); + let sum = test_context.get_aggregation::>("my_counter", None); assert_eq!(sum.data_points.len(), 1, "Expected only one data point"); assert!(sum.is_monotonic, "Should produce monotonic."); @@ -1054,12 +1068,12 @@ mod tests { counter.add(50, &[]); test_context.flush_metrics(); - let _ = test_context.get_aggregation::>("my_counter", None); + let _ = test_context.get_aggregation::>("my_counter", None); test_context.reset_metrics(); counter.add(50, &[KeyValue::new("a", "b")]); test_context.flush_metrics(); - let sum = test_context.get_aggregation::>("my_counter", None); + let sum = test_context.get_aggregation::>("my_counter", None); let no_attr_data_point = sum.data_points.iter().find(|x| x.attributes.is_empty()); @@ -1090,7 +1104,7 @@ mod tests { 
counter.add(1, &[KeyValue::new("key1", "value2")]); test_context.flush_metrics(); - let sum = test_context.get_aggregation::>("my_counter", None); + let sum = test_context.get_aggregation::>("my_counter", None); // Expecting 2 time-series. assert_eq!(sum.data_points.len(), 2); @@ -1215,7 +1229,7 @@ mod tests { match instrument_name { "counter" => { let counter_data = - test_context.get_aggregation::>("test_counter", None); + test_context.get_aggregation::>("test_counter", None); assert_eq!(counter_data.data_points.len(), 2); let zero_attribute_datapoint = find_sum_datapoint_with_no_attributes(&counter_data.data_points) @@ -1231,7 +1245,7 @@ mod tests { } "updown_counter" => { let updown_counter_data = - test_context.get_aggregation::>("test_updowncounter", None); + test_context.get_aggregation::>("test_updowncounter", None); assert_eq!(updown_counter_data.data_points.len(), 2); let zero_attribute_datapoint = find_sum_datapoint_with_no_attributes(&updown_counter_data.data_points) @@ -1246,8 +1260,8 @@ mod tests { assert_eq!(data_point1.value, 20); } "histogram" => { - let histogram_data = test_context - .get_aggregation::>("test_histogram", None); + let histogram_data = + test_context.get_aggregation::>("test_histogram", None); assert_eq!(histogram_data.data_points.len(), 2); let zero_attribute_datapoint = find_histogram_datapoint_with_no_attributes(&histogram_data.data_points) @@ -1268,8 +1282,7 @@ mod tests { assert_eq!(data_point1.max, Some(30)); } "gauge" => { - let gauge_data = - test_context.get_aggregation::>("test_gauge", None); + let gauge_data = test_context.get_aggregation::>("test_gauge", None); assert_eq!(gauge_data.data_points.len(), 2); let zero_attribute_datapoint = find_gauge_datapoint_with_no_attributes(&gauge_data.data_points) @@ -1368,7 +1381,7 @@ mod tests { match instrument_name { "counter" => { let counter_data = - test_context.get_aggregation::>("test_counter", None); + test_context.get_aggregation::>("test_counter", None); assert_eq!(counter_data.data_points.len(), 2); assert!(counter_data.is_monotonic); let zero_attribute_datapoint = @@ -1385,7 +1398,7 @@ mod tests { } "updown_counter" => { let updown_counter_data = - test_context.get_aggregation::>("test_updowncounter", None); + test_context.get_aggregation::>("test_updowncounter", None); assert_eq!(updown_counter_data.data_points.len(), 2); assert!(!updown_counter_data.is_monotonic); let zero_attribute_datapoint = @@ -1401,8 +1414,7 @@ mod tests { assert_eq!(data_point1.value, 20); } "gauge" => { - let gauge_data = - test_context.get_aggregation::>("test_gauge", None); + let gauge_data = test_context.get_aggregation::>("test_gauge", None); assert_eq!(gauge_data.data_points.len(), 2); let zero_attribute_datapoint = find_gauge_datapoint_with_no_attributes(&gauge_data.data_points) @@ -1451,8 +1463,7 @@ mod tests { // Assert // We invoke `test_context.flush_metrics()` six times. - let sums = - test_context.get_from_multiple_aggregations::>("my_counter", None, 6); + let sums = test_context.get_from_multiple_aggregations::>("my_counter", None, 6); let mut sum_zero_attributes = 0; let mut sum_key1_value1 = 0; @@ -1504,8 +1515,7 @@ mod tests { // Assert // We invoke `test_context.flush_metrics()` six times. 
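The delta/cumulative behavior these repeated flushes exercise is selected when the exporter is constructed; a minimal sketch, assuming the `with_temporality` builder method that `InMemoryMetricExporterBuilder` exposes in the SDK (not part of this diff):

```rust
use opentelemetry_sdk::metrics::{InMemoryMetricExporterBuilder, Temporality};

// Delta: each collection reports only the change since the previous collection.
// Cumulative (the default): each collection reports the running total.
let delta_exporter = InMemoryMetricExporterBuilder::new()
    .with_temporality(Temporality::Delta)
    .build();
```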
- let sums = - test_context.get_from_multiple_aggregations::>("test_counter", None, 6); + let sums = test_context.get_from_multiple_aggregations::>("test_counter", None, 6); let mut sum_zero_attributes = 0.0; let mut sum_key1_value1 = 0.0; @@ -1558,7 +1568,7 @@ mod tests { // Assert // We invoke `test_context.flush_metrics()` six times. - let histograms = test_context.get_from_multiple_aggregations::>( + let histograms = test_context.get_from_multiple_aggregations::>( "test_histogram", None, 6, @@ -1695,7 +1705,7 @@ mod tests { // Assert // We invoke `test_context.flush_metrics()` six times. - let histograms = test_context.get_from_multiple_aggregations::>( + let histograms = test_context.get_from_multiple_aggregations::>( "test_histogram", None, 6, @@ -1825,8 +1835,7 @@ mod tests { test_context.flush_metrics(); // Assert - let histogram_data = - test_context.get_aggregation::>("my_histogram", None); + let histogram_data = test_context.get_aggregation::>("my_histogram", None); // Expecting 2 time-series. assert_eq!(histogram_data.data_points.len(), 2); if let Temporality::Cumulative = temporality { @@ -1872,8 +1881,7 @@ mod tests { test_context.flush_metrics(); - let histogram_data = - test_context.get_aggregation::>("my_histogram", None); + let histogram_data = test_context.get_aggregation::>("my_histogram", None); assert_eq!(histogram_data.data_points.len(), 2); let data_point1 = find_histogram_datapoint_with_key_value(&histogram_data.data_points, "key1", "value1") @@ -1922,8 +1930,7 @@ mod tests { test_context.flush_metrics(); // Assert - let histogram_data = - test_context.get_aggregation::>("test_histogram", None); + let histogram_data = test_context.get_aggregation::>("test_histogram", None); // Expecting 2 time-series. assert_eq!(histogram_data.data_points.len(), 1); if let Temporality::Cumulative = temporality { @@ -1976,7 +1983,7 @@ mod tests { test_context.flush_metrics(); // Assert - let gauge_data_point = test_context.get_aggregation::>("my_gauge", None); + let gauge_data_point = test_context.get_aggregation::>("my_gauge", None); // Expecting 2 time-series. assert_eq!(gauge_data_point.data_points.len(), 2); @@ -2005,7 +2012,7 @@ mod tests { test_context.flush_metrics(); - let gauge = test_context.get_aggregation::>("my_gauge", None); + let gauge = test_context.get_aggregation::>("my_gauge", None); assert_eq!(gauge.data_points.len(), 2); let data_point1 = find_gauge_datapoint_with_key_value(&gauge.data_points, "key1", "value1") .expect("datapoint with key1=value1 expected"); @@ -2034,7 +2041,7 @@ mod tests { test_context.flush_metrics(); // Assert - let gauge = test_context.get_aggregation::>("test_observable_gauge", None); + let gauge = test_context.get_aggregation::>("test_observable_gauge", None); // Expecting 2 time-series. let expected_time_series_count = if use_empty_attributes { 3 } else { 2 }; assert_eq!(gauge.data_points.len(), expected_time_series_count); @@ -2062,7 +2069,7 @@ mod tests { test_context.flush_metrics(); - let gauge = test_context.get_aggregation::>("test_observable_gauge", None); + let gauge = test_context.get_aggregation::>("test_observable_gauge", None); assert_eq!(gauge.data_points.len(), expected_time_series_count); if use_empty_attributes { @@ -2100,7 +2107,7 @@ mod tests { test_context.flush_metrics(); // Assert - let sum = test_context.get_aggregation::>("my_counter", None); + let sum = test_context.get_aggregation::>("my_counter", None); // Expecting 2 time-series. 
assert_eq!(sum.data_points.len(), 2); assert!(sum.is_monotonic, "Counter should produce monotonic."); @@ -2137,7 +2144,7 @@ mod tests { test_context.flush_metrics(); - let sum = test_context.get_aggregation::>("my_counter", None); + let sum = test_context.get_aggregation::>("my_counter", None); assert_eq!(sum.data_points.len(), 2); let data_point1 = find_sum_datapoint_with_key_value(&sum.data_points, "key1", "value1") .expect("datapoint with key1=value1 expected"); @@ -2177,7 +2184,7 @@ mod tests { counter.add(100, &[KeyValue::new("A", "yet_another")]); test_context.flush_metrics(); - let sum = test_context.get_aggregation::>("my_counter", None); + let sum = test_context.get_aggregation::>("my_counter", None); // Expecting 2002 metric points. (2000 + 1 overflow + Empty attributes) assert_eq!(sum.data_points.len(), 2002); @@ -2271,7 +2278,7 @@ mod tests { ); test_context.flush_metrics(); - let sum = test_context.get_aggregation::>("my_counter", None); + let sum = test_context.get_aggregation::>("my_counter", None); // Expecting 1 time-series. assert_eq!(sum.data_points.len(), 1); @@ -2300,7 +2307,7 @@ mod tests { test_context.flush_metrics(); // Assert - let sum = test_context.get_aggregation::>("my_updown_counter", None); + let sum = test_context.get_aggregation::>("my_updown_counter", None); // Expecting 2 time-series. assert_eq!(sum.data_points.len(), 2); assert!( @@ -2336,7 +2343,7 @@ mod tests { test_context.flush_metrics(); - let sum = test_context.get_aggregation::>("my_updown_counter", None); + let sum = test_context.get_aggregation::>("my_updown_counter", None); assert_eq!(sum.data_points.len(), 2); let data_point1 = find_sum_datapoint_with_key_value(&sum.data_points, "key1", "value1") .expect("datapoint with key1=value1 expected"); @@ -2491,7 +2498,7 @@ mod tests { assert!(resource_metrics.is_empty(), "no metrics should be exported"); } - fn get_aggregation( + fn get_aggregation( &mut self, counter_name: &str, unit_name: Option<&str>, @@ -2534,7 +2541,7 @@ mod tests { .expect("Failed to cast aggregation to expected type") } - fn get_from_multiple_aggregations( + fn get_from_multiple_aggregations( &mut self, counter_name: &str, unit_name: Option<&str>, diff --git a/opentelemetry-sdk/src/metrics/periodic_reader.rs b/opentelemetry-sdk/src/metrics/periodic_reader.rs index ad9d4ccb54..6201f2d4fe 100644 --- a/opentelemetry-sdk/src/metrics/periodic_reader.rs +++ b/opentelemetry-sdk/src/metrics/periodic_reader.rs @@ -26,20 +26,6 @@ const METRIC_EXPORT_INTERVAL_NAME: &str = "OTEL_METRIC_EXPORT_INTERVAL"; const METRIC_EXPORT_TIMEOUT_NAME: &str = "OTEL_METRIC_EXPORT_TIMEOUT"; /// Configuration options for [PeriodicReader]. -/// -/// A periodic reader is a [MetricReader] that collects and exports metric data -/// to the exporter at a defined interval. -/// -/// By default, the returned [MetricReader] will collect and export data every -/// 60 seconds. The export time is not counted towards the interval between -/// attempts. PeriodicReader itself does not enforce timeout. Instead timeout -/// is passed on to the exporter for each export attempt. -/// -/// The [collect] method of the returned [MetricReader] continues to gather and -/// return metric data to the user. It will not automatically send that data to -/// the exporter outside of the predefined interval. 
-/// -/// [collect]: MetricReader::collect #[derive(Debug)] pub struct PeriodicReaderBuilder { interval: Duration, @@ -104,20 +90,25 @@ where } } -/// A [MetricReader] that continuously collects and exports metric data at a set +/// A [MetricReader] that continuously collects and exports metrics at a set /// interval. /// -/// By default, PeriodicReader will collect and export data every -/// 60 seconds. The export time is not counted towards the interval between -/// attempts. PeriodicReader itself does not enforce timeout. -/// Instead timeout is passed on to the exporter for each export attempt. +/// By default, `PeriodicReader` will collect and export metrics every 60 +/// seconds. The export time is not counted towards the interval between +/// attempts. `PeriodicReader` itself does not enforce a timeout. Instead, the +/// timeout is passed on to the configured exporter for each export attempt. /// -/// The [collect] method of the returned continues to gather and -/// return metric data to the user. It will not automatically send that data to -/// the exporter outside of the predefined interval. +/// `PeriodicReader` spawns a background thread to handle the periodic +/// collection and export of metrics. The background thread will continue to run +/// until `shutdown()` is called. /// +/// When using this reader with the OTLP Exporter, the following exporter +/// features are supported: +/// - `grpc-tonic`: This requires `MeterProvider` to be created within a tokio +/// runtime. +/// - `reqwest-blocking-client`: Works with a regular `main` or `tokio::main`. /// -/// [collect]: MetricReader::collect +/// In other words, other clients like `reqwest` and `hyper` are not supported. /// /// # Example /// @@ -477,11 +468,11 @@ impl MetricReader for PeriodicReader { mod tests { use super::PeriodicReader; use crate::{ + metrics::InMemoryMetricExporter, metrics::{ data::ResourceMetrics, exporter::PushMetricExporter, reader::MetricReader, MetricError, MetricResult, SdkMeterProvider, Temporality, }, - testing::metrics::InMemoryMetricExporter, Resource, }; use async_trait::async_trait; diff --git a/opentelemetry-sdk/src/metrics/periodic_reader_with_async_runtime.rs b/opentelemetry-sdk/src/metrics/periodic_reader_with_async_runtime.rs index 33558b579b..dd7e8f9d72 100644 --- a/opentelemetry-sdk/src/metrics/periodic_reader_with_async_runtime.rs +++ b/opentelemetry-sdk/src/metrics/periodic_reader_with_async_runtime.rs @@ -416,8 +416,8 @@ mod tests { use crate::metrics::reader::MetricReader; use crate::metrics::MetricError; use crate::{ - metrics::data::ResourceMetrics, metrics::SdkMeterProvider, runtime, - testing::metrics::InMemoryMetricExporter, Resource, + metrics::data::ResourceMetrics, metrics::InMemoryMetricExporter, metrics::SdkMeterProvider, + runtime, Resource, }; use opentelemetry::metrics::MeterProvider; use std::sync::mpsc; diff --git a/opentelemetry-sdk/src/resource/env.rs b/opentelemetry-sdk/src/resource/env.rs index ac5ee0c034..b4ea198c37 100644 --- a/opentelemetry-sdk/src/resource/env.rs +++ b/opentelemetry-sdk/src/resource/env.rs @@ -45,12 +45,12 @@ impl Default for EnvResourceDetector { fn construct_otel_resources(s: String) -> Resource { Resource::builder_empty() .with_attributes(s.split_terminator(',').filter_map(|entry| { - let mut parts = entry.splitn(2, '='); - let key = parts.next()?.trim(); - let value = parts.next()?.trim(); - if value.find('=').is_some() { - return None; - } + let parts = match entry.split_once('=') { + Some(p) => p, + None => return None, + }; + let key = 
parts.0.trim(); + let value = parts.1.trim(); Some(KeyValue::new(key.to_owned(), value.to_owned())) })) @@ -84,7 +84,7 @@ impl ResourceDetector for SdkProvidedResourceDetector { .or_else(|| { EnvResourceDetector::new() .detect() - .get(Key::new(super::SERVICE_NAME)) + .get(&Key::new(super::SERVICE_NAME)) }) .unwrap_or_else(|| "unknown_service".into()), )]) @@ -106,7 +106,7 @@ mod tests { [ ( "OTEL_RESOURCE_ATTRIBUTES", - Some("key=value, k = v , a= x, a=z"), + Some("key=value, k = v , a= x, a=z,base64=SGVsbG8sIFdvcmxkIQ=="), ), ("IRRELEVANT", Some("20200810")), ], @@ -121,6 +121,7 @@ mod tests { KeyValue::new("k", "v"), KeyValue::new("a", "x"), KeyValue::new("a", "z"), + KeyValue::new("base64", "SGVsbG8sIFdvcmxkIQ=="), // base64('Hello, World!') ]) .build() ); @@ -137,14 +138,14 @@ mod tests { // Ensure no env var set let no_env = SdkProvidedResourceDetector.detect(); assert_eq!( - no_env.get(Key::from_static_str(crate::resource::SERVICE_NAME)), + no_env.get(&Key::from_static_str(crate::resource::SERVICE_NAME)), Some(Value::from("unknown_service")), ); temp_env::with_var(OTEL_SERVICE_NAME, Some("test service"), || { let with_service = SdkProvidedResourceDetector.detect(); assert_eq!( - with_service.get(Key::from_static_str(crate::resource::SERVICE_NAME)), + with_service.get(&Key::from_static_str(crate::resource::SERVICE_NAME)), Some(Value::from("test service")), ) }); @@ -155,7 +156,7 @@ mod tests { || { let with_service = SdkProvidedResourceDetector.detect(); assert_eq!( - with_service.get(Key::from_static_str(crate::resource::SERVICE_NAME)), + with_service.get(&Key::from_static_str(crate::resource::SERVICE_NAME)), Some(Value::from("test service1")), ) }, @@ -170,7 +171,7 @@ mod tests { || { let with_service = SdkProvidedResourceDetector.detect(); assert_eq!( - with_service.get(Key::from_static_str(crate::resource::SERVICE_NAME)), + with_service.get(&Key::from_static_str(crate::resource::SERVICE_NAME)), Some(Value::from("test service")) ); }, diff --git a/opentelemetry-sdk/src/resource/mod.rs b/opentelemetry-sdk/src/resource/mod.rs index c7a26978ea..aa2a101c01 100644 --- a/opentelemetry-sdk/src/resource/mod.rs +++ b/opentelemetry-sdk/src/resource/mod.rs @@ -54,10 +54,11 @@ pub struct Resource { impl Resource { /// Creates a [ResourceBuilder] that allows you to configure multiple aspects of the Resource. /// - /// This [ResourceBuilder] will always include the following [ResourceDetector]s: + /// This [ResourceBuilder] will include the following [ResourceDetector]s: /// - [SdkProvidedResourceDetector] /// - [TelemetryResourceDetector] /// - [EnvResourceDetector] + /// If you'd like to start from an empty resource, use [Resource::builder_empty]. pub fn builder() -> ResourceBuilder { ResourceBuilder { resource: Self::from_detectors(&[ @@ -90,8 +91,7 @@ impl Resource { /// Create a new `Resource` from key value pairs. /// - /// Values are de-duplicated by key, and the first key-value pair with a non-empty string value - /// will be retained + /// Values are de-duplicated by key, and the last key-value pair will be retained pub(crate) fn new>(kvs: T) -> Self { let mut attrs = HashMap::new(); for kv in kvs { @@ -138,8 +138,6 @@ impl Resource { } /// Create a new `Resource` from resource detectors. - /// - /// timeout will be applied to each detector. fn from_detectors(detectors: &[Box]) -> Self { let mut resource = Resource::empty(); for detector in detectors { @@ -227,8 +225,8 @@ impl Resource { } /// Retrieve the value from resource associate with given key. 
- pub fn get(&self, key: Key) -> Option { - self.inner.attrs.get(&key).cloned() + pub fn get(&self, key: &Key) -> Option { + self.inner.attrs.get(key).cloned() } } @@ -260,8 +258,6 @@ impl<'a> IntoIterator for &'a Resource { pub trait ResourceDetector { /// detect returns an initialized Resource based on gathered information. /// - /// timeout is used in case the detection operation takes too much time. - /// /// If source information to construct a Resource is inaccessible, an empty Resource should be returned /// /// If source information to construct a Resource is invalid, for example, @@ -334,18 +330,24 @@ mod tests { use super::*; - #[test] - fn new_resource() { - let args_with_dupe_keys = [KeyValue::new("a", ""), KeyValue::new("a", "final")]; - - let mut expected_attrs = HashMap::new(); - expected_attrs.insert(Key::new("a"), Value::from("final")); + #[rstest] + #[case([KeyValue::new("a", ""), KeyValue::new("a", "final")], [(Key::new("a"), Value::from("final"))])] + #[case([KeyValue::new("a", "final"), KeyValue::new("a", "")], [(Key::new("a"), Value::from(""))])] + fn new_resource( + #[case] given_attributes: [KeyValue; 2], + #[case] expected_attrs: [(Key, Value); 1], + ) { + // Arrange + let expected = HashMap::from_iter(expected_attrs.into_iter()); + // Act let resource = Resource::builder_empty() - .with_attributes(args_with_dupe_keys) + .with_attributes(given_attributes) .build(); let resource_inner = Arc::try_unwrap(resource.inner).expect("Failed to unwrap Arc"); - assert_eq!(resource_inner.attrs, expected_attrs); + + // Assert + assert_eq!(resource_inner.attrs, expected); assert_eq!(resource_inner.schema_url, None); } diff --git a/opentelemetry-sdk/src/runtime.rs b/opentelemetry-sdk/src/runtime.rs index 7705c10e91..00720e0892 100644 --- a/opentelemetry-sdk/src/runtime.rs +++ b/opentelemetry-sdk/src/runtime.rs @@ -15,6 +15,7 @@ use thiserror::Error; /// /// [Tokio]: https://crates.io/crates/tokio /// [async-std]: https://crates.io/crates/async-std +#[cfg(feature = "experimental_async_runtime")] pub trait Runtime: Clone + Send + Sync + 'static { /// A future stream, which returns items in a previously specified interval. The item type is /// not important. @@ -44,13 +45,19 @@ pub trait Runtime: Clone + Send + Sync + 'static { } /// Runtime implementation, which works with Tokio's multi thread runtime. -#[cfg(feature = "rt-tokio")] -#[cfg_attr(docsrs, doc(cfg(feature = "rt-tokio")))] +#[cfg(all(feature = "experimental_async_runtime", feature = "rt-tokio"))] +#[cfg_attr( + docsrs, + doc(cfg(all(feature = "experimental_async_runtime", feature = "rt-tokio"))) +)] #[derive(Debug, Clone)] pub struct Tokio; -#[cfg(feature = "rt-tokio")] -#[cfg_attr(docsrs, doc(cfg(feature = "rt-tokio")))] +#[cfg(all(feature = "experimental_async_runtime", feature = "rt-tokio"))] +#[cfg_attr( + docsrs, + doc(cfg(all(feature = "experimental_async_runtime", feature = "rt-tokio"))) +)] impl Runtime for Tokio { type Interval = tokio_stream::wrappers::IntervalStream; type Delay = ::std::pin::Pin>; @@ -71,13 +78,31 @@ impl Runtime for Tokio { } /// Runtime implementation, which works with Tokio's current thread runtime. 
-#[cfg(feature = "rt-tokio-current-thread")] -#[cfg_attr(docsrs, doc(cfg(feature = "rt-tokio-current-thread")))] +#[cfg(all( + feature = "experimental_async_runtime", + feature = "rt-tokio-current-thread" +))] +#[cfg_attr( + docsrs, + doc(cfg(all( + feature = "experimental_async_runtime", + feature = "rt-tokio-current-thread" + ))) +)] #[derive(Debug, Clone)] pub struct TokioCurrentThread; -#[cfg(feature = "rt-tokio-current-thread")] -#[cfg_attr(docsrs, doc(cfg(feature = "rt-tokio-current-thread")))] +#[cfg(all( + feature = "experimental_async_runtime", + feature = "rt-tokio-current-thread" +))] +#[cfg_attr( + docsrs, + doc(cfg(all( + feature = "experimental_async_runtime", + feature = "rt-tokio-current-thread" + ))) +)] impl Runtime for TokioCurrentThread { type Interval = tokio_stream::wrappers::IntervalStream; type Delay = ::std::pin::Pin>; @@ -108,13 +133,19 @@ impl Runtime for TokioCurrentThread { } /// Runtime implementation, which works with async-std. -#[cfg(feature = "rt-async-std")] -#[cfg_attr(docsrs, doc(cfg(feature = "rt-async-std")))] +#[cfg(all(feature = "experimental_async_runtime", feature = "rt-async-std"))] +#[cfg_attr( + docsrs, + doc(cfg(all(feature = "experimental_async_runtime", feature = "rt-async-std"))) +)] #[derive(Debug, Clone)] pub struct AsyncStd; -#[cfg(feature = "rt-async-std")] -#[cfg_attr(docsrs, doc(cfg(feature = "rt-async-std")))] +#[cfg(all(feature = "experimental_async_runtime", feature = "rt-async-std"))] +#[cfg_attr( + docsrs, + doc(cfg(all(feature = "experimental_async_runtime", feature = "rt-async-std"))) +)] impl Runtime for AsyncStd { type Interval = async_std::stream::Interval; type Delay = BoxFuture<'static, ()>; @@ -138,6 +169,7 @@ impl Runtime for AsyncStd { /// /// [log]: crate::logs::BatchLogProcessor /// [span]: crate::trace::BatchSpanProcessor +#[cfg(feature = "experimental_async_runtime")] pub trait RuntimeChannel: Runtime { /// A future stream to receive batch messages from channels. type Receiver: Stream + Send; @@ -152,6 +184,7 @@ pub trait RuntimeChannel: Runtime { } /// Error returned by a [`TrySend`] implementation. +#[cfg(feature = "experimental_async_runtime")] #[derive(Debug, Error)] pub enum TrySendError { /// Send failed due to the channel being full. @@ -166,6 +199,7 @@ pub enum TrySendError { } /// TrySend is an abstraction of `Sender` that is capable of sending messages through a reference. +#[cfg(feature = "experimental_async_runtime")] pub trait TrySend: Sync + Send { /// The message that will be sent. 
type Message; @@ -176,7 +210,10 @@ pub trait TrySend: Sync + Send { fn try_send(&self, item: Self::Message) -> Result<(), TrySendError>; } -#[cfg(any(feature = "rt-tokio", feature = "rt-tokio-current-thread"))] +#[cfg(all( + feature = "experimental_async_runtime", + any(feature = "rt-tokio", feature = "rt-tokio-current-thread") +))] impl TrySend for tokio::sync::mpsc::Sender { type Message = T; @@ -188,8 +225,11 @@ impl TrySend for tokio::sync::mpsc::Sender { } } -#[cfg(feature = "rt-tokio")] -#[cfg_attr(docsrs, doc(cfg(feature = "rt-tokio")))] +#[cfg(all(feature = "experimental_async_runtime", feature = "rt-tokio"))] +#[cfg_attr( + docsrs, + doc(cfg(all(feature = "experimental_async_runtime", feature = "rt-tokio"))) +)] impl RuntimeChannel for Tokio { type Receiver = tokio_stream::wrappers::ReceiverStream; type Sender = tokio::sync::mpsc::Sender; @@ -206,8 +246,17 @@ impl RuntimeChannel for Tokio { } } -#[cfg(feature = "rt-tokio-current-thread")] -#[cfg_attr(docsrs, doc(cfg(feature = "rt-tokio-current-thread")))] +#[cfg(all( + feature = "experimental_async_runtime", + feature = "rt-tokio-current-thread" +))] +#[cfg_attr( + docsrs, + doc(cfg(all( + feature = "experimental_async_runtime", + feature = "rt-tokio-current-thread" + ))) +)] impl RuntimeChannel for TokioCurrentThread { type Receiver = tokio_stream::wrappers::ReceiverStream; type Sender = tokio::sync::mpsc::Sender; @@ -224,7 +273,7 @@ impl RuntimeChannel for TokioCurrentThread { } } -#[cfg(feature = "rt-async-std")] +#[cfg(all(feature = "experimental_async_runtime", feature = "rt-async-std"))] impl TrySend for async_std::channel::Sender { type Message = T; @@ -236,8 +285,11 @@ impl TrySend for async_std::channel::Sender { } } -#[cfg(feature = "rt-async-std")] -#[cfg_attr(docsrs, doc(cfg(feature = "rt-async-std")))] +#[cfg(all(feature = "experimental_async_runtime", feature = "rt-async-std"))] +#[cfg_attr( + docsrs, + doc(cfg(all(feature = "experimental_async_runtime", feature = "rt-async-std"))) +)] impl RuntimeChannel for AsyncStd { type Receiver = async_std::channel::Receiver; type Sender = async_std::channel::Sender; diff --git a/opentelemetry-sdk/src/testing/logs/mod.rs b/opentelemetry-sdk/src/testing/logs/mod.rs deleted file mode 100644 index ed4d5d9560..0000000000 --- a/opentelemetry-sdk/src/testing/logs/mod.rs +++ /dev/null @@ -1,6 +0,0 @@ -//! In-Memory log exporter for testing purpose. - -/// The `in_memory_exporter` module provides in-memory log exporter. -/// For detailed usage and examples, see `in_memory_exporter`. -pub mod in_memory_exporter; -pub use in_memory_exporter::{InMemoryLogExporter, InMemoryLogExporterBuilder}; diff --git a/opentelemetry-sdk/src/testing/metrics/mod.rs b/opentelemetry-sdk/src/testing/metrics/mod.rs index 987588430e..87b218c623 100644 --- a/opentelemetry-sdk/src/testing/metrics/mod.rs +++ b/opentelemetry-sdk/src/testing/metrics/mod.rs @@ -1,10 +1,4 @@ -//! In-Memory metrics exporter for testing purpose. - -/// The `in_memory_exporter` module provides in-memory metrics exporter. -/// For detailed usage and examples, see `in_memory_exporter`. -pub mod in_memory_exporter; -pub use in_memory_exporter::{InMemoryMetricExporter, InMemoryMetricExporterBuilder}; - +//! Structs for tests. #[doc(hidden)] pub mod metric_reader; pub use metric_reader::TestMetricReader; diff --git a/opentelemetry-sdk/src/testing/mod.rs b/opentelemetry-sdk/src/testing/mod.rs index 50c79f5d49..97469caab3 100644 --- a/opentelemetry-sdk/src/testing/mod.rs +++ b/opentelemetry-sdk/src/testing/mod.rs @@ -1,10 +1,8 @@ //! 
In-Memory exporters for testing purpose. +/// Structs used for testing #[cfg(all(feature = "testing", feature = "trace"))] pub mod trace; #[cfg(all(feature = "testing", feature = "metrics"))] pub mod metrics; - -#[cfg(all(feature = "testing", feature = "logs"))] -pub mod logs; diff --git a/opentelemetry-sdk/src/testing/trace/mod.rs b/opentelemetry-sdk/src/testing/trace/mod.rs index c7b7786d36..48a887d21e 100644 --- a/opentelemetry-sdk/src/testing/trace/mod.rs +++ b/opentelemetry-sdk/src/testing/trace/mod.rs @@ -1,10 +1,3 @@ -//! In-Memory trace exporter for testing purpose. - -/// The `in_memory_exporter` module provides in-memory trace exporter. -/// For detailed usage and examples, see `in_memory_exporter`. -pub mod in_memory_exporter; -pub use in_memory_exporter::{InMemorySpanExporter, InMemorySpanExporterBuilder}; - #[doc(hidden)] mod span_exporters; pub use span_exporters::*; diff --git a/opentelemetry-sdk/src/testing/trace/span_exporters.rs b/opentelemetry-sdk/src/testing/trace/span_exporters.rs index e9996e3fc8..4a90e4def4 100644 --- a/opentelemetry-sdk/src/testing/trace/span_exporters.rs +++ b/opentelemetry-sdk/src/testing/trace/span_exporters.rs @@ -1,5 +1,5 @@ use crate::{ - export::trace::{ExportResult, SpanData, SpanExporter}, + trace::{ExportResult, SpanData, SpanExporter}, trace::{SpanEvents, SpanLinks}, }; use futures_util::future::BoxFuture; @@ -98,7 +98,7 @@ impl From> for TestExportError { /// A no-op instance of an [`SpanExporter`]. /// -/// [`SpanExporter`]: crate::export::trace::SpanExporter +/// [`SpanExporter`]: crate::trace::SpanExporter #[derive(Debug, Default)] pub struct NoopSpanExporter { _private: (), diff --git a/opentelemetry-sdk/src/export/trace.rs b/opentelemetry-sdk/src/trace/export.rs similarity index 100% rename from opentelemetry-sdk/src/export/trace.rs rename to opentelemetry-sdk/src/trace/export.rs diff --git a/opentelemetry-sdk/src/testing/trace/in_memory_exporter.rs b/opentelemetry-sdk/src/trace/in_memory_exporter.rs similarity index 92% rename from opentelemetry-sdk/src/testing/trace/in_memory_exporter.rs rename to opentelemetry-sdk/src/trace/in_memory_exporter.rs index 3645d9f6c2..4f85f46444 100644 --- a/opentelemetry-sdk/src/testing/trace/in_memory_exporter.rs +++ b/opentelemetry-sdk/src/trace/in_memory_exporter.rs @@ -1,5 +1,5 @@ -use crate::export::trace::{ExportResult, SpanData, SpanExporter}; use crate::resource::Resource; +use crate::trace::{ExportResult, SpanData, SpanExporter}; use futures_util::future::BoxFuture; use opentelemetry::trace::{TraceError, TraceResult}; use std::sync::{Arc, Mutex}; @@ -15,7 +15,7 @@ use std::sync::{Arc, Mutex}; ///# use opentelemetry::{global, trace::Tracer, Context}; ///# use opentelemetry_sdk::propagation::TraceContextPropagator; ///# use opentelemetry_sdk::runtime; -///# use opentelemetry_sdk::testing::trace::InMemorySpanExporterBuilder; +///# use opentelemetry_sdk::trace::InMemorySpanExporterBuilder; ///# use opentelemetry_sdk::trace::{BatchSpanProcessor, TracerProvider}; /// ///# #[tokio::main] @@ -64,7 +64,7 @@ impl Default for InMemorySpanExporter { /// Builder for [`InMemorySpanExporter`]. 
/// # Example /// ``` -///# use opentelemetry_sdk::testing::trace::InMemorySpanExporterBuilder; +///# use opentelemetry_sdk::trace::InMemorySpanExporterBuilder; /// /// let exporter = InMemorySpanExporterBuilder::new().build(); /// ``` @@ -102,7 +102,7 @@ impl InMemorySpanExporter { /// # Example /// /// ``` - /// # use opentelemetry_sdk::testing::trace::InMemorySpanExporter; + /// # use opentelemetry_sdk::trace::InMemorySpanExporter; /// /// let exporter = InMemorySpanExporter::default(); /// let finished_spans = exporter.get_finished_spans().unwrap(); @@ -119,7 +119,7 @@ impl InMemorySpanExporter { /// # Example /// /// ``` - /// # use opentelemetry_sdk::testing::trace::InMemorySpanExporter; + /// # use opentelemetry_sdk::trace::InMemorySpanExporter; /// /// let exporter = InMemorySpanExporter::default(); /// exporter.reset(); diff --git a/opentelemetry-sdk/src/trace/mod.rs b/opentelemetry-sdk/src/trace/mod.rs index e714364a85..3a1395d3b2 100644 --- a/opentelemetry-sdk/src/trace/mod.rs +++ b/opentelemetry-sdk/src/trace/mod.rs @@ -8,6 +8,7 @@ //! * The [`TracerProvider`] struct which configures and produces [`Tracer`]s. mod config; mod events; +mod export; mod id_generator; mod links; mod provider; @@ -22,10 +23,19 @@ mod tracer; pub use config::{config, Config}; pub use events::SpanEvents; +pub use export::{ExportResult, SpanData, SpanExporter}; + +/// In-Memory span exporter for testing purpose. +#[cfg(any(feature = "testing", test))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "testing", test))))] +pub mod in_memory_exporter; +#[cfg(any(feature = "testing", test))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "testing", test))))] +pub use in_memory_exporter::{InMemorySpanExporter, InMemorySpanExporterBuilder}; pub use id_generator::{IdGenerator, RandomIdGenerator}; pub use links::SpanLinks; -pub use provider::{Builder, TracerProvider}; +pub use provider::{TracerProvider, TracerProviderBuilder}; pub use sampler::{Sampler, ShouldSample}; pub use span::Span; pub use span_limit::SpanLimits; @@ -48,8 +58,8 @@ mod tests { use super::*; use crate::{ - testing::trace::{InMemorySpanExporter, InMemorySpanExporterBuilder}, trace::span_limit::{DEFAULT_MAX_EVENT_PER_SPAN, DEFAULT_MAX_LINKS_PER_SPAN}, + trace::{InMemorySpanExporter, InMemorySpanExporterBuilder}, }; use opentelemetry::trace::{ SamplingDecision, SamplingResult, SpanKind, Status, TraceContextExt, TraceState, diff --git a/opentelemetry-sdk/src/trace/provider.rs b/opentelemetry-sdk/src/trace/provider.rs index 447404cbb0..5c54bcb550 100644 --- a/opentelemetry-sdk/src/trace/provider.rs +++ b/opentelemetry-sdk/src/trace/provider.rs @@ -66,7 +66,7 @@ use crate::trace::{ BatchSpanProcessor, Config, RandomIdGenerator, Sampler, SimpleSpanProcessor, SpanLimits, Tracer, }; use crate::Resource; -use crate::{export::trace::SpanExporter, trace::SpanProcessor}; +use crate::{trace::SpanExporter, trace::SpanProcessor}; use opentelemetry::trace::TraceError; use opentelemetry::{otel_debug, trace::TraceResult}; use opentelemetry::{otel_info, InstrumentationScope}; @@ -167,8 +167,8 @@ impl TracerProvider { } /// Create a new [`TracerProvider`] builder. - pub fn builder() -> Builder { - Builder::default() + pub fn builder() -> TracerProviderBuilder { + TracerProviderBuilder::default() } /// Span processors associated with this provider @@ -274,32 +274,62 @@ impl opentelemetry::trace::TracerProvider for TracerProvider { /// Builder for provider attributes. 
#[derive(Debug, Default)] -pub struct Builder { +pub struct TracerProviderBuilder { processors: Vec>, config: crate::trace::Config, } -impl Builder { - /// The `SpanExporter` that this provider should use. +impl TracerProviderBuilder { + /// Adds a [SimpleSpanProcessor] with the configured exporter to the pipeline. + /// + /// # Arguments + /// + /// * `exporter` - The exporter to be used by the SimpleSpanProcessor. + /// + /// # Returns + /// + /// A new `Builder` instance with the SimpleSpanProcessor added to the pipeline. + /// + /// Processors are invoked in the order they are added. pub fn with_simple_exporter(self, exporter: T) -> Self { let mut processors = self.processors; processors.push(Box::new(SimpleSpanProcessor::new(Box::new(exporter)))); - Builder { processors, ..self } + TracerProviderBuilder { processors, ..self } } - /// The [`SpanExporter`] setup using a default [`BatchSpanProcessor`] that this provider should use. + /// Adds a [BatchSpanProcessor] with the configured exporter to the pipeline. + /// + /// # Arguments + /// + /// * `exporter` - The exporter to be used by the BatchSpanProcessor. + /// + /// # Returns + /// + /// A new `Builder` instance with the BatchSpanProcessor added to the pipeline. + /// + /// Processors are invoked in the order they are added. pub fn with_batch_exporter(self, exporter: T) -> Self { let batch = BatchSpanProcessor::builder(exporter).build(); self.with_span_processor(batch) } - /// The [`SpanProcessor`] that this provider should use. + /// Adds a custom [SpanProcessor] to the pipeline. + /// + /// # Arguments + /// + /// * `processor` - The `SpanProcessor` to be added. + /// + /// # Returns + /// + /// A new `Builder` instance with the custom `SpanProcessor` added to the pipeline. + /// + /// Processors are invoked in the order they are added. pub fn with_span_processor(self, processor: T) -> Self { let mut processors = self.processors; processors.push(Box::new(processor)); - Builder { processors, ..self } + TracerProviderBuilder { processors, ..self } } /// The sdk [`crate::trace::Config`] that this provider will use. @@ -308,7 +338,7 @@ impl Builder { note = "Config is becoming a private type. Use Builder::with_{config_name}(resource) instead. ex: Builder::with_resource(resource)" )] pub fn with_config(self, config: crate::trace::Config) -> Self { - Builder { config, ..self } + TracerProviderBuilder { config, ..self } } /// Specify the sampler to be used. 
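With the rename from `Builder` to `TracerProviderBuilder`, wiring a pipeline reads as below. This is a minimal sketch using the in-memory exporter relocated in this same PR; `in_span` and `get_finished_spans` are existing APIs, and processors run in the order they were added:

```rust
use opentelemetry::trace::{Tracer, TracerProvider as _};
use opentelemetry_sdk::trace::{InMemorySpanExporter, TracerProvider};

let exporter = InMemorySpanExporter::default();
// `with_simple_exporter` appends a SimpleSpanProcessor to the pipeline;
// each `with_*` call adds one more processor, invoked in order.
let provider = TracerProvider::builder()
    .with_simple_exporter(exporter.clone())
    .build();

let tracer = provider.tracer("example");
tracer.in_span("work", |_cx| {
    // The span is recorded here and exported synchronously on end.
});

assert_eq!(exporter.get_finished_spans().unwrap().len(), 1);
```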
@@ -368,7 +398,7 @@ impl Builder { /// /// [Tracer]: opentelemetry::trace::Tracer pub fn with_resource(self, resource: Resource) -> Self { - Builder { + TracerProviderBuilder { config: self.config.with_resource(resource), ..self } @@ -413,11 +443,11 @@ impl Builder { #[cfg(test)] mod tests { - use crate::export::trace::SpanData; use crate::resource::{ SERVICE_NAME, TELEMETRY_SDK_LANGUAGE, TELEMETRY_SDK_NAME, TELEMETRY_SDK_VERSION, }; use crate::trace::provider::TracerProviderInner; + use crate::trace::SpanData; use crate::trace::{Config, Span, SpanProcessor}; use crate::Resource; use opentelemetry::trace::{TraceError, TraceResult, Tracer, TracerProvider}; @@ -522,7 +552,7 @@ mod tests { provider .config() .resource - .get(Key::from_static_str(resource_key)) + .get(&Key::from_static_str(resource_key)) .map(|v| v.to_string()), expect.map(|s| s.to_string()) ); @@ -532,15 +562,18 @@ mod tests { provider .config() .resource - .get(TELEMETRY_SDK_LANGUAGE.into()), + .get(&TELEMETRY_SDK_LANGUAGE.into()), Some(Value::from("rust")) ); assert_eq!( - provider.config().resource.get(TELEMETRY_SDK_NAME.into()), + provider.config().resource.get(&TELEMETRY_SDK_NAME.into()), Some(Value::from("opentelemetry")) ); assert_eq!( - provider.config().resource.get(TELEMETRY_SDK_VERSION.into()), + provider + .config() + .resource + .get(&TELEMETRY_SDK_VERSION.into()), Some(Value::from(env!("CARGO_PKG_VERSION"))) ); }; diff --git a/opentelemetry-sdk/src/trace/runtime_tests.rs b/opentelemetry-sdk/src/trace/runtime_tests.rs index ed1a8325d1..02273fcb68 100644 --- a/opentelemetry-sdk/src/trace/runtime_tests.rs +++ b/opentelemetry-sdk/src/trace/runtime_tests.rs @@ -2,12 +2,12 @@ // need to run those tests one by one as the GlobalTracerProvider is a shared object between // threads Use cargo test -- --ignored --test-threads=1 to run those tests. 
#[cfg(any(feature = "rt-tokio", feature = "rt-tokio-current-thread"))] -use crate::export::trace::{ExportResult, SpanExporter}; -#[cfg(any(feature = "rt-tokio", feature = "rt-tokio-current-thread"))] use crate::runtime; #[cfg(any(feature = "rt-tokio", feature = "rt-tokio-current-thread"))] use crate::runtime::RuntimeChannel; #[cfg(any(feature = "rt-tokio", feature = "rt-tokio-current-thread"))] +use crate::trace::{ExportResult, SpanExporter}; +#[cfg(any(feature = "rt-tokio", feature = "rt-tokio-current-thread"))] use futures_util::future::BoxFuture; #[cfg(any(feature = "rt-tokio", feature = "rt-tokio-current-thread"))] use opentelemetry::global::*; @@ -28,10 +28,7 @@ struct SpanCountExporter { #[cfg(any(feature = "rt-tokio", feature = "rt-tokio-current-thread"))] impl SpanExporter for SpanCountExporter { - fn export( - &mut self, - batch: Vec, - ) -> BoxFuture<'static, ExportResult> { + fn export(&mut self, batch: Vec) -> BoxFuture<'static, ExportResult> { self.span_count.fetch_add(batch.len(), Ordering::SeqCst); Box::pin(async { Ok(()) }) } diff --git a/opentelemetry-sdk/src/trace/sampler.rs b/opentelemetry-sdk/src/trace/sampler.rs index 572a17b554..55b5838a0e 100644 --- a/opentelemetry-sdk/src/trace/sampler.rs +++ b/opentelemetry-sdk/src/trace/sampler.rs @@ -57,7 +57,7 @@ use opentelemetry_http::HttpClient; /// [OpenTelemetry SDK]: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/sdk.md#sampling /// [`SpanContext`]: opentelemetry::trace::SpanContext /// [`SpanContext::trace_flags()`]: opentelemetry::trace::SpanContext#method.trace_flags -/// [`SpanExporter`]: crate::export::trace::SpanExporter +/// [`SpanExporter`]: crate::trace::SpanExporter /// [`SpanProcessor`]: crate::trace::SpanProcessor /// [`Span`]: opentelemetry::trace::Span /// [`Span::is_recording()`]: opentelemetry::trace::Span#tymethod.is_recording diff --git a/opentelemetry-sdk/src/trace/span.rs b/opentelemetry-sdk/src/trace/span.rs index 25a5df0da1..77d3d9b588 100644 --- a/opentelemetry-sdk/src/trace/span.rs +++ b/opentelemetry-sdk/src/trace/span.rs @@ -74,7 +74,7 @@ impl Span { /// Convert information in this span into `exporter::trace::SpanData`. /// This function copies all data from the current span, which will create a /// overhead. - pub fn exported_data(&self) -> Option { + pub fn exported_data(&self) -> Option { let (span_context, tracer) = (self.span_context.clone(), &self.tracer); self.data @@ -250,8 +250,8 @@ fn build_export_data( data: SpanData, span_context: SpanContext, tracer: &crate::trace::Tracer, -) -> crate::export::trace::SpanData { - crate::export::trace::SpanData { +) -> crate::trace::SpanData { + crate::trace::SpanData { span_context, parent_span_id: data.parent_span_id, span_kind: data.span_kind, diff --git a/opentelemetry-sdk/src/trace/span_processor.rs b/opentelemetry-sdk/src/trace/span_processor.rs index 53ee2f9bc0..278a27a2e1 100644 --- a/opentelemetry-sdk/src/trace/span_processor.rs +++ b/opentelemetry-sdk/src/trace/span_processor.rs @@ -34,9 +34,9 @@ //! [`is_recording`]: opentelemetry::trace::Span::is_recording() //! 
[`TracerProvider`]: opentelemetry::trace::TracerProvider

-use crate::export::trace::{SpanData, SpanExporter};
 use crate::resource::Resource;
 use crate::trace::Span;
+use crate::trace::{SpanData, SpanExporter};
 use opentelemetry::{otel_debug, otel_warn};
 use opentelemetry::{otel_error, otel_info};
 use opentelemetry::{
@@ -85,6 +85,7 @@ pub trait SpanProcessor: Send + Sync + std::fmt::Debug {
     /// `on_end` is called after a `Span` is ended (i.e., the end timestamp is
     /// already set). This method is called synchronously within the `Span::end`
     /// API, therefore it should not block or throw an exception.
+    /// TODO - This method should take a reference to `SpanData`
     fn on_end(&self, span: SpanData);
     /// Force the spans lying in the cache to be exported.
     fn force_flush(&self) -> TraceResult<()>;
@@ -101,6 +102,17 @@ pub trait SpanProcessor: Send + Sync + std::fmt::Debug {
 /// `SpanExporter`, as soon as they are finished, without any batching. This is
 /// typically useful for debugging and testing. For scenarios requiring higher
 /// performance/throughput, consider using [BatchSpanProcessor].
+/// Spans are exported synchronously
+/// in the same thread that emits the span.
+/// When using this processor with the OTLP Exporter, the following exporter
+/// features are supported:
+/// - `grpc-tonic`: This requires TracerProvider to be created within a tokio
+///   runtime. Spans can be emitted from any thread, including tokio runtime
+///   threads.
+/// - `reqwest-blocking-client`: TracerProvider may be created anywhere, but
+///   spans must be emitted from a non-tokio runtime thread.
+/// - `reqwest-client`: TracerProvider may be created anywhere, but spans must be
+///   emitted from a tokio runtime thread.
 #[derive(Debug)]
 pub struct SimpleSpanProcessor {
     exporter: Mutex<Box<dyn SpanExporter>>,
@@ -163,12 +175,20 @@ impl SpanProcessor for SimpleSpanProcessor {
     }
 }

+use crate::trace::ExportResult;
 /// The `BatchSpanProcessor` collects finished spans in a buffer and exports them
 /// in batches to the configured `SpanExporter`. This processor is ideal for
 /// high-throughput environments, as it minimizes the overhead of exporting spans
 /// individually. It uses a **dedicated background thread** to manage and export spans
 /// asynchronously, ensuring that the application's main execution flow is not blocked.
 ///
+/// When using this processor with the OTLP Exporter, the following exporter
+/// features are supported:
+/// - `grpc-tonic`: This requires `TracerProvider` to be created within a tokio
+///   runtime.
+/// - `reqwest-blocking-client`: Works with a regular `main` or `tokio::main`.
+///
+/// In other words, other clients like `reqwest` and `hyper` are not supported.
 ///
 /// # Example
 ///
 /// This example demonstrates how to configure and use the `BatchSpanProcessor`
@@ -217,8 +237,8 @@ impl SpanProcessor for SimpleSpanProcessor {
///     provider.shutdown();
/// }
/// ```
-use futures_executor::block_on;
 use std::sync::mpsc::sync_channel;
+use std::sync::mpsc::Receiver;
 use std::sync::mpsc::RecvTimeoutError;
 use std::sync::mpsc::SyncSender;
@@ -226,7 +246,8 @@ use std::sync::mpsc::SyncSender;
 #[allow(clippy::large_enum_variant)]
 #[derive(Debug)]
 enum BatchMessage {
-    ExportSpan(SpanData),
+    //ExportSpan(SpanData),
+    ExportSpan(Arc<AtomicBool>),
     ForceFlush(SyncSender<TraceResult<()>>),
     Shutdown(SyncSender<TraceResult<()>>),
     SetResource(Arc<Resource>),
 }

 /// A batch span processor with a dedicated background thread.
#[derive(Debug)]
pub struct BatchSpanProcessor {
-    message_sender: SyncSender<BatchMessage>,
+    span_sender: SyncSender<SpanData>, // Data channel to store spans
+    message_sender: SyncSender<BatchMessage>, // Control channel to store control messages.
     handle: Mutex<Option<thread::JoinHandle<()>>>,
     forceflush_timeout: Duration,
     shutdown_timeout: Duration,
     is_shutdown: AtomicBool,
     dropped_span_count: Arc<AtomicUsize>,
+    export_span_message_sent: Arc<AtomicBool>,
+    current_batch_size: Arc<AtomicUsize>,
+    max_export_batch_size: usize,
+    max_queue_size: usize,
 }

 impl BatchSpanProcessor {
@@ -255,7 +281,12 @@ impl BatchSpanProcessor {
     where
         E: SpanExporter + Send + 'static,
     {
-        let (message_sender, message_receiver) = sync_channel(config.max_queue_size);
+        let (span_sender, span_receiver) = sync_channel::<SpanData>(config.max_queue_size);
+        let (message_sender, message_receiver) = sync_channel::<BatchMessage>(64); // Is this a reasonable bound?
+        let max_queue_size = config.max_queue_size;
+        let max_export_batch_size = config.max_export_batch_size;
+        let current_batch_size = Arc::new(AtomicUsize::new(0));
+        let current_batch_size_for_thread = current_batch_size.clone();

         let handle = thread::Builder::new()
             .name("OpenTelemetry.Traces.BatchProcessor".to_string())
@@ -268,7 +299,7 @@ impl BatchSpanProcessor {
                 );
                 let mut spans = Vec::with_capacity(config.max_export_batch_size);
                 let mut last_export_time = Instant::now();
-
+                let current_batch_size = current_batch_size_for_thread;
                 loop {
                     let remaining_time_option = config
                         .scheduled_delay
                         .checked_sub(last_export_time.elapsed());
                     let remaining_time = match remaining_time_option {
                         Some(remaining_time) => remaining_time,
                         None => config.scheduled_delay,
                     };
                     match message_receiver.recv_timeout(remaining_time) {
                         Ok(message) => match message {
-                            BatchMessage::ExportSpan(span) => {
-                                spans.push(span);
-                                if spans.len() >= config.max_queue_size
-                                    || last_export_time.elapsed() >= config.scheduled_delay
-                                {
-                                    if let Err(err) = block_on(exporter.export(spans.split_off(0)))
-                                    {
-                                        otel_error!(
-                                            name: "BatchSpanProcessor.ExportError",
-                                            error = format!("{}", err)
-                                        );
-                                    }
-                                    last_export_time = Instant::now();
-                                }
+                            BatchMessage::ExportSpan(export_span_message_sent) => {
+                                // Reset the export span message sent flag now that it has been processed.
+                                export_span_message_sent.store(false, Ordering::Relaxed);
+                                otel_debug!(
+                                    name: "BatchSpanProcessor.ExportingDueToBatchSize",
+                                );
+                                let _ = Self::get_spans_and_export(
+                                    &span_receiver,
+                                    &mut exporter,
+                                    &mut spans,
+                                    &mut last_export_time,
+                                    &current_batch_size,
+                                    &config,
+                                );
                             }
                             BatchMessage::ForceFlush(sender) => {
-                                let result = block_on(exporter.export(spans.split_off(0)));
+                                otel_debug!(name: "BatchSpanProcessor.ExportingDueToForceFlush");
+                                let result = Self::get_spans_and_export(
+                                    &span_receiver,
+                                    &mut exporter,
+                                    &mut spans,
+                                    &mut last_export_time,
+                                    &current_batch_size,
+                                    &config,
+                                );
                                 let _ = sender.send(result);
                             }
                             BatchMessage::Shutdown(sender) => {
-                                let result = block_on(exporter.export(spans.split_off(0)));
+                                otel_debug!(name: "BatchSpanProcessor.ExportingDueToShutdown");
+                                let result = Self::get_spans_and_export(
+                                    &span_receiver,
+                                    &mut exporter,
+                                    &mut spans,
+                                    &mut last_export_time,
+                                    &current_batch_size,
+                                    &config,
+                                );
                                 let _ = sender.send(result);
+
+                                otel_debug!(
+                                    name: "BatchSpanProcessor.ThreadExiting",
+                                    reason = "ShutdownRequested"
+                                );
+                                //
+                                // break out the loop and return from the current background thread.
+                                //
+                                break;
                             }
                             BatchMessage::SetResource(resource) => {
@@ -308,15 +363,18 @@
                             },
                             Err(RecvTimeoutError::Timeout) => {
-                                if last_export_time.elapsed() >= config.scheduled_delay {
-                                    if let Err(err) = block_on(exporter.export(spans.split_off(0))) {
-                                        otel_error!(
-                                            name: "BatchSpanProcessor.ExportError",
-                                            error = format!("{}", err)
-                                        );
-                                    }
-                                    last_export_time = Instant::now();
-                                }
+                                otel_debug!(
+                                    name: "BatchSpanProcessor.ExportingDueToTimer",
+                                );
+
+                                let _ = Self::get_spans_and_export(
+                                    &span_receiver,
+                                    &mut exporter,
+                                    &mut spans,
+                                    &mut last_export_time,
+                                    &current_batch_size,
+                                    &config,
+                                );
                             }
                             Err(RecvTimeoutError::Disconnected) => {
                                 // Channel disconnected, only thing to do is break
@@ -336,12 +394,17 @@
             .expect("Failed to spawn thread"); //TODO: Handle thread spawn failure

         Self {
+            span_sender,
             message_sender,
             handle: Mutex::new(Some(handle)),
             forceflush_timeout: Duration::from_secs(5), // TODO: make this configurable
             shutdown_timeout: Duration::from_secs(5), // TODO: make this configurable
             is_shutdown: AtomicBool::new(false),
             dropped_span_count: Arc::new(AtomicUsize::new(0)),
+            max_queue_size,
+            export_span_message_sent: Arc::new(AtomicBool::new(false)),
+            current_batch_size,
+            max_export_batch_size,
         }
     }
@@ -355,6 +418,72 @@
             config: BatchConfig::default(),
         }
     }
+
+    // This method gets up to `max_export_batch_size` spans from the channel and exports them.
+    // It returns the result of the export operation.
+    // It expects the span vec to be empty when it's called.
+    #[inline]
+    fn get_spans_and_export<E>(
+        spans_receiver: &Receiver<SpanData>,
+        exporter: &mut E,
+        spans: &mut Vec<SpanData>,
+        last_export_time: &mut Instant,
+        current_batch_size: &AtomicUsize,
+        config: &BatchConfig,
+    ) -> ExportResult
+    where
+        E: SpanExporter + Send + Sync + 'static,
+    {
+        // Get up to `max_export_batch_size` spans from the channel and push them to the span vec
+        while let Ok(span) = spans_receiver.try_recv() {
+            spans.push(span);
+            if spans.len() == config.max_export_batch_size {
+                break;
+            }
+        }
+
+        let count_of_spans = spans.len(); // Count of spans that will be exported
+        let result = Self::export_with_timeout_sync(
+            config.max_export_timeout,
+            exporter,
+            spans,
+            last_export_time,
+        ); // This method clears the spans vec after exporting
+
+        current_batch_size.fetch_sub(count_of_spans, Ordering::Relaxed);
+        result
+    }
+
+    #[allow(clippy::vec_box)]
+    fn export_with_timeout_sync<E>(
+        _: Duration, // TODO, enforcing timeout in exporter.
+        exporter: &mut E,
+        batch: &mut Vec<SpanData>,
+        last_export_time: &mut Instant,
+    ) -> ExportResult
+    where
+        E: SpanExporter + Send + Sync + 'static,
+    {
+        *last_export_time = Instant::now();
+
+        if batch.is_empty() {
+            return TraceResult::Ok(());
+        }
+
+        let export = exporter.export(batch.split_off(0));
+        let export_result = futures_executor::block_on(export);
+
+        match export_result {
+            Ok(_) => TraceResult::Ok(()),
+            Err(err) => {
+                otel_error!(
+                    name: "BatchSpanProcessor.ExportError",
+                    error = format!("{}", err)
+                );
+                TraceResult::Err(err)
+            }
+        }
+    }
 }

 impl SpanProcessor for BatchSpanProcessor {
@@ -369,10 +498,11 @@ impl SpanProcessor for BatchSpanProcessor {
             // this is a warning, as the user is trying to emit after the processor has been shutdown
             otel_warn!(
                 name: "BatchSpanProcessor.Emit.ProcessorShutdown",
+                message = "BatchSpanProcessor has been shutdown. No further spans will be emitted."
            );
            return;
        }
-        let result = self.message_sender.try_send(BatchMessage::ExportSpan(span));
+        let result = self.span_sender.try_send(span);

        if result.is_err() {
            // Increment dropped span count. The first time we have to drop a span,
@@ -382,6 +512,36 @@
                message = "BatchSpanProcessor dropped a Span due to queue full/internal errors. No further internal log will be emitted for further drops until Shutdown. During Shutdown time, a log will be emitted with exact count of total Spans dropped.");
            }
        }
+        // At this point, sending the span to the data channel was successful.
+        // Increment the current batch size and check if it has reached the max export batch size.
+        if self.current_batch_size.fetch_add(1, Ordering::Relaxed) + 1 >= self.max_export_batch_size
+        {
+            // Check if a control message for exporting spans has already been sent to the worker thread.
+            // If not, send a control message to export spans.
+            // `export_span_message_sent` is set to false ONLY when the worker thread has processed the control message.
+
+            if !self.export_span_message_sent.load(Ordering::Relaxed) {
+                // This is a cost-efficient check as atomic load operations do not require exclusive access to the cache line.
+                // Perform the atomic swap on `export_span_message_sent` ONLY when the atomic load operation above returns false.
+                // Atomic swap/compare_exchange operations require exclusive access to the cache line on most processor architectures.
+                // We could have used compare_exchange as well here, but it's more verbose than swap.
+                if !self.export_span_message_sent.swap(true, Ordering::Relaxed) {
+                    match self.message_sender.try_send(BatchMessage::ExportSpan(
+                        self.export_span_message_sent.clone(),
+                    )) {
+                        Ok(_) => {
+                            // Control message sent successfully.
+                        }
+                        Err(_err) => {
+                            // TODO: Log error
+                            // If the control message could not be sent, reset the `export_span_message_sent` flag.
+                            self.export_span_message_sent
+                                .store(false, Ordering::Relaxed);
+                        }
+                    }
+                }
+            }
+        }
    }

    /// Flushes all pending spans.
@@ -401,17 +561,20 @@
    /// Shuts down the processor.
    fn shutdown(&self) -> TraceResult<()> {
+        if self.is_shutdown.swap(true, Ordering::Relaxed) {
+            return Err(TraceError::Other("Processor already shutdown".into()));
+        }
        let dropped_spans = self.dropped_span_count.load(Ordering::Relaxed);
+        let max_queue_size = self.max_queue_size;
        if dropped_spans > 0 {
            otel_warn!(
-                name: "BatchSpanProcessor.LogsDropped",
+                name: "BatchSpanProcessor.SpansDropped",
                dropped_span_count = dropped_spans,
+                max_queue_size = max_queue_size,
                message = "Spans were dropped due to a queue being full or other error. The count represents the total count of spans dropped in the lifetime of this BatchSpanProcessor. Consider increasing the queue size and/or decrease delay between intervals."
            );
        }
-        if self.is_shutdown.swap(true, Ordering::Relaxed) {
-            return Err(TraceError::Other("Processor already shutdown".into()));
-        }
+
        let (sender, receiver) = sync_channel(1);
        self.message_sender
            .try_send(BatchMessage::Shutdown(sender))
@@ -554,11 +717,12 @@ impl BatchConfigBuilder {
        self
    }

+    #[cfg(feature = "experimental_trace_batch_span_processor_with_async_runtime")]
    /// Set max_concurrent_exports for [`BatchConfigBuilder`].
    /// It's the maximum number of concurrent exports.
    /// Limits the number of spawned tasks for exports and thus memory consumed by an exporter.
    /// The default value is 1.
-    /// IF the max_concurrent_exports value is default value, it will cause exports to be performed
+    /// If the max_concurrent_exports value is the default value, it will cause exports to be performed
    /// synchronously on the BatchSpanProcessor task.
    pub fn with_max_concurrent_exports(mut self, max_concurrent_exports: usize) -> Self {
        self.max_concurrent_exports = max_concurrent_exports;
@@ -651,13 +815,14 @@ mod tests {
        OTEL_BSP_MAX_EXPORT_BATCH_SIZE, OTEL_BSP_MAX_QUEUE_SIZE, OTEL_BSP_MAX_QUEUE_SIZE_DEFAULT,
        OTEL_BSP_SCHEDULE_DELAY, OTEL_BSP_SCHEDULE_DELAY_DEFAULT,
    };
-    use crate::export::trace::{ExportResult, SpanData, SpanExporter};
-    use crate::testing::trace::{new_test_export_span_data, InMemorySpanExporterBuilder};
+    use crate::testing::trace::new_test_export_span_data;
    use crate::trace::span_processor::{
        OTEL_BSP_EXPORT_TIMEOUT_DEFAULT, OTEL_BSP_MAX_CONCURRENT_EXPORTS,
        OTEL_BSP_MAX_CONCURRENT_EXPORTS_DEFAULT, OTEL_BSP_MAX_EXPORT_BATCH_SIZE_DEFAULT,
    };
+    use crate::trace::InMemorySpanExporterBuilder;
    use crate::trace::{BatchConfig, BatchConfigBuilder, SpanEvents, SpanLinks};
+    use crate::trace::{ExportResult, SpanData, SpanExporter};
    use opentelemetry::trace::{SpanContext, SpanId, SpanKind, Status};
    use std::fmt::Debug;
    use std::time::Duration;
@@ -796,9 +961,10 @@ mod tests {
            .with_max_export_batch_size(10)
            .with_scheduled_delay(Duration::from_millis(10))
            .with_max_export_timeout(Duration::from_millis(10))
-            .with_max_concurrent_exports(10)
-            .with_max_queue_size(10)
-            .build();
+            .with_max_queue_size(10);
+        #[cfg(feature = "experimental_trace_batch_span_processor_with_async_runtime")]
+        let batch = batch.with_max_concurrent_exports(10);
+        let batch = batch.build();
        assert_eq!(batch.max_export_batch_size, 10);
        assert_eq!(batch.scheduled_delay, Duration::from_millis(10));
        assert_eq!(batch.max_export_timeout, Duration::from_millis(10));
@@ -1058,7 +1224,7 @@ mod tests {
            exported_resource
                .as_ref()
                .unwrap()
-                .get(Key::new("service.name")),
+                .get(&Key::new("service.name")),
            Some(Value::from("test_service"))
        );
    }
diff --git a/opentelemetry-sdk/src/trace/span_processor_with_async_runtime.rs b/opentelemetry-sdk/src/trace/span_processor_with_async_runtime.rs
index c3c241c776..ebe73a7cec 100644
--- a/opentelemetry-sdk/src/trace/span_processor_with_async_runtime.rs
+++ b/opentelemetry-sdk/src/trace/span_processor_with_async_runtime.rs
@@ -1,9 +1,9 @@
-use crate::export::trace::{ExportResult, SpanData, SpanExporter};
 use crate::resource::Resource;
 use crate::runtime::{RuntimeChannel, TrySend};
 use crate::trace::BatchConfig;
 use crate::trace::Span;
 use crate::trace::SpanProcessor;
+use crate::trace::{ExportResult, SpanData, SpanExporter};
 use futures_channel::oneshot;
 use futures_util::{
     future::{self, BoxFuture, Either},
@@ -420,16 +420,14 @@ where
 mod tests {
     // cargo test trace::span_processor::tests:: --features=testing
     use super::{BatchSpanProcessor, SpanProcessor};
-    use crate::export::trace::{ExportResult, SpanData, SpanExporter};
     use crate::runtime;
-    use crate::testing::trace::{
-        new_test_export_span_data, new_tokio_test_exporter, InMemorySpanExporterBuilder,
-    };
+    use crate::testing::trace::{new_test_export_span_data, new_tokio_test_exporter};
     use crate::trace::span_processor::{
         OTEL_BSP_EXPORT_TIMEOUT, OTEL_BSP_MAX_EXPORT_BATCH_SIZE, OTEL_BSP_MAX_QUEUE_SIZE,
         OTEL_BSP_MAX_QUEUE_SIZE_DEFAULT, OTEL_BSP_SCHEDULE_DELAY, OTEL_BSP_SCHEDULE_DELAY_DEFAULT,
     };
-    use crate::trace::{BatchConfig, BatchConfigBuilder};
+    use crate::trace::{BatchConfig, BatchConfigBuilder,
InMemorySpanExporterBuilder}; + use crate::trace::{ExportResult, SpanData, SpanExporter}; use futures_util::Future; use std::fmt::Debug; use std::time::Duration; diff --git a/opentelemetry-semantic-conventions/scripts/generate-consts-from-spec.sh b/opentelemetry-semantic-conventions/scripts/generate-consts-from-spec.sh index 7dd423477b..74c1df9d84 100755 --- a/opentelemetry-semantic-conventions/scripts/generate-consts-from-spec.sh +++ b/opentelemetry-semantic-conventions/scripts/generate-consts-from-spec.sh @@ -5,8 +5,8 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" CRATE_DIR="${SCRIPT_DIR}/../" # freeze the spec version and generator version to make generation reproducible -SPEC_VERSION=1.29.0 -WEAVER_VERSION=v0.11.0 +SPEC_VERSION=1.30.0 +WEAVER_VERSION=v0.12.0 cd "$CRATE_DIR" @@ -48,7 +48,14 @@ expression=' s/\[([a-zA-Z0-9\.\s]+,[a-zA-Z0-9\.\s]+)\]/\\[\1\\]/g s/\\\[([^\]]+)\]\(([^)]+)\)/[\1](\2)/g ' + +# TODO - remove this with semconv 1.31.0. Refer: https://github.com/open-telemetry/semantic-conventions/pull/1827 +# Fix broken and malformed K8s JobSpec link +link_fix_expression='s/\\\[K8s JobSpec\\\]\(https:\/\/kubernetes\.io\/docs\/reference\/generated\/kubernetes-api\/v1\.30\/#jobspec-v1-batch\./[K8s JobSpec](https:\/\/kubernetes\.io\/docs\/reference\/generated\/kubernetes-api\/v1\.30\/#jobspec-v1-batch)./g' + "${SED[@]}" -E "${expression}" src/metric.rs "${SED[@]}" -E "${expression}" src/attribute.rs +"${SED[@]}" -E "${link_fix_expression}" src/metric.rs + cargo fmt diff --git a/opentelemetry-semantic-conventions/scripts/templates/registry/rust/weaver.yaml b/opentelemetry-semantic-conventions/scripts/templates/registry/rust/weaver.yaml index e1ec32584b..cda3945f94 100644 --- a/opentelemetry-semantic-conventions/scripts/templates/registry/rust/weaver.yaml +++ b/opentelemetry-semantic-conventions/scripts/templates/registry/rust/weaver.yaml @@ -14,7 +14,7 @@ comment_formats: default_comment_format: rust params: - schema_url: "https://opentelemetry.io/schemas/1.29.0" + schema_url: "https://opentelemetry.io/schemas/1.30.0" exclude_root_namespace: [] excluded_attributes: ["messaging.client_id"] diff --git a/opentelemetry-semantic-conventions/src/attribute.rs b/opentelemetry-semantic-conventions/src/attribute.rs index 91b70da95a..e3e6c61c22 100644 --- a/opentelemetry-semantic-conventions/src/attribute.rs +++ b/opentelemetry-semantic-conventions/src/attribute.rs @@ -41,7 +41,7 @@ pub const ANDROID_STATE: &str = "android.state"; #[cfg(feature = "semconv_experimental")] pub const ARTIFACT_ATTESTATION_FILENAME: &str = "artifact.attestation.filename"; -/// The full [hash value (see glossary)](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf), of the built attestation. Some envelopes in the software attestation space also refer to this as the [digest](https://github.com/in-toto/attestation/blob/main/spec/README.md#in-toto-attestation-framework-spec). +/// The full [hash value (see glossary)](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf), of the built attestation. Some envelopes in the [software attestation space](https://github.com/in-toto/attestation/tree/main/spec) also refer to this as the **digest**. /// /// ## Notes /// @@ -503,6 +503,16 @@ pub const AWS_ECS_TASK_REVISION: &str = "aws.ecs.task.revision"; #[cfg(feature = "semconv_experimental")] pub const AWS_EKS_CLUSTER_ARN: &str = "aws.eks.cluster.arn"; +/// The AWS extended request ID as returned in the response header `x-amz-id-2`. 
+/// +/// ## Notes +/// +/// # Examples +/// +/// - `"wzHcyEWfmOGDIE5QOhTAqFDoDWP3y8IUvpNINCwL9N4TEHbUw0/gZJ+VZTmCNCWR7fezEN3eCiQ="` +#[cfg(feature = "semconv_experimental")] +pub const AWS_EXTENDED_REQUEST_ID: &str = "aws.extended_request_id"; + /// The full invoked ARN as provided on the `Context` passed to the function (`Lambda-Runtime-Invoked-Function-Arn` header on the `/runtime/invocation/next` applicable). /// /// ## Notes @@ -570,7 +580,7 @@ pub const AWS_LOG_STREAM_ARNS: &str = "aws.log.stream.arns"; #[cfg(feature = "semconv_experimental")] pub const AWS_LOG_STREAM_NAMES: &str = "aws.log.stream.names"; -/// The AWS request ID as returned in the response headers `x-amz-request-id` or `x-amz-requestid`. +/// The AWS request ID as returned in the response headers `x-amzn-requestid`, `x-amzn-request-id` or `x-amz-request-id`. /// /// ## Notes /// @@ -709,6 +719,82 @@ pub const AZ_NAMESPACE: &str = "az.namespace"; #[cfg(feature = "semconv_experimental")] pub const AZ_SERVICE_REQUEST_ID: &str = "az.service_request_id"; +/// The unique identifier of the client instance. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"3ba4827d-4422-483f-b59f-85b74211c11d"` +/// - `"storage-client-1"` +#[cfg(feature = "semconv_experimental")] +pub const AZURE_CLIENT_ID: &str = "azure.client.id"; + +/// Cosmos client connection mode. +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] +pub const AZURE_COSMOSDB_CONNECTION_MODE: &str = "azure.cosmosdb.connection.mode"; + +/// Account or request [consistency level](https://learn.microsoft.com/azure/cosmos-db/consistency-levels). +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"Eventual"` +/// - `"ConsistentPrefix"` +/// - `"BoundedStaleness"` +/// - `"Strong"` +/// - `"Session"` +#[cfg(feature = "semconv_experimental")] +pub const AZURE_COSMOSDB_CONSISTENCY_LEVEL: &str = "azure.cosmosdb.consistency.level"; + +/// List of regions contacted during operation in the order that they were contacted. If there is more than one region listed, it indicates that the operation was performed on multiple regions i.e. cross-regional call. +/// +/// ## Notes +/// +/// Region name matches the format of `displayName` in [Azure Location API](https://learn.microsoft.com/rest/api/subscription/subscriptions/list-locations?view=rest-subscription-2021-10-01&tabs=HTTP#location) +/// +/// # Examples +/// +/// - `[ +/// "North Central US", +/// "Australia East", +/// "Australia Southeast", +/// ]` +#[cfg(feature = "semconv_experimental")] +pub const AZURE_COSMOSDB_OPERATION_CONTACTED_REGIONS: &str = + "azure.cosmosdb.operation.contacted_regions"; + +/// The number of request units consumed by the operation. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `46.18` +/// - `1.0` +#[cfg(feature = "semconv_experimental")] +pub const AZURE_COSMOSDB_OPERATION_REQUEST_CHARGE: &str = "azure.cosmosdb.operation.request_charge"; + +/// Request payload size in bytes. +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] +pub const AZURE_COSMOSDB_REQUEST_BODY_SIZE: &str = "azure.cosmosdb.request.body.size"; + +/// Cosmos DB sub status code. 
+/// +/// ## Notes +/// +/// # Examples +/// +/// - `1000` +/// - `1002` +#[cfg(feature = "semconv_experimental")] +pub const AZURE_COSMOSDB_RESPONSE_SUB_STATUS_CODE: &str = "azure.cosmosdb.response.sub_status_code"; + /// Array of brand name and version separated by a space /// /// ## Notes @@ -763,6 +849,59 @@ pub const BROWSER_MOBILE: &str = "browser.mobile"; #[cfg(feature = "semconv_experimental")] pub const BROWSER_PLATFORM: &str = "browser.platform"; +/// The consistency level of the query. Based on consistency values from [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] +pub const CASSANDRA_CONSISTENCY_LEVEL: &str = "cassandra.consistency.level"; + +/// The data center of the coordinating node for a query. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"us-west-2"` +#[cfg(feature = "semconv_experimental")] +pub const CASSANDRA_COORDINATOR_DC: &str = "cassandra.coordinator.dc"; + +/// The ID of the coordinating node for a query. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"be13faa2-8574-4d71-926d-27f16cf8a7af"` +#[cfg(feature = "semconv_experimental")] +pub const CASSANDRA_COORDINATOR_ID: &str = "cassandra.coordinator.id"; + +/// The fetch size used for paging, i.e. how many rows will be returned at once. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `5000` +#[cfg(feature = "semconv_experimental")] +pub const CASSANDRA_PAGE_SIZE: &str = "cassandra.page.size"; + +/// Whether or not the query is idempotent. +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] +pub const CASSANDRA_QUERY_IDEMPOTENT: &str = "cassandra.query.idempotent"; + +/// The number of times a query was speculatively executed. Not set or `0` if the query was not executed speculatively. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `0` +/// - `2` +#[cfg(feature = "semconv_experimental")] +pub const CASSANDRA_SPECULATIVE_EXECUTION_COUNT: &str = "cassandra.speculative_execution.count"; + /// The human readable name of the pipeline within a CI/CD system. /// /// ## Notes @@ -776,6 +915,19 @@ pub const BROWSER_PLATFORM: &str = "browser.platform"; #[cfg(feature = "semconv_experimental")] pub const CICD_PIPELINE_NAME: &str = "cicd.pipeline.name"; +/// The result of a pipeline run. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"success"` +/// - `"failure"` +/// - `"timeout"` +/// - `"skipped"` +#[cfg(feature = "semconv_experimental")] +pub const CICD_PIPELINE_RESULT: &str = "cicd.pipeline.result"; + /// The unique identifier of a pipeline run within a CI/CD system. /// /// ## Notes @@ -786,6 +938,18 @@ pub const CICD_PIPELINE_NAME: &str = "cicd.pipeline.name"; #[cfg(feature = "semconv_experimental")] pub const CICD_PIPELINE_RUN_ID: &str = "cicd.pipeline.run.id"; +/// The pipeline run goes through these states during its lifecycle. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"pending"` +/// - `"executing"` +/// - `"finalizing"` +#[cfg(feature = "semconv_experimental")] +pub const CICD_PIPELINE_RUN_STATE: &str = "cicd.pipeline.run.state"; + /// The human readable name of a task within a pipeline. Task here most closely aligns with a [computing process](https://wikipedia.org/wiki/Pipeline_(computing)) in a pipeline. Other terms for tasks include commands, steps, and procedures. 
/// /// ## Notes @@ -831,6 +995,30 @@ pub const CICD_PIPELINE_TASK_RUN_URL_FULL: &str = "cicd.pipeline.task.run.url.fu #[cfg(feature = "semconv_experimental")] pub const CICD_PIPELINE_TASK_TYPE: &str = "cicd.pipeline.task.type"; +/// The name of a component of the CICD system. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"controller"` +/// - `"scheduler"` +/// - `"agent"` +#[cfg(feature = "semconv_experimental")] +pub const CICD_SYSTEM_COMPONENT: &str = "cicd.system.component"; + +/// The state of a CICD worker / agent. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"idle"` +/// - `"busy"` +/// - `"down"` +#[cfg(feature = "semconv_experimental")] +pub const CICD_WORKER_STATE: &str = "cicd.worker.state"; + /// Client address - domain name if available without reverse DNS lookup; otherwise, IP address or Unix domain socket name. /// /// ## Notes @@ -1159,7 +1347,7 @@ pub const CLOUDFOUNDRY_SYSTEM_ID: &str = "cloudfoundry.system.id"; #[cfg(feature = "semconv_experimental")] pub const CLOUDFOUNDRY_SYSTEM_INSTANCE_ID: &str = "cloudfoundry.system.instance.id"; -/// The column number in `code.filepath` best representing the operation. It SHOULD point within the code unit named in `code.function`. +/// Deprecated, use `code.column.number` /// /// ## Notes /// @@ -1167,8 +1355,19 @@ pub const CLOUDFOUNDRY_SYSTEM_INSTANCE_ID: &str = "cloudfoundry.system.instance. /// /// - `16` #[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `code.column.number`")] pub const CODE_COLUMN: &str = "code.column"; +/// The column number in `code.file.path` best representing the operation. It SHOULD point within the code unit named in `code.function.name`. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `16` +#[cfg(feature = "semconv_experimental")] +pub const CODE_COLUMN_NUMBER: &str = "code.column.number"; + /// The source code file name that identifies the code unit as uniquely as possible (preferably an absolute file path). /// /// ## Notes @@ -1177,9 +1376,19 @@ pub const CODE_COLUMN: &str = "code.column"; /// /// - `"/usr/local/MyApplication/content_root/app/index.php"` #[cfg(feature = "semconv_experimental")] +pub const CODE_FILE_PATH: &str = "code.file.path"; + +/// Deprecated, use `code.file.path` instead +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"/usr/local/MyApplication/content_root/app/index.php"` +#[cfg(feature = "semconv_experimental")] pub const CODE_FILEPATH: &str = "code.filepath"; -/// The method or function name, or equivalent (usually rightmost part of the code unit's name). +/// Deprecated, use `code.function.name` instead /// /// ## Notes /// @@ -1187,9 +1396,20 @@ pub const CODE_FILEPATH: &str = "code.filepath"; /// /// - `"serveRequest"` #[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `code.function.name`")] pub const CODE_FUNCTION: &str = "code.function"; -/// The line number in `code.filepath` best representing the operation. It SHOULD point within the code unit named in `code.function`. +/// The method or function name, or equivalent (usually rightmost part of the code unit's name). +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"serveRequest"` +#[cfg(feature = "semconv_experimental")] +pub const CODE_FUNCTION_NAME: &str = "code.function.name"; + +/// The line number in `code.file.path` best representing the operation. It SHOULD point within the code unit named in `code.function.name`. 
/// /// ## Notes /// @@ -1197,9 +1417,20 @@ pub const CODE_FUNCTION: &str = "code.function"; /// /// - `42` #[cfg(feature = "semconv_experimental")] +pub const CODE_LINE_NUMBER: &str = "code.line.number"; + +/// Deprecated, use `code.line.number` instead +/// +/// ## Notes +/// +/// # Examples +/// +/// - `42` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `code.line.number`")] pub const CODE_LINENO: &str = "code.lineno"; -/// The "namespace" within which `code.function` is defined. Usually the qualified class or module name, such that `code.namespace` + some separator + `code.function` form a unique identifier for the code unit. +/// The "namespace" within which `code.function.name` is defined. Usually the qualified class or module name, such that `code.namespace` + some separator + `code.function.name` form a unique identifier for the code unit. /// /// ## Notes /// @@ -1407,13 +1638,14 @@ pub const CONTAINER_RUNTIME: &str = "container.runtime"; #[cfg(feature = "semconv_experimental")] pub const CPU_MODE: &str = "cpu.mode"; -/// The consistency level of the query. Based on consistency values from [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). +/// Deprecated, use `cassandra.consistency.level` instead. /// /// ## Notes #[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `cassandra.consistency.level`.")] pub const DB_CASSANDRA_CONSISTENCY_LEVEL: &str = "db.cassandra.consistency_level"; -/// The data center of the coordinating node for a query. +/// Deprecated, use `cassandra.coordinator.dc` instead. /// /// ## Notes /// @@ -1421,9 +1653,10 @@ pub const DB_CASSANDRA_CONSISTENCY_LEVEL: &str = "db.cassandra.consistency_level /// /// - `"us-west-2"` #[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `cassandra.coordinator.dc`.")] pub const DB_CASSANDRA_COORDINATOR_DC: &str = "db.cassandra.coordinator.dc"; -/// The ID of the coordinating node for a query. +/// Deprecated, use `cassandra.coordinator.id` instead. /// /// ## Notes /// @@ -1431,15 +1664,17 @@ pub const DB_CASSANDRA_COORDINATOR_DC: &str = "db.cassandra.coordinator.dc"; /// /// - `"be13faa2-8574-4d71-926d-27f16cf8a7af"` #[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `cassandra.coordinator.id`.")] pub const DB_CASSANDRA_COORDINATOR_ID: &str = "db.cassandra.coordinator.id"; -/// Whether or not the query is idempotent. +/// Deprecated, use `cassandra.query.idempotent` instead. /// /// ## Notes #[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `cassandra.query.idempotent`.")] pub const DB_CASSANDRA_IDEMPOTENCE: &str = "db.cassandra.idempotence"; -/// The fetch size used for paging, i.e. how many rows will be returned at once. +/// Deprecated, use `cassandra.page.size` instead. /// /// ## Notes /// @@ -1447,9 +1682,10 @@ pub const DB_CASSANDRA_IDEMPOTENCE: &str = "db.cassandra.idempotence"; /// /// - `5000` #[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `cassandra.page.size`.")] pub const DB_CASSANDRA_PAGE_SIZE: &str = "db.cassandra.page_size"; -/// The number of times a query was speculatively executed. Not set or `0` if the query was not executed speculatively. +/// Deprecated, use `cassandra.speculative_execution.count` instead. 
/// /// ## Notes /// @@ -1458,6 +1694,7 @@ pub const DB_CASSANDRA_PAGE_SIZE: &str = "db.cassandra.page_size"; /// - `0` /// - `2` #[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `cassandra.speculative_execution.count`.")] pub const DB_CASSANDRA_SPECULATIVE_EXECUTION_COUNT: &str = "db.cassandra.speculative_execution_count"; @@ -1526,8 +1763,6 @@ pub const DB_CLIENT_CONNECTIONS_STATE: &str = "db.client.connections.state"; /// For batch operations, if the individual operations are known to have the same collection name /// then that collection name SHOULD be used. /// -/// This attribute has stability level RELEASE CANDIDATE. -/// /// # Examples /// /// - `"public.users"` @@ -1546,7 +1781,7 @@ pub const DB_COLLECTION_NAME: &str = "db.collection.name"; #[deprecated(note = "Replaced by `server.address` and `server.port`.")] pub const DB_CONNECTION_STRING: &str = "db.connection_string"; -/// Unique Cosmos client instance id. +/// Deprecated, use `azure.client.id` instead. /// /// ## Notes /// @@ -1554,15 +1789,17 @@ pub const DB_CONNECTION_STRING: &str = "db.connection_string"; /// /// - `"3ba4827d-4422-483f-b59f-85b74211c11d"` #[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `azure.client.id`.")] pub const DB_COSMOSDB_CLIENT_ID: &str = "db.cosmosdb.client_id"; -/// Cosmos client connection mode. +/// Deprecated, use `azure.cosmosdb.connection.mode` instead. /// /// ## Notes #[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `azure.cosmosdb.connection.mode`.")] pub const DB_COSMOSDB_CONNECTION_MODE: &str = "db.cosmosdb.connection_mode"; -/// Account or request [consistency level](https://learn.microsoft.com/azure/cosmos-db/consistency-levels). +/// Deprecated, use `cosmosdb.consistency.level` instead. /// /// ## Notes /// @@ -1574,6 +1811,7 @@ pub const DB_COSMOSDB_CONNECTION_MODE: &str = "db.cosmosdb.connection_mode"; /// - `"Strong"` /// - `"Session"` #[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `azure.cosmosdb.consistency.level`.")] pub const DB_COSMOSDB_CONSISTENCY_LEVEL: &str = "db.cosmosdb.consistency_level"; /// Deprecated, use `db.collection.name` instead. @@ -1594,12 +1832,10 @@ pub const DB_COSMOSDB_CONTAINER: &str = "db.cosmosdb.container"; #[deprecated(note = "No replacement at this time.")] pub const DB_COSMOSDB_OPERATION_TYPE: &str = "db.cosmosdb.operation_type"; -/// List of regions contacted during operation in the order that they were contacted. If there is more than one region listed, it indicates that the operation was performed on multiple regions i.e. cross-regional call. +/// Deprecated, use `azure.cosmosdb.operation.contacted_regions` instead. /// /// ## Notes /// -/// Region name matches the format of `displayName` in [Azure Location API](https://learn.microsoft.com/rest/api/subscription/subscriptions/list-locations?view=rest-subscription-2021-10-01&tabs=HTTP#location) -/// /// # Examples /// /// - `[ @@ -1608,9 +1844,10 @@ pub const DB_COSMOSDB_OPERATION_TYPE: &str = "db.cosmosdb.operation_type"; /// "Australia Southeast", /// ]` #[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `azure.cosmosdb.operation.contacted_regions`.")] pub const DB_COSMOSDB_REGIONS_CONTACTED: &str = "db.cosmosdb.regions_contacted"; -/// Request units consumed for the operation. +/// Deprecated, use `azure.cosmosdb.operation.request_charge` instead. 
/// /// ## Notes /// @@ -1619,12 +1856,14 @@ pub const DB_COSMOSDB_REGIONS_CONTACTED: &str = "db.cosmosdb.regions_contacted"; /// - `46.18` /// - `1.0` #[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `azure.cosmosdb.operation.request_charge`.")] pub const DB_COSMOSDB_REQUEST_CHARGE: &str = "db.cosmosdb.request_charge"; -/// Request payload size in bytes. +/// Deprecated, use `azure.cosmosdb.request.body.size` instead. /// /// ## Notes #[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `azure.cosmosdb.request.body.size`.")] pub const DB_COSMOSDB_REQUEST_CONTENT_LENGTH: &str = "db.cosmosdb.request_content_length"; /// Deprecated, use `db.response.status_code` instead. @@ -1639,7 +1878,7 @@ pub const DB_COSMOSDB_REQUEST_CONTENT_LENGTH: &str = "db.cosmosdb.request_conten #[deprecated(note = "Replaced by `db.response.status_code`.")] pub const DB_COSMOSDB_STATUS_CODE: &str = "db.cosmosdb.status_code"; -/// Cosmos DB sub status code. +/// Deprecated, use `azure.cosmosdb.response.sub_status_code` instead. /// /// ## Notes /// @@ -1648,6 +1887,7 @@ pub const DB_COSMOSDB_STATUS_CODE: &str = "db.cosmosdb.status_code"; /// - `1000` /// - `1002` #[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `azure.cosmosdb.response.sub_status_code`.")] pub const DB_COSMOSDB_SUB_STATUS_CODE: &str = "db.cosmosdb.sub_status_code"; /// Deprecated, use `db.namespace` instead. @@ -1661,7 +1901,7 @@ pub const DB_COSMOSDB_SUB_STATUS_CODE: &str = "db.cosmosdb.sub_status_code"; #[deprecated(note = "Replaced by `db.namespace`.")] pub const DB_ELASTICSEARCH_CLUSTER_NAME: &str = "db.elasticsearch.cluster.name"; -/// Represents the human-readable identifier of the node/instance to which a request was routed. +/// Deprecated, use `elasticsearch.node.name` instead. /// /// ## Notes /// @@ -1669,19 +1909,19 @@ pub const DB_ELASTICSEARCH_CLUSTER_NAME: &str = "db.elasticsearch.cluster.name"; /// /// - `"instance-0000000001"` #[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `elasticsearch.node.name`.")] pub const DB_ELASTICSEARCH_NODE_NAME: &str = "db.elasticsearch.node.name"; -/// A dynamic value in the url path. +/// Deprecated, use `db.operation.parameter` instead. /// /// ## Notes /// -/// Many Elasticsearch url paths allow dynamic values. These SHOULD be recorded in span attributes in the format `db.elasticsearch.path_parts.[key]`, where `[key]` is the url path part name. The implementation SHOULD reference the [elasticsearch schema](https://raw.githubusercontent.com/elastic/elasticsearch-specification/main/output/schema/schema.json) in order to map the path part values to their names. -/// /// # Examples /// /// - `"db.elasticsearch.path_parts.index=test-index"` /// - `"db.elasticsearch.path_parts.doc_id=123"` #[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `db.operation.parameter`.")] pub const DB_ELASTICSEARCH_PATH_PARTS: &str = "db.elasticsearch.path_parts"; /// Deprecated, no general replacement at this time. For Elasticsearch, use `db.elasticsearch.node.name` instead. @@ -1750,7 +1990,6 @@ pub const DB_NAME: &str = "db.name"; /// If a database system has multiple namespace components, they SHOULD be concatenated (potentially using database system specific conventions) from most general to most specific namespace component, and more specific namespaces SHOULD NOT be captured without the more general namespaces, to ensure that "startswith" queries for the more general namespaces will be valid. 
/// Semantic conventions for individual database systems SHOULD document what `db.namespace` means in the context of that system. /// It is RECOMMENDED to capture the value as provided by the application without attempting to do any case normalization. -/// This attribute has stability level RELEASE CANDIDATE. /// /// # Examples /// @@ -1777,7 +2016,6 @@ pub const DB_OPERATION: &str = "db.operation"; /// ## Notes /// /// Operations are only considered batches when they contain two or more operations, and so `db.operation.batch.size` SHOULD never be `1`. -/// This attribute has stability level RELEASE CANDIDATE. /// /// # Examples /// @@ -1802,8 +2040,6 @@ pub const DB_OPERATION_BATCH_SIZE: &str = "db.operation.batch.size"; /// otherwise `db.operation.name` SHOULD be `BATCH` or some other database /// system specific term if more applicable. /// -/// This attribute has stability level RELEASE CANDIDATE. -/// /// # Examples /// /// - `"findAndModify"` @@ -1818,7 +2054,6 @@ pub const DB_OPERATION_NAME: &str = "db.operation.name"; /// /// If a parameter has no name and instead is referenced only by index, then `[key]` SHOULD be the 0-based index. /// If `db.query.text` is also captured, then `db.operation.parameter.[key]` SHOULD match up with the parameterized placeholders present in `db.query.text`. -/// This attribute has stability level RELEASE CANDIDATE. /// /// # Examples /// @@ -1845,7 +2080,6 @@ pub const DB_QUERY_PARAMETER: &str = "db.query.parameter"; /// /// `db.query.summary` provides static summary of the query text. It describes a class of database queries and is useful as a grouping key, especially when analyzing telemetry for database calls involving complex queries. /// Summary may be available to the instrumentation through instrumentation hooks or other means. If it is not available, instrumentations that support query parsing SHOULD generate a summary following [Generating query summary](../../docs/database/database-spans.md#generating-a-summary-of-the-query-text) section. -/// This attribute has stability level RELEASE CANDIDATE. /// /// # Examples /// @@ -1862,7 +2096,6 @@ pub const DB_QUERY_SUMMARY: &str = "db.query.summary"; /// For sanitization see [Sanitization of `db.query.text`](../../docs/database/database-spans.md#sanitization-of-dbquerytext). /// For batch operations, if the individual operations are known to have the same query text then that query text SHOULD be used, otherwise all of the individual query texts SHOULD be concatenated with separator `; ` or some other database system specific separator if more applicable. /// Even though parameterized query text can potentially have sensitive data, by using a parameterized query the user is giving a strong signal that any sensitive data will be passed as parameter values, and the benefit to observability of capturing the static part of the query text by default outweighs the risk. -/// This attribute has stability level RELEASE CANDIDATE. /// /// # Examples /// @@ -1902,7 +2135,6 @@ pub const DB_RESPONSE_RETURNED_ROWS: &str = "db.response.returned_rows"; /// /// The status code returned by the database. Usually it represents an error code, but may also represent partial success, warning, or differentiate between various types of successful outcomes. /// Semantic conventions for individual database systems SHOULD document what `db.response.status_code` means in the context of that system. -/// This attribute has stability level RELEASE CANDIDATE. 
/// /// # Examples /// @@ -1936,14 +2168,20 @@ pub const DB_SQL_TABLE: &str = "db.sql.table"; #[deprecated(note = "Replaced by `db.query.text`.")] pub const DB_STATEMENT: &str = "db.statement"; +/// Deprecated, use `db.system.name` instead. +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `db.system.name`.")] +pub const DB_SYSTEM: &str = "db.system"; + /// The database management system (DBMS) product as identified by the client instrumentation. /// /// ## Notes /// -/// The actual DBMS may differ from the one identified by the client. For example, when using PostgreSQL client libraries to connect to a CockroachDB, the `db.system` is set to `postgresql` based on the instrumentation's best knowledge. -/// This attribute has stability level RELEASE CANDIDATE +/// The actual DBMS may differ from the one identified by the client. For example, when using PostgreSQL client libraries to connect to a CockroachDB, the `db.system.name` is set to `postgresql` based on the instrumentation's best knowledge #[cfg(feature = "semconv_experimental")] -pub const DB_SYSTEM: &str = "db.system"; +pub const DB_SYSTEM_NAME: &str = "db.system.name"; /// Deprecated, no replacement at this time. /// @@ -2123,9 +2361,18 @@ pub const DNS_QUESTION_NAME: &str = "dns.question.name"; /// - `"gen0"` /// - `"gen1"` /// - `"gen2"` -#[cfg(feature = "semconv_experimental")] pub const DOTNET_GC_HEAP_GENERATION: &str = "dotnet.gc.heap.generation"; +/// Represents the human-readable identifier of the node/instance to which a request was routed. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"instance-0000000001"` +#[cfg(feature = "semconv_experimental")] +pub const ELASTICSEARCH_NODE_NAME: &str = "elasticsearch.node.name"; + /// Deprecated, use `user.id` instead. /// /// ## Notes @@ -2195,35 +2442,20 @@ pub const ERROR_TYPE: &str = "error.type"; /// /// ## Notes /// -/// Event names are subject to the same rules as [attribute names](/docs/general/attribute-naming.md). Notably, event names are namespaced to avoid collisions and provide a clean separation of semantics for events in separate domains like browser, mobile, and kubernetes. -/// /// # Examples /// /// - `"browser.mouse.click"` /// - `"device.app.lifecycle"` #[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by EventName top-level field on the LogRecord")] pub const EVENT_NAME: &str = "event.name"; -/// SHOULD be set to true if the exception event is recorded at a point where it is known that the exception is escaping the scope of the span. +/// Indicates that the exception is escaping the scope of the span. /// /// ## Notes -/// -/// An exception is considered to have escaped (or left) the scope of a span, -/// if that span is ended while the exception is still logically "in flight". -/// This may be actually "in flight" in some languages (e.g. if the exception -/// is passed to a Context manager's `__exit__` method in Python) but will -/// usually be caught at the point of recording the exception in most languages. -/// -/// It is usually not possible to determine at the point where an exception is thrown -/// whether it will escape the scope of a span. -/// However, it is trivial to know that an exception -/// will escape, if one checks for an active exception just before ending the span, -/// as done in the [example for recording span exceptions](https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-spans/#recording-an-exception). 
-/// -/// It follows that an exception may still escape the scope of the span -/// even if the `exception.escaped` attribute was not set or set to false, -/// since the event might have been recorded at a time where it was not -/// clear whether the exception will escape +#[deprecated( + note = "It's no longer recommended to record exceptions that are handled and do not escape the scope of a span." +)] pub const EXCEPTION_ESCAPED: &str = "exception.escaped"; /// The exception message. @@ -2382,7 +2614,7 @@ pub const FAAS_MAX_MEMORY: &str = "faas.max_memory"; /// This is the name of the function as configured/deployed on the FaaS /// platform and is usually different from the name of the callback /// function (which may be stored in the -/// [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes) +/// [`code.namespace`/`code.function.name`](/docs/general/attributes.md#source-code-attributes) /// span attributes). /// /// For some cloud providers, the above definition is ambiguous. The following @@ -2811,7 +3043,7 @@ pub const GEN_AI_COMPLETION: &str = "gen_ai.completion"; #[cfg(feature = "semconv_experimental")] pub const GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT: &str = "gen_ai.openai.request.response_format"; -/// Requests with same seed value more likely to return same result. +/// Deprecated, use `gen_ai.request.seed`. /// /// ## Notes /// @@ -2819,6 +3051,7 @@ pub const GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT: &str = "gen_ai.openai.request.r /// /// - `100` #[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `gen_ai.request.seed` attribute.")] pub const GEN_AI_OPENAI_REQUEST_SEED: &str = "gen_ai.openai.request.seed"; /// The service tier requested. May be a specific tier, default, or auto. @@ -2931,6 +3164,16 @@ pub const GEN_AI_REQUEST_MODEL: &str = "gen_ai.request.model"; #[cfg(feature = "semconv_experimental")] pub const GEN_AI_REQUEST_PRESENCE_PENALTY: &str = "gen_ai.request.presence_penalty"; +/// Requests with same seed value more likely to return same result. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `100` +#[cfg(feature = "semconv_experimental")] +pub const GEN_AI_REQUEST_SEED: &str = "gen_ai.request.seed"; + /// List of sequences that the model will use to stop generating further tokens. /// /// ## Notes @@ -3018,8 +3261,10 @@ pub const GEN_AI_RESPONSE_MODEL: &str = "gen_ai.response.model"; /// by `gen_ai.request.model` and `gen_ai.response.model` attributes. /// /// The actual GenAI product may differ from the one identified by the client. -/// For example, when using OpenAI client libraries to communicate with Mistral, the `gen_ai.system` -/// is set to `openai` based on the instrumentation's best knowledge. +/// Multiple systems, including Azure OpenAI and Gemini, are accessible by OpenAI client +/// libraries. In such cases, the `gen_ai.system` is set to `openai` based on the +/// instrumentation's best knowledge, instead of the actual system. The `server.address` +/// attribute may help identify the actual system in use for `openai`. /// /// For custom model, a custom friendly name SHOULD be used. /// If none of these options apply, the `gen_ai.system` SHOULD be set to `_OTHER`. @@ -3983,6 +4228,20 @@ pub const K8S_JOB_UID: &str = "k8s.job.uid"; #[cfg(feature = "semconv_experimental")] pub const K8S_NAMESPACE_NAME: &str = "k8s.namespace.name"; +/// The phase of the K8s namespace. 
+/// +/// ## Notes +/// +/// This attribute aligns with the `phase` field of the +/// [K8s NamespaceStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#namespacestatus-v1-core) +/// +/// # Examples +/// +/// - `"active"` +/// - `"terminating"` +#[cfg(feature = "semconv_experimental")] +pub const K8S_NAMESPACE_PHASE: &str = "k8s.namespace.phase"; + /// The name of the Node. /// /// ## Notes @@ -4912,6 +5171,18 @@ pub const NETWORK_CARRIER_MNC: &str = "network.carrier.mnc"; #[cfg(feature = "semconv_experimental")] pub const NETWORK_CARRIER_NAME: &str = "network.carrier.name"; +/// The state of network connection +/// +/// ## Notes +/// +/// Connection states are defined as part of the [rfc9293](https://datatracker.ietf.org/doc/html/rfc9293#section-3.3.2) +/// +/// # Examples +/// +/// - `"close_wait"` +#[cfg(feature = "semconv_experimental")] +pub const NETWORK_CONNECTION_STATE: &str = "network.connection.state"; + /// This describes more details regarding the connection.type. It may be the type of cell technology connection, but it could be used for describing details about a wifi connection. /// /// ## Notes @@ -4940,6 +5211,7 @@ pub const NETWORK_CONNECTION_TYPE: &str = "network.connection.type"; /// /// - `"lo"` /// - `"eth0"` +#[cfg(feature = "semconv_experimental")] pub const NETWORK_INTERFACE_NAME: &str = "network.interface.name"; /// The network IO operation direction. @@ -5703,7 +5975,7 @@ pub const RPC_MESSAGE_UNCOMPRESSED_SIZE: &str = "rpc.message.uncompressed_size"; /// /// ## Notes /// -/// This is the logical name of the method from the RPC interface perspective, which can be different from the name of any implementing method/function. The `code.function` attribute may be used to store the latter (e.g., method actually executing the call on the server side, RPC client stub method on the client side). +/// This is the logical name of the method from the RPC interface perspective, which can be different from the name of any implementing method/function. The `code.function.name` attribute may be used to store the latter (e.g., method actually executing the call on the server side, RPC client stub method on the client side). /// /// # Examples /// @@ -5729,6 +6001,89 @@ pub const RPC_SERVICE: &str = "rpc.service"; #[cfg(feature = "semconv_experimental")] pub const RPC_SYSTEM: &str = "rpc.system"; +/// A categorization value keyword used by the entity using the rule for detection of this event +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"Attempted Information Leak"` +#[cfg(feature = "semconv_experimental")] +pub const SECURITY_RULE_CATEGORY: &str = "security_rule.category"; + +/// The description of the rule generating the event. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"Block requests to public DNS over HTTPS / TLS protocols"` +#[cfg(feature = "semconv_experimental")] +pub const SECURITY_RULE_DESCRIPTION: &str = "security_rule.description"; + +/// Name of the license under which the rule used to generate this event is made available. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"Apache 2.0"` +#[cfg(feature = "semconv_experimental")] +pub const SECURITY_RULE_LICENSE: &str = "security_rule.license"; + +/// The name of the rule or signature generating the event. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"BLOCK_DNS_over_TLS"` +#[cfg(feature = "semconv_experimental")] +pub const SECURITY_RULE_NAME: &str = "security_rule.name"; + +/// Reference URL to additional information about the rule used to generate this event. 
+/// +/// ## Notes +/// +/// The URL can point to the vendor’s documentation about the rule. If that’s not available, it can also be a link to a more general page describing this type of alert. +/// +/// # Examples +/// +/// - `"https://en.wikipedia.org/wiki/DNS_over_TLS"` +#[cfg(feature = "semconv_experimental")] +pub const SECURITY_RULE_REFERENCE: &str = "security_rule.reference"; + +/// Name of the ruleset, policy, group, or parent category in which the rule used to generate this event is a member. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"Standard_Protocol_Filters"` +#[cfg(feature = "semconv_experimental")] +pub const SECURITY_RULE_RULESET_NAME: &str = "security_rule.ruleset.name"; + +/// A rule ID that is unique within the scope of a set or group of agents, observers, or other entities using the rule for detection of this event. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"550e8400-e29b-41d4-a716-446655440000"` +/// - `"1100110011"` +#[cfg(feature = "semconv_experimental")] +pub const SECURITY_RULE_UUID: &str = "security_rule.uuid"; + +/// The version / revision of the rule being used for analysis. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"1.0.0"` +#[cfg(feature = "semconv_experimental")] +pub const SECURITY_RULE_VERSION: &str = "security_rule.version"; + /// Server domain name if available without reverse DNS lookup; otherwise, IP address or Unix domain socket name. /// /// ## Notes @@ -5973,7 +6328,7 @@ pub const SYSTEM_FILESYSTEM_TYPE: &str = "system.filesystem.type"; #[cfg(feature = "semconv_experimental")] pub const SYSTEM_MEMORY_STATE: &str = "system.memory.state"; -/// A stateless protocol MUST NOT set this attribute +/// Deprecated, use `network.connection.state` instead. /// /// ## Notes /// @@ -5981,6 +6336,9 @@ pub const SYSTEM_MEMORY_STATE: &str = "system.memory.state"; /// /// - `"close_wait"` #[cfg(feature = "semconv_experimental")] +#[deprecated( + note = "Removed, report network connection state with `network.connection.state` attribute" +)] pub const SYSTEM_NETWORK_STATE: &str = "system.network.state"; /// The paging access direction @@ -6820,6 +7178,10 @@ pub const VCS_LINE_CHANGE_TYPE: &str = "vcs.line_change.type"; /// /// ## Notes /// +/// `base` refers to the starting point of a change. For example, `main` +/// would be the base reference of type branch if you've created a new +/// reference of type branch from it and created new commits. +/// /// # Examples /// /// - `"my-feature-branch"` @@ -6831,15 +7193,19 @@ pub const VCS_REF_BASE_NAME: &str = "vcs.ref.base.name"; /// /// ## Notes /// -/// The revision can be a full [hash value (see glossary)](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf), +/// `base` refers to the starting point of a change. For example, `main` +/// would be the base reference of type branch if you've created a new +/// reference of type branch from it and created new commits. The +/// revision can be a full [hash value (see +/// glossary)](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf), /// of the recorded change to a ref within a repository pointing to a /// commit [commit](https://git-scm.com/docs/git-commit) object. 
It does -/// not necessarily have to be a hash; it can simply define a -/// [revision number](https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html) +/// not necessarily have to be a hash; it can simply define a [revision +/// number](https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html) /// which is an integer that is monotonically increasing. In cases where -/// it is identical to the `ref.base.name`, it SHOULD still be included. It is -/// up to the implementer to decide which value to set as the revision -/// based on the VCS system and situational context. +/// it is identical to the `ref.base.name`, it SHOULD still be included. +/// It is up to the implementer to decide which value to set as the +/// revision based on the VCS system and situational context. /// /// # Examples /// @@ -6854,6 +7220,10 @@ pub const VCS_REF_BASE_REVISION: &str = "vcs.ref.base.revision"; /// /// ## Notes /// +/// `base` refers to the starting point of a change. For example, `main` +/// would be the base reference of type branch if you've created a new +/// reference of type branch from it and created new commits. +/// /// # Examples /// /// - `"branch"` @@ -6865,6 +7235,9 @@ pub const VCS_REF_BASE_TYPE: &str = "vcs.ref.base.type"; /// /// ## Notes /// +/// `head` refers to where you are right now; the current reference at a +/// given time. +/// /// # Examples /// /// - `"my-feature-branch"` @@ -6876,15 +7249,17 @@ pub const VCS_REF_HEAD_NAME: &str = "vcs.ref.head.name"; /// /// ## Notes /// -/// The revision can be a full [hash value (see glossary)](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf), +/// `head` refers to where you are right now; the current reference at a +/// given time.The revision can be a full [hash value (see +/// glossary)](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf), /// of the recorded change to a ref within a repository pointing to a /// commit [commit](https://git-scm.com/docs/git-commit) object. It does -/// not necessarily have to be a hash; it can simply define a -/// [revision number](https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html) +/// not necessarily have to be a hash; it can simply define a [revision +/// number](https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html) /// which is an integer that is monotonically increasing. In cases where -/// it is identical to the `ref.head.name`, it SHOULD still be included. It is -/// up to the implementer to decide which value to set as the revision -/// based on the VCS system and situational context. +/// it is identical to the `ref.head.name`, it SHOULD still be included. +/// It is up to the implementer to decide which value to set as the +/// revision based on the VCS system and situational context. /// /// # Examples /// @@ -6899,6 +7274,9 @@ pub const VCS_REF_HEAD_REVISION: &str = "vcs.ref.head.revision"; /// /// ## Notes /// +/// `head` refers to where you are right now; the current reference at a +/// given time. +/// /// # Examples /// /// - `"branch"` @@ -6941,6 +7319,21 @@ pub const VCS_REPOSITORY_CHANGE_ID: &str = "vcs.repository.change.id"; #[deprecated(note = "Deprecated, use `vcs.change.title` instead.")] pub const VCS_REPOSITORY_CHANGE_TITLE: &str = "vcs.repository.change.title"; +/// The human readable name of the repository. It SHOULD NOT include any additional identifier like Group/SubGroup in GitLab or organization in GitHub. 
+/// +/// ## Notes +/// +/// Due to it only being the name, it can clash with forks of the same +/// repository if collecting telemetry across multiple orgs or groups in +/// the same backends. +/// +/// # Examples +/// +/// - `"semantic-conventions"` +/// - `"my-cool-repo"` +#[cfg(feature = "semconv_experimental")] +pub const VCS_REPOSITORY_NAME: &str = "vcs.repository.name"; + /// Deprecated, use `vcs.ref.head.name` instead. /// /// ## Notes @@ -6979,10 +7372,13 @@ pub const VCS_REPOSITORY_REF_REVISION: &str = "vcs.repository.ref.revision"; #[deprecated(note = "Deprecated, use `vcs.ref.head.type` instead.")] pub const VCS_REPOSITORY_REF_TYPE: &str = "vcs.repository.ref.type"; -/// The [URL](https://wikipedia.org/wiki/URL) of the repository providing the complete address in order to locate and identify the repository. +/// The [canonical URL](https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical.) of the repository providing the complete HTTP(S) address in order to locate and identify the repository through a browser. /// /// ## Notes /// +/// In Git Version Control Systems, the canonical URL SHOULD NOT include +/// the `.git` extension. +/// /// # Examples /// /// - `"https://github.com/opentelemetry/open-telemetry-collector-contrib"` diff --git a/opentelemetry-semantic-conventions/src/lib.rs b/opentelemetry-semantic-conventions/src/lib.rs index bdb0a9277e..21a73cfcb4 100644 --- a/opentelemetry-semantic-conventions/src/lib.rs +++ b/opentelemetry-semantic-conventions/src/lib.rs @@ -22,4 +22,4 @@ pub mod trace; /// The schema URL that matches the version of the semantic conventions that /// this crate defines. -pub const SCHEMA_URL: &str = "https://opentelemetry.io/schemas/1.29.0"; +pub const SCHEMA_URL: &str = "https://opentelemetry.io/schemas/1.30.0"; diff --git a/opentelemetry-semantic-conventions/src/metric.rs b/opentelemetry-semantic-conventions/src/metric.rs index 837101e0df..086d6a9b6b 100644 --- a/opentelemetry-semantic-conventions/src/metric.rs +++ b/opentelemetry-semantic-conventions/src/metric.rs @@ -183,6 +183,152 @@ pub const ASPNETCORE_RATE_LIMITING_REQUESTS: &str = "aspnetcore.rate_limiting.re /// | [`crate::attribute::HTTP_ROUTE`] | `Conditionally_required`: if and only if a route was successfully matched. pub const ASPNETCORE_ROUTING_MATCH_ATTEMPTS: &str = "aspnetcore.routing.match_attempts"; +/// ## Description +/// +/// Number of active client instances +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{instance}` | +/// | Status: | `Development` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::SERVER_ADDRESS`] | `Recommended` +/// | [`crate::attribute::SERVER_PORT`] | `Conditionally_required`: If using a port other than the default port for this DBMS and if `server.address` is set. +#[cfg(feature = "semconv_experimental")] +pub const AZURE_COSMOSDB_CLIENT_ACTIVE_INSTANCE_COUNT: &str = + "azure.cosmosdb.client.active_instance.count"; + +/// ## Description +/// +/// [Request units](https://learn.microsoft.com/azure/cosmos-db/request-units) consumed by the operation +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `histogram` | +/// | Unit: | `{request_unit}` | +/// | Status: | `Development` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::AZURE_COSMOSDB_CONSISTENCY_LEVEL`] | `Conditionally_required`: If available. 
+/// | [`crate::attribute::AZURE_COSMOSDB_OPERATION_CONTACTED_REGIONS`] | `{"recommended": "if available"}` +/// | [`crate::attribute::AZURE_COSMOSDB_RESPONSE_SUB_STATUS_CODE`] | `Conditionally_required`: when response was received and contained sub-code. +/// | [`crate::attribute::DB_COLLECTION_NAME`] | `Conditionally_required`: If available. +/// | [`crate::attribute::DB_NAMESPACE`] | `Conditionally_required`: If available. +/// | [`crate::attribute::DB_OPERATION_NAME`] | `Conditionally_required`: If readily available and if there is a single operation name that describes the database call. The operation name MAY be parsed from the query text, in which case it SHOULD be the single operation name found in the query. +/// | [`crate::attribute::DB_RESPONSE_STATUS_CODE`] | `Conditionally_required`: If the operation failed and status code is available. +/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally_required`: If and only if the operation failed. +/// | [`crate::attribute::SERVER_ADDRESS`] | `Recommended` +/// | [`crate::attribute::SERVER_PORT`] | `Conditionally_required`: If using a port other than the default port for this DBMS and if `server.address` is set. +#[cfg(feature = "semconv_experimental")] +pub const AZURE_COSMOSDB_CLIENT_OPERATION_REQUEST_CHARGE: &str = + "azure.cosmosdb.client.operation.request_charge"; + +/// ## Description +/// +/// The number of pipeline runs currently active in the system by state +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{run}` | +/// | Status: | `Development` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::CICD_PIPELINE_NAME`] | `Required` +/// | [`crate::attribute::CICD_PIPELINE_RUN_STATE`] | `Required` +#[cfg(feature = "semconv_experimental")] +pub const CICD_PIPELINE_RUN_ACTIVE: &str = "cicd.pipeline.run.active"; + +/// ## Description +/// +/// Duration of a pipeline run grouped by pipeline, state and result +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `histogram` | +/// | Unit: | `s` | +/// | Status: | `Development` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::CICD_PIPELINE_NAME`] | `Required` +/// | [`crate::attribute::CICD_PIPELINE_RESULT`] | `Conditionally_required`: If and only if the pipeline run result has been set during that state. +/// | [`crate::attribute::CICD_PIPELINE_RUN_STATE`] | `Required` +/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally_required`: If and only if the pipeline run failed. +#[cfg(feature = "semconv_experimental")] +pub const CICD_PIPELINE_RUN_DURATION: &str = "cicd.pipeline.run.duration"; + +/// ## Description +/// +/// The number of errors encountered in pipeline runs (eg. compile, test failures). +/// +/// ## Notes +/// +/// There might be errors in a pipeline run that are non fatal (eg. they are suppressed) or in a parallel stage multiple stages could have a fatal error. 
+/// This means that this error count might not be the same as the count of metric `cicd.pipeline.run.duration` with run result `failure` +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `counter` | +/// | Unit: | `{error}` | +/// | Status: | `Development` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::CICD_PIPELINE_NAME`] | `Required` +/// | [`crate::attribute::ERROR_TYPE`] | `Required` +#[cfg(feature = "semconv_experimental")] +pub const CICD_PIPELINE_RUN_ERRORS: &str = "cicd.pipeline.run.errors"; + +/// ## Description +/// +/// The number of errors in a component of the CICD system (eg. controller, scheduler, agent). +/// +/// ## Notes +/// +/// Errors in pipeline run execution are explicitly excluded. Ie a test failure is not counted in this metric +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `counter` | +/// | Unit: | `{error}` | +/// | Status: | `Development` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::CICD_SYSTEM_COMPONENT`] | `Required` +/// | [`crate::attribute::ERROR_TYPE`] | `Required` +#[cfg(feature = "semconv_experimental")] +pub const CICD_SYSTEM_ERRORS: &str = "cicd.system.errors"; + +/// ## Description +/// +/// The number of workers on the CICD system by state +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{count}` | +/// | Status: | `Development` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::CICD_WORKER_STATE`] | `Required` +#[cfg(feature = "semconv_experimental")] +pub const CICD_WORKER_COUNT: &str = "cicd.worker.count"; + /// ## Description /// /// Total CPU time consumed @@ -195,7 +341,7 @@ pub const ASPNETCORE_ROUTING_MATCH_ATTEMPTS: &str = "aspnetcore.routing.match_at /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -216,7 +362,7 @@ pub const CONTAINER_CPU_TIME: &str = "container.cpu.time"; /// |:-|:- /// | Instrument: | `gauge` | /// | Unit: | `{cpu}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -237,7 +383,7 @@ pub const CONTAINER_CPU_USAGE: &str = "container.cpu.usage"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -259,7 +405,7 @@ pub const CONTAINER_DISK_IO: &str = "container.disk.io"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const CONTAINER_MEMORY_USAGE: &str = "container.memory.usage"; @@ -275,7 +421,7 @@ pub const CONTAINER_MEMORY_USAGE: &str = "container.memory.usage"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -298,7 +444,7 @@ pub const CONTAINER_NETWORK_IO: &str = "container.network.io"; /// |:-|:- /// | Instrument: | `gauge` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const CONTAINER_UPTIME: &str = "container.uptime"; @@ -310,7 +456,7 @@ pub const CONTAINER_UPTIME: &str = "container.uptime"; /// |:-|:- /// | Instrument: | `updowncounter` 
| /// | Unit: | `{connection}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -328,7 +474,7 @@ pub const DB_CLIENT_CONNECTION_COUNT: &str = "db.client.connection.count"; /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -345,7 +491,7 @@ pub const DB_CLIENT_CONNECTION_CREATE_TIME: &str = "db.client.connection.create_ /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `{connection}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -362,7 +508,7 @@ pub const DB_CLIENT_CONNECTION_IDLE_MAX: &str = "db.client.connection.idle.max"; /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `{connection}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -379,7 +525,7 @@ pub const DB_CLIENT_CONNECTION_IDLE_MIN: &str = "db.client.connection.idle.min"; /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `{connection}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -396,7 +542,7 @@ pub const DB_CLIENT_CONNECTION_MAX: &str = "db.client.connection.max"; /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `{request}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -413,7 +559,7 @@ pub const DB_CLIENT_CONNECTION_PENDING_REQUESTS: &str = "db.client.connection.pe /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `{timeout}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -430,7 +576,7 @@ pub const DB_CLIENT_CONNECTION_TIMEOUTS: &str = "db.client.connection.timeouts"; /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -447,7 +593,7 @@ pub const DB_CLIENT_CONNECTION_USE_TIME: &str = "db.client.connection.use_time"; /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -464,7 +610,7 @@ pub const DB_CLIENT_CONNECTION_WAIT_TIME: &str = "db.client.connection.wait_time /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `ms` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -484,7 +630,7 @@ pub const DB_CLIENT_CONNECTIONS_CREATE_TIME: &str = "db.client.connections.creat /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `{connection}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -502,7 +648,7 @@ pub const DB_CLIENT_CONNECTIONS_IDLE_MAX: &str = "db.client.connections.idle.max /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `{connection}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -520,7 +666,7 @@ pub const DB_CLIENT_CONNECTIONS_IDLE_MIN: &str = "db.client.connections.idle.min /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `{connection}` | -/// | Status: | `Experimental` | +/// | 
Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -538,7 +684,7 @@ pub const DB_CLIENT_CONNECTIONS_MAX: &str = "db.client.connections.max"; /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `{request}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -556,7 +702,7 @@ pub const DB_CLIENT_CONNECTIONS_PENDING_REQUESTS: &str = "db.client.connections. /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `{timeout}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -574,7 +720,7 @@ pub const DB_CLIENT_CONNECTIONS_TIMEOUTS: &str = "db.client.connections.timeouts /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `{connection}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -593,7 +739,7 @@ pub const DB_CLIENT_CONNECTIONS_USAGE: &str = "db.client.connections.usage"; /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `ms` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -613,7 +759,7 @@ pub const DB_CLIENT_CONNECTIONS_USE_TIME: &str = "db.client.connections.use_time /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `ms` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -627,13 +773,13 @@ pub const DB_CLIENT_CONNECTIONS_WAIT_TIME: &str = "db.client.connections.wait_ti /// ## Description /// -/// Number of active client instances +/// Deprecated, use `azure.cosmosdb.client.active_instance.count` instead /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `{instance}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -641,18 +787,19 @@ pub const DB_CLIENT_CONNECTIONS_WAIT_TIME: &str = "db.client.connections.wait_ti /// | [`crate::attribute::SERVER_ADDRESS`] | `Recommended` /// | [`crate::attribute::SERVER_PORT`] | `Conditionally_required`: If using a port other than the default port for this DBMS and if `server.address` is set. #[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `azure.cosmosdb.client.active_instance.count`.")] pub const DB_CLIENT_COSMOSDB_ACTIVE_INSTANCE_COUNT: &str = "db.client.cosmosdb.active_instance.count"; /// ## Description /// -/// [Request charge](https://learn.microsoft.com/azure/cosmos-db/request-units) consumed by the operation +/// Deprecated, use `azure.cosmosdb.client.operation.request_charge` instead /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `{request_unit}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -663,11 +810,8 @@ pub const DB_CLIENT_COSMOSDB_ACTIVE_INSTANCE_COUNT: &str = /// | [`crate::attribute::DB_COSMOSDB_SUB_STATUS_CODE`] | `Conditionally_required`: when response was received and contained sub-code. /// | [`crate::attribute::DB_NAMESPACE`] | `Conditionally_required`: If available. /// | [`crate::attribute::DB_OPERATION_NAME`] | `Conditionally_required`: If readily available and if there is a single operation name that describes the database call. The operation name MAY be parsed from the query text, in which case it SHOULD be the single operation name found in the query. 
-/// | [`crate::attribute::DB_RESPONSE_STATUS_CODE`] | `Conditionally_required`: If the operation failed and status code is available. -/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally_required`: If and only if the operation failed. -/// | [`crate::attribute::SERVER_ADDRESS`] | `Recommended` -/// | [`crate::attribute::SERVER_PORT`] | `Conditionally_required`: If using a port other than the default port for this DBMS and if `server.address` is set. #[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `azure.cosmosdb.client.operation.request_charge`.")] pub const DB_CLIENT_COSMOSDB_OPERATION_REQUEST_CHARGE: &str = "db.client.cosmosdb.operation.request_charge"; @@ -683,7 +827,7 @@ pub const DB_CLIENT_COSMOSDB_OPERATION_REQUEST_CHARGE: &str = /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Release_candidate` | /// /// ## Attributes /// | Name | Requirement | @@ -694,7 +838,7 @@ pub const DB_CLIENT_COSMOSDB_OPERATION_REQUEST_CHARGE: &str = /// | [`crate::attribute::DB_QUERY_SUMMARY`] | `{"recommended": "if readily available or if instrumentation supports query summarization."}` /// | [`crate::attribute::DB_QUERY_TEXT`] | `Opt_in` /// | [`crate::attribute::DB_RESPONSE_STATUS_CODE`] | `Conditionally_required`: If the operation failed and status code is available. -/// | [`crate::attribute::DB_SYSTEM`] | `Required` +/// | [`crate::attribute::DB_SYSTEM_NAME`] | `Required` /// | [`crate::attribute::ERROR_TYPE`] | `Conditionally_required`: If and only if the operation failed. /// | [`crate::attribute::NETWORK_PEER_ADDRESS`] | `{"recommended": "if applicable for this database system."}` /// | [`crate::attribute::NETWORK_PEER_PORT`] | `{"recommended": "if and only if `network.peer.address` is set."}` @@ -711,7 +855,7 @@ pub const DB_CLIENT_OPERATION_DURATION: &str = "db.client.operation.duration"; /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `{row}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -722,7 +866,7 @@ pub const DB_CLIENT_OPERATION_DURATION: &str = "db.client.operation.duration"; /// | [`crate::attribute::DB_QUERY_SUMMARY`] | `{"recommended": "if readily available or if instrumentation supports query summarization."}` /// | [`crate::attribute::DB_QUERY_TEXT`] | `Opt_in` /// | [`crate::attribute::DB_RESPONSE_STATUS_CODE`] | `Conditionally_required`: If the operation failed and status code is available. -/// | [`crate::attribute::DB_SYSTEM`] | `Required` +/// | [`crate::attribute::DB_SYSTEM_NAME`] | `Required` /// | [`crate::attribute::ERROR_TYPE`] | `Conditionally_required`: If and only if the operation failed. 
/// | [`crate::attribute::NETWORK_PEER_ADDRESS`] | `{"recommended": "if applicable for this database system."}` /// | [`crate::attribute::NETWORK_PEER_PORT`] | `{"recommended": "if and only if `network.peer.address` is set."}` @@ -739,7 +883,7 @@ pub const DB_CLIENT_RESPONSE_RETURNED_ROWS: &str = "db.client.response.returned_ /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -762,8 +906,7 @@ pub const DNS_LOOKUP_DURATION: &str = "dns.lookup.duration"; /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `{assembly}` | -/// | Status: | `Experimental` | -#[cfg(feature = "semconv_experimental")] +/// | Status: | `Stable` | pub const DOTNET_ASSEMBLY_COUNT: &str = "dotnet.assembly.count"; /// ## Description @@ -779,13 +922,12 @@ pub const DOTNET_ASSEMBLY_COUNT: &str = "dotnet.assembly.count"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `{exception}` | -/// | Status: | `Experimental` | +/// | Status: | `Stable` | /// /// ## Attributes /// | Name | Requirement | /// |:-|:- | /// | [`crate::attribute::ERROR_TYPE`] | `Required` -#[cfg(feature = "semconv_experimental")] pub const DOTNET_EXCEPTIONS: &str = "dotnet.exceptions"; /// ## Description @@ -801,13 +943,12 @@ pub const DOTNET_EXCEPTIONS: &str = "dotnet.exceptions"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `{collection}` | -/// | Status: | `Experimental` | +/// | Status: | `Stable` | /// /// ## Attributes /// | Name | Requirement | /// |:-|:- | /// | [`crate::attribute::DOTNET_GC_HEAP_GENERATION`] | `Required` -#[cfg(feature = "semconv_experimental")] pub const DOTNET_GC_COLLECTIONS: &str = "dotnet.gc.collections"; /// ## Description @@ -823,8 +964,7 @@ pub const DOTNET_GC_COLLECTIONS: &str = "dotnet.gc.collections"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `By` | -/// | Status: | `Experimental` | -#[cfg(feature = "semconv_experimental")] +/// | Status: | `Stable` | pub const DOTNET_GC_HEAP_TOTAL_ALLOCATED: &str = "dotnet.gc.heap.total_allocated"; /// ## Description @@ -840,13 +980,12 @@ pub const DOTNET_GC_HEAP_TOTAL_ALLOCATED: &str = "dotnet.gc.heap.total_allocated /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Stable` | /// /// ## Attributes /// | Name | Requirement | /// |:-|:- | /// | [`crate::attribute::DOTNET_GC_HEAP_GENERATION`] | `Required` -#[cfg(feature = "semconv_experimental")] pub const DOTNET_GC_LAST_COLLECTION_HEAP_FRAGMENTATION_SIZE: &str = "dotnet.gc.last_collection.heap.fragmentation.size"; @@ -863,13 +1002,12 @@ pub const DOTNET_GC_LAST_COLLECTION_HEAP_FRAGMENTATION_SIZE: &str = /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Stable` | /// /// ## Attributes /// | Name | Requirement | /// |:-|:- | /// | [`crate::attribute::DOTNET_GC_HEAP_GENERATION`] | `Required` -#[cfg(feature = "semconv_experimental")] pub const DOTNET_GC_LAST_COLLECTION_HEAP_SIZE: &str = "dotnet.gc.last_collection.heap.size"; /// ## Description @@ -885,8 +1023,7 @@ pub const DOTNET_GC_LAST_COLLECTION_HEAP_SIZE: &str = "dotnet.gc.last_collection /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `By` | -/// | Status: | `Experimental` | -#[cfg(feature = "semconv_experimental")] +/// | Status: | `Stable` | pub const DOTNET_GC_LAST_COLLECTION_MEMORY_COMMITTED_SIZE: &str = 
"dotnet.gc.last_collection.memory.committed_size"; @@ -903,8 +1040,7 @@ pub const DOTNET_GC_LAST_COLLECTION_MEMORY_COMMITTED_SIZE: &str = /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `s` | -/// | Status: | `Experimental` | -#[cfg(feature = "semconv_experimental")] +/// | Status: | `Stable` | pub const DOTNET_GC_PAUSE_TIME: &str = "dotnet.gc.pause.time"; /// ## Description @@ -920,8 +1056,7 @@ pub const DOTNET_GC_PAUSE_TIME: &str = "dotnet.gc.pause.time"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `s` | -/// | Status: | `Experimental` | -#[cfg(feature = "semconv_experimental")] +/// | Status: | `Stable` | pub const DOTNET_JIT_COMPILATION_TIME: &str = "dotnet.jit.compilation.time"; /// ## Description @@ -937,8 +1072,7 @@ pub const DOTNET_JIT_COMPILATION_TIME: &str = "dotnet.jit.compilation.time"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `By` | -/// | Status: | `Experimental` | -#[cfg(feature = "semconv_experimental")] +/// | Status: | `Stable` | pub const DOTNET_JIT_COMPILED_IL_SIZE: &str = "dotnet.jit.compiled_il.size"; /// ## Description @@ -954,8 +1088,7 @@ pub const DOTNET_JIT_COMPILED_IL_SIZE: &str = "dotnet.jit.compiled_il.size"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `{method}` | -/// | Status: | `Experimental` | -#[cfg(feature = "semconv_experimental")] +/// | Status: | `Stable` | pub const DOTNET_JIT_COMPILED_METHODS: &str = "dotnet.jit.compiled_methods"; /// ## Description @@ -971,8 +1104,7 @@ pub const DOTNET_JIT_COMPILED_METHODS: &str = "dotnet.jit.compiled_methods"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `{contention}` | -/// | Status: | `Experimental` | -#[cfg(feature = "semconv_experimental")] +/// | Status: | `Stable` | pub const DOTNET_MONITOR_LOCK_CONTENTIONS: &str = "dotnet.monitor.lock_contentions"; /// ## Description @@ -988,8 +1120,7 @@ pub const DOTNET_MONITOR_LOCK_CONTENTIONS: &str = "dotnet.monitor.lock_contentio /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `{cpu}` | -/// | Status: | `Experimental` | -#[cfg(feature = "semconv_experimental")] +/// | Status: | `Stable` | pub const DOTNET_PROCESS_CPU_COUNT: &str = "dotnet.process.cpu.count"; /// ## Description @@ -1005,13 +1136,12 @@ pub const DOTNET_PROCESS_CPU_COUNT: &str = "dotnet.process.cpu.count"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Stable` | /// /// ## Attributes /// | Name | Requirement | /// |:-|:- | /// | [`crate::attribute::CPU_MODE`] | `Required` -#[cfg(feature = "semconv_experimental")] pub const DOTNET_PROCESS_CPU_TIME: &str = "dotnet.process.cpu.time"; /// ## Description @@ -1027,8 +1157,7 @@ pub const DOTNET_PROCESS_CPU_TIME: &str = "dotnet.process.cpu.time"; /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `By` | -/// | Status: | `Experimental` | -#[cfg(feature = "semconv_experimental")] +/// | Status: | `Stable` | pub const DOTNET_PROCESS_MEMORY_WORKING_SET: &str = "dotnet.process.memory.working_set"; /// ## Description @@ -1044,8 +1173,7 @@ pub const DOTNET_PROCESS_MEMORY_WORKING_SET: &str = "dotnet.process.memory.worki /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `{work_item}` | -/// | Status: | `Experimental` | -#[cfg(feature = "semconv_experimental")] +/// | Status: | `Stable` | pub const DOTNET_THREAD_POOL_QUEUE_LENGTH: &str = "dotnet.thread_pool.queue.length"; /// ## Description @@ -1061,8 +1189,7 @@ pub const DOTNET_THREAD_POOL_QUEUE_LENGTH: &str = "dotnet.thread_pool.queue.leng /// |:-|:- 
/// | Instrument: | `updowncounter` | /// | Unit: | `{thread}` | -/// | Status: | `Experimental` | -#[cfg(feature = "semconv_experimental")] +/// | Status: | `Stable` | pub const DOTNET_THREAD_POOL_THREAD_COUNT: &str = "dotnet.thread_pool.thread.count"; /// ## Description @@ -1078,8 +1205,7 @@ pub const DOTNET_THREAD_POOL_THREAD_COUNT: &str = "dotnet.thread_pool.thread.cou /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `{work_item}` | -/// | Status: | `Experimental` | -#[cfg(feature = "semconv_experimental")] +/// | Status: | `Stable` | pub const DOTNET_THREAD_POOL_WORK_ITEM_COUNT: &str = "dotnet.thread_pool.work_item.count"; /// ## Description @@ -1095,8 +1221,7 @@ pub const DOTNET_THREAD_POOL_WORK_ITEM_COUNT: &str = "dotnet.thread_pool.work_it /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `{timer}` | -/// | Status: | `Experimental` | -#[cfg(feature = "semconv_experimental")] +/// | Status: | `Stable` | pub const DOTNET_TIMER_COUNT: &str = "dotnet.timer.count"; /// ## Description @@ -1107,7 +1232,7 @@ pub const DOTNET_TIMER_COUNT: &str = "dotnet.timer.count"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `{coldstart}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -1124,7 +1249,7 @@ pub const FAAS_COLDSTARTS: &str = "faas.coldstarts"; /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -1141,7 +1266,7 @@ pub const FAAS_CPU_USAGE: &str = "faas.cpu_usage"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `{error}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -1158,7 +1283,7 @@ pub const FAAS_ERRORS: &str = "faas.errors"; /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -1175,7 +1300,7 @@ pub const FAAS_INIT_DURATION: &str = "faas.init_duration"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `{invocation}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -1192,7 +1317,7 @@ pub const FAAS_INVOCATIONS: &str = "faas.invocations"; /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -1209,7 +1334,7 @@ pub const FAAS_INVOKE_DURATION: &str = "faas.invoke_duration"; /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -1226,7 +1351,7 @@ pub const FAAS_MEM_USAGE: &str = "faas.mem_usage"; /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -1243,7 +1368,7 @@ pub const FAAS_NET_IO: &str = "faas.net_io"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `{timeout}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -1260,7 +1385,7 @@ pub const FAAS_TIMEOUTS: &str = "faas.timeouts"; /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | 
`Development` | /// /// ## Attributes /// | Name | Requirement | @@ -1283,7 +1408,7 @@ pub const GEN_AI_CLIENT_OPERATION_DURATION: &str = "gen_ai.client.operation.dura /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `{token}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -1306,7 +1431,7 @@ pub const GEN_AI_CLIENT_TOKEN_USAGE: &str = "gen_ai.client.token.usage"; /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -1329,7 +1454,7 @@ pub const GEN_AI_SERVER_REQUEST_DURATION: &str = "gen_ai.server.request.duration /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -1351,7 +1476,7 @@ pub const GEN_AI_SERVER_TIME_PER_OUTPUT_TOKEN: &str = "gen_ai.server.time_per_ou /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -1377,7 +1502,7 @@ pub const GEN_AI_SERVER_TIME_TO_FIRST_TOKEN: &str = "gen_ai.server.time_to_first /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `%` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const GO_CONFIG_GOGC: &str = "go.config.gogc"; @@ -1393,7 +1518,7 @@ pub const GO_CONFIG_GOGC: &str = "go.config.gogc"; /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `{goroutine}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const GO_GOROUTINE_COUNT: &str = "go.goroutine.count"; @@ -1409,7 +1534,7 @@ pub const GO_GOROUTINE_COUNT: &str = "go.goroutine.count"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const GO_MEMORY_ALLOCATED: &str = "go.memory.allocated"; @@ -1425,7 +1550,7 @@ pub const GO_MEMORY_ALLOCATED: &str = "go.memory.allocated"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `{allocation}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const GO_MEMORY_ALLOCATIONS: &str = "go.memory.allocations"; @@ -1441,7 +1566,7 @@ pub const GO_MEMORY_ALLOCATIONS: &str = "go.memory.allocations"; /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const GO_MEMORY_GC_GOAL: &str = "go.memory.gc.goal"; @@ -1457,7 +1582,7 @@ pub const GO_MEMORY_GC_GOAL: &str = "go.memory.gc.goal"; /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const GO_MEMORY_LIMIT: &str = "go.memory.limit"; @@ -1473,7 +1598,7 @@ pub const GO_MEMORY_LIMIT: &str = "go.memory.limit"; /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -1494,7 +1619,7 @@ pub const GO_MEMORY_USED: &str = "go.memory.used"; /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `{thread}` | -/// | Status: | 
`Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const GO_PROCESSOR_LIMIT: &str = "go.processor.limit"; @@ -1510,7 +1635,7 @@ pub const GO_PROCESSOR_LIMIT: &str = "go.processor.limit"; /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const GO_SCHEDULE_DURATION: &str = "go.schedule.duration"; @@ -1522,7 +1647,7 @@ pub const GO_SCHEDULE_DURATION: &str = "go.schedule.duration"; /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `{request}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -1543,7 +1668,7 @@ pub const HTTP_CLIENT_ACTIVE_REQUESTS: &str = "http.client.active_requests"; /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -1564,7 +1689,7 @@ pub const HTTP_CLIENT_CONNECTION_DURATION: &str = "http.client.connection.durati /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `{connection}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -1590,7 +1715,7 @@ pub const HTTP_CLIENT_OPEN_CONNECTIONS: &str = "http.client.open_connections"; /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -1643,7 +1768,7 @@ pub const HTTP_CLIENT_REQUEST_DURATION: &str = "http.client.request.duration"; /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -1668,7 +1793,7 @@ pub const HTTP_CLIENT_RESPONSE_BODY_SIZE: &str = "http.client.response.body.size /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `{request}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -1692,7 +1817,7 @@ pub const HTTP_SERVER_ACTIVE_REQUESTS: &str = "http.server.active_requests"; /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -1747,7 +1872,7 @@ pub const HTTP_SERVER_REQUEST_DURATION: &str = "http.server.request.duration"; /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -1773,7 +1898,7 @@ pub const HTTP_SERVER_RESPONSE_BODY_SIZE: &str = "http.server.response.body.size /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `J` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -1793,7 +1918,7 @@ pub const HW_ENERGY: &str = "hw.energy"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `{error}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -1818,7 +1943,7 @@ pub const HW_ERRORS: &str = "hw.errors"; /// |:-|:- /// | Instrument: | `gauge` | /// | Unit: | `W` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -1836,13 +1961,13 @@ pub const 
HW_POWER: &str = "hw.power"; /// /// ## Notes /// -/// `hw.status` is currently specified as an *UpDownCounter* but would ideally be represented using a [*StateSet* as defined in OpenMetrics](https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#stateset). This semantic convention will be updated once *StateSet* is specified in OpenTelemetry. This planned change is not expected to have any consequence on the way users query their timeseries backend to retrieve the values of `hw.status` over time +/// `hw.status` is currently specified as an *UpDownCounter* but would ideally be represented using a [*StateSet* as defined in OpenMetrics](https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#stateset). This semantic convention will be updated once *StateSet* is specified in OpenTelemetry. This planned change is not expected to have any consequence on the way users query their timeseries backend to retrieve the values of `hw.status` over time /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `1` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -1863,7 +1988,7 @@ pub const HW_STATUS: &str = "hw.status"; /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `{buffer}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -1880,7 +2005,7 @@ pub const JVM_BUFFER_COUNT: &str = "jvm.buffer.count"; /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -1897,7 +2022,7 @@ pub const JVM_BUFFER_MEMORY_LIMIT: &str = "jvm.buffer.memory.limit"; /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -1915,7 +2040,7 @@ pub const JVM_BUFFER_MEMORY_USAGE: &str = "jvm.buffer.memory.usage"; /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -2036,7 +2161,7 @@ pub const JVM_MEMORY_COMMITTED: &str = "jvm.memory.committed"; /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -2109,7 +2234,7 @@ pub const JVM_MEMORY_USED_AFTER_LAST_GC: &str = "jvm.memory.used_after_last_gc"; /// |:-|:- /// | Instrument: | `gauge` | /// | Unit: | `{run_queue_item}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const JVM_SYSTEM_CPU_LOAD_1M: &str = "jvm.system.cpu.load_1m"; @@ -2125,7 +2250,7 @@ pub const JVM_SYSTEM_CPU_LOAD_1M: &str = "jvm.system.cpu.load_1m"; /// |:-|:- /// | Instrument: | `gauge` | /// | Unit: | `1` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const JVM_SYSTEM_CPU_UTILIZATION: &str = "jvm.system.cpu.utilization"; @@ -2146,6 +2271,336 @@ pub const JVM_SYSTEM_CPU_UTILIZATION: &str = "jvm.system.cpu.utilization"; /// | [`crate::attribute::JVM_THREAD_STATE`] | `Recommended` pub const JVM_THREAD_COUNT: &str = "jvm.thread.count"; +/// ## Description +/// +/// The number of actively running jobs for a cronjob 
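+///
+/// ## Example
+///
+/// An illustrative sketch (not part of the generated definition) of reporting
+/// this metric with the builder-style `opentelemetry` metrics API; the meter
+/// name and the delta source are assumptions:
+///
+/// ```ignore
+/// use opentelemetry::global;
+///
+/// let meter = global::meter("k8s-observer"); // hypothetical scope name
+/// let active_jobs = meter
+///     .i64_up_down_counter(K8S_CRONJOB_ACTIVE_JOBS)
+///     .with_unit("{job}")
+///     .build();
+/// // +1 when a job of the cronjob becomes active, -1 when it finishes.
+/// active_jobs.add(1, &[]);
+/// ```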
+/// +/// ## Notes +/// +/// This metric aligns with the `active` field of the +/// [K8s CronJobStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#cronjobstatus-v1-batch). +/// +/// This metric SHOULD, at a minimum, be reported against a +/// [`k8s.cronjob`](../resource/k8s.md#cronjob) resource +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{job}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_CRONJOB_ACTIVE_JOBS: &str = "k8s.cronjob.active_jobs"; + +/// ## Description +/// +/// Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod +/// +/// ## Notes +/// +/// This metric aligns with the `currentNumberScheduled` field of the +/// [K8s DaemonSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps). +/// +/// This metric SHOULD, at a minimum, be reported against a +/// [`k8s.daemonset`](../resource/k8s.md#daemonset) resource +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{node}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_DAEMONSET_CURRENT_SCHEDULED_NODES: &str = "k8s.daemonset.current_scheduled_nodes"; + +/// ## Description +/// +/// Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod) +/// +/// ## Notes +/// +/// This metric aligns with the `desiredNumberScheduled` field of the +/// [K8s DaemonSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps). +/// +/// This metric SHOULD, at a minimum, be reported against a +/// [`k8s.daemonset`](../resource/k8s.md#daemonset) resource +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{node}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_DAEMONSET_DESIRED_SCHEDULED_NODES: &str = "k8s.daemonset.desired_scheduled_nodes"; + +/// ## Description +/// +/// Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod +/// +/// ## Notes +/// +/// This metric aligns with the `numberMisscheduled` field of the +/// [K8s DaemonSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps). +/// +/// This metric SHOULD, at a minimum, be reported against a +/// [`k8s.daemonset`](../resource/k8s.md#daemonset) resource +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{node}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_DAEMONSET_MISSCHEDULED_NODES: &str = "k8s.daemonset.misscheduled_nodes"; + +/// ## Description +/// +/// Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready +/// +/// ## Notes +/// +/// This metric aligns with the `numberReady` field of the +/// [K8s DaemonSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps). 
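+///
+/// A sketch of one way to feed this metric (illustrative only; the meter name
+/// and the value source are assumptions): since this is a point-in-time count
+/// scraped from the DaemonSet status, an observable up-down counter that reads
+/// `numberReady` on each collection cycle is a natural fit:
+///
+/// ```ignore
+/// use opentelemetry::global;
+///
+/// let meter = global::meter("k8s-observer");
+/// let _ready_nodes = meter
+///     .i64_observable_up_down_counter(K8S_DAEMONSET_READY_NODES)
+///     .with_callback(|observer| {
+///         let ready = 3; // placeholder for DaemonSetStatus.numberReady
+///         observer.observe(ready, &[]);
+///     })
+///     .build();
+/// ```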
+/// +/// This metric SHOULD, at a minimum, be reported against a +/// [`k8s.daemonset`](../resource/k8s.md#daemonset) resource +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{node}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_DAEMONSET_READY_NODES: &str = "k8s.daemonset.ready_nodes"; + +/// ## Description +/// +/// Total number of available replica pods (ready for at least minReadySeconds) targeted by this deployment +/// +/// ## Notes +/// +/// This metric aligns with the `availableReplicas` field of the +/// [K8s DeploymentStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#deploymentstatus-v1-apps). +/// +/// This metric SHOULD, at a minimum, be reported against a +/// [`k8s.deployment`](../resource/k8s.md#deployment) resource +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{pod}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_DEPLOYMENT_AVAILABLE_PODS: &str = "k8s.deployment.available_pods"; + +/// ## Description +/// +/// Number of desired replica pods in this deployment +/// +/// ## Notes +/// +/// This metric aligns with the `replicas` field of the +/// [K8s DeploymentSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#deploymentspec-v1-apps). +/// +/// This metric SHOULD, at a minimum, be reported against a +/// [`k8s.deployment`](../resource/k8s.md#deployment) resource +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{pod}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_DEPLOYMENT_DESIRED_PODS: &str = "k8s.deployment.desired_pods"; + +/// ## Description +/// +/// Current number of replica pods managed by this horizontal pod autoscaler, as last seen by the autoscaler +/// +/// ## Notes +/// +/// This metric aligns with the `currentReplicas` field of the +/// [K8s HorizontalPodAutoscalerStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerstatus-v2-autoscaling) +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{pod}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_HPA_CURRENT_PODS: &str = "k8s.hpa.current_pods"; + +/// ## Description +/// +/// Desired number of replica pods managed by this horizontal pod autoscaler, as last calculated by the autoscaler +/// +/// ## Notes +/// +/// This metric aligns with the `desiredReplicas` field of the +/// [K8s HorizontalPodAutoscalerStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerstatus-v2-autoscaling) +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{pod}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_HPA_DESIRED_PODS: &str = "k8s.hpa.desired_pods"; + +/// ## Description +/// +/// The upper limit for the number of replica pods to which the autoscaler can scale up +/// +/// ## Notes +/// +/// This metric aligns with the `maxReplicas` field of the +/// [K8s HorizontalPodAutoscalerSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerspec-v2-autoscaling) +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{pod}` | +/// | Status: | `Development` | +#[cfg(feature = 
"semconv_experimental")] +pub const K8S_HPA_MAX_PODS: &str = "k8s.hpa.max_pods"; + +/// ## Description +/// +/// The lower limit for the number of replica pods to which the autoscaler can scale down +/// +/// ## Notes +/// +/// This metric aligns with the `minReplicas` field of the +/// [K8s HorizontalPodAutoscalerSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerspec-v2-autoscaling) +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{pod}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_HPA_MIN_PODS: &str = "k8s.hpa.min_pods"; + +/// ## Description +/// +/// The number of pending and actively running pods for a job +/// +/// ## Notes +/// +/// This metric aligns with the `active` field of the +/// [K8s JobStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobstatus-v1-batch). +/// +/// This metric SHOULD, at a minimum, be reported against a +/// [`k8s.job`](../resource/k8s.md#job) resource +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{pod}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_JOB_ACTIVE_PODS: &str = "k8s.job.active_pods"; + +/// ## Description +/// +/// The desired number of successfully finished pods the job should be run with +/// +/// ## Notes +/// +/// This metric aligns with the `completions` field of the +/// [K8s JobSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobspec-v1-batch). +/// +/// This metric SHOULD, at a minimum, be reported against a +/// [`k8s.job`](../resource/k8s.md#job) resource +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{pod}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_JOB_DESIRED_SUCCESSFUL_PODS: &str = "k8s.job.desired_successful_pods"; + +/// ## Description +/// +/// The number of pods which reached phase Failed for a job +/// +/// ## Notes +/// +/// This metric aligns with the `failed` field of the +/// [K8s JobStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobstatus-v1-batch). +/// +/// This metric SHOULD, at a minimum, be reported against a +/// [`k8s.job`](../resource/k8s.md#job) resource +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{pod}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_JOB_FAILED_PODS: &str = "k8s.job.failed_pods"; + +/// ## Description +/// +/// The max desired number of pods the job should run at any given time +/// +/// ## Notes +/// +/// This metric aligns with the `parallelism` field of the +/// [K8s JobSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobspec-v1-batch). 
+/// +/// This metric SHOULD, at a minimum, be reported against a +/// [`k8s.job`](../resource/k8s.md#job) resource +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{pod}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_JOB_MAX_PARALLEL_PODS: &str = "k8s.job.max_parallel_pods"; + +/// ## Description +/// +/// The number of pods which reached phase Succeeded for a job +/// +/// ## Notes +/// +/// This metric aligns with the `succeeded` field of the +/// [K8s JobStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobstatus-v1-batch). +/// +/// This metric SHOULD, at a minimum, be reported against a +/// [`k8s.job`](../resource/k8s.md#job) resource +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{pod}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_JOB_SUCCESSFUL_PODS: &str = "k8s.job.successful_pods"; + +/// ## Description +/// +/// Describes number of K8s namespaces that are currently in a given phase. +/// +/// ## Notes +/// +/// This metric SHOULD, at a minimum, be reported against a +/// [`k8s.namespace`](../resource/k8s.md#namespace) resource +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{namespace}` | +/// | Status: | `Development` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::K8S_NAMESPACE_PHASE`] | `Required` +#[cfg(feature = "semconv_experimental")] +pub const K8S_NAMESPACE_PHASE: &str = "k8s.namespace.phase"; + /// ## Description /// /// Total CPU time consumed @@ -2158,7 +2613,7 @@ pub const JVM_THREAD_COUNT: &str = "jvm.thread.count"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const K8S_NODE_CPU_TIME: &str = "k8s.node.cpu.time"; @@ -2174,7 +2629,7 @@ pub const K8S_NODE_CPU_TIME: &str = "k8s.node.cpu.time"; /// |:-|:- /// | Instrument: | `gauge` | /// | Unit: | `{cpu}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const K8S_NODE_CPU_USAGE: &str = "k8s.node.cpu.usage"; @@ -2190,7 +2645,7 @@ pub const K8S_NODE_CPU_USAGE: &str = "k8s.node.cpu.usage"; /// |:-|:- /// | Instrument: | `gauge` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const K8S_NODE_MEMORY_USAGE: &str = "k8s.node.memory.usage"; @@ -2202,7 +2657,7 @@ pub const K8S_NODE_MEMORY_USAGE: &str = "k8s.node.memory.usage"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `{error}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -2220,7 +2675,7 @@ pub const K8S_NODE_NETWORK_ERRORS: &str = "k8s.node.network.errors"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -2243,7 +2698,7 @@ pub const K8S_NODE_NETWORK_IO: &str = "k8s.node.network.io"; /// |:-|:- /// | Instrument: | `gauge` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const K8S_NODE_UPTIME: &str = "k8s.node.uptime"; @@ -2259,7 +2714,7 @@ pub const K8S_NODE_UPTIME: &str = "k8s.node.uptime"; 
/// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const K8S_POD_CPU_TIME: &str = "k8s.pod.cpu.time"; @@ -2275,7 +2730,7 @@ pub const K8S_POD_CPU_TIME: &str = "k8s.pod.cpu.time"; /// |:-|:- /// | Instrument: | `gauge` | /// | Unit: | `{cpu}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const K8S_POD_CPU_USAGE: &str = "k8s.pod.cpu.usage"; @@ -2291,7 +2746,7 @@ pub const K8S_POD_CPU_USAGE: &str = "k8s.pod.cpu.usage"; /// |:-|:- /// | Instrument: | `gauge` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const K8S_POD_MEMORY_USAGE: &str = "k8s.pod.memory.usage"; @@ -2303,7 +2758,7 @@ pub const K8S_POD_MEMORY_USAGE: &str = "k8s.pod.memory.usage"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `{error}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -2321,7 +2776,7 @@ pub const K8S_POD_NETWORK_ERRORS: &str = "k8s.pod.network.errors"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -2344,10 +2799,165 @@ pub const K8S_POD_NETWORK_IO: &str = "k8s.pod.network.io"; /// |:-|:- /// | Instrument: | `gauge` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const K8S_POD_UPTIME: &str = "k8s.pod.uptime"; +/// ## Description +/// +/// Total number of available replica pods (ready for at least minReadySeconds) targeted by this replicaset +/// +/// ## Notes +/// +/// This metric aligns with the `availableReplicas` field of the +/// [K8s ReplicaSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicasetstatus-v1-apps). +/// +/// This metric SHOULD, at a minimum, be reported against a +/// [`k8s.replicaset`](../resource/k8s.md#replicaset) resource +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{pod}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_REPLICASET_AVAILABLE_PODS: &str = "k8s.replicaset.available_pods"; + +/// ## Description +/// +/// Number of desired replica pods in this replicaset +/// +/// ## Notes +/// +/// This metric aligns with the `replicas` field of the +/// [K8s ReplicaSetSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicasetspec-v1-apps). 
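+///
+/// The note below asks for this metric to be reported against a
+/// `k8s.replicaset` resource. A sketch of what that means in practice
+/// (illustrative; the attribute value and SDK setup are assumptions): the
+/// identifying attributes live on the meter provider's `Resource`, not on
+/// each data point:
+///
+/// ```ignore
+/// use opentelemetry::KeyValue;
+/// use opentelemetry_sdk::{metrics::SdkMeterProvider, Resource};
+///
+/// let resource = Resource::new([KeyValue::new("k8s.replicaset.name", "web-6d5f8")]);
+/// let provider = SdkMeterProvider::builder()
+///     .with_resource(resource)
+///     .build();
+/// ```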
+/// +/// This metric SHOULD, at a minimum, be reported against a +/// [`k8s.replicaset`](../resource/k8s.md#replicaset) resource +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{pod}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_REPLICASET_DESIRED_PODS: &str = "k8s.replicaset.desired_pods"; + +/// ## Description +/// +/// Total number of available replica pods (ready for at least minReadySeconds) targeted by this replication controller +/// +/// ## Notes +/// +/// This metric aligns with the `availableReplicas` field of the +/// [K8s ReplicationControllerStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicationcontrollerstatus-v1-core) +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{pod}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_REPLICATION_CONTROLLER_AVAILABLE_PODS: &str = + "k8s.replication_controller.available_pods"; + +/// ## Description +/// +/// Number of desired replica pods in this replication controller +/// +/// ## Notes +/// +/// This metric aligns with the `replicas` field of the +/// [K8s ReplicationControllerSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicationcontrollerspec-v1-core) +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{pod}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_REPLICATION_CONTROLLER_DESIRED_PODS: &str = "k8s.replication_controller.desired_pods"; + +/// ## Description +/// +/// The number of replica pods created by the statefulset controller from the statefulset version indicated by currentRevision +/// +/// ## Notes +/// +/// This metric aligns with the `currentReplicas` field of the +/// [K8s StatefulSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetstatus-v1-apps). +/// +/// This metric SHOULD, at a minimum, be reported against a +/// [`k8s.statefulset`](../resource/k8s.md#statefulset) resource +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{pod}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_STATEFULSET_CURRENT_PODS: &str = "k8s.statefulset.current_pods"; + +/// ## Description +/// +/// Number of desired replica pods in this statefulset +/// +/// ## Notes +/// +/// This metric aligns with the `replicas` field of the +/// [K8s StatefulSetSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetspec-v1-apps). +/// +/// This metric SHOULD, at a minimum, be reported against a +/// [`k8s.statefulset`](../resource/k8s.md#statefulset) resource +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{pod}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_STATEFULSET_DESIRED_PODS: &str = "k8s.statefulset.desired_pods"; + +/// ## Description +/// +/// The number of replica pods created for this statefulset with a Ready Condition +/// +/// ## Notes +/// +/// This metric aligns with the `readyReplicas` field of the +/// [K8s StatefulSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetstatus-v1-apps). 
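+///
+/// An illustrative sketch (the meter name and deltas are placeholders)
+/// pairing this metric with `K8S_STATEFULSET_DESIRED_PODS`, defined above,
+/// so ready and desired counts can be compared downstream:
+///
+/// ```ignore
+/// use opentelemetry::global;
+///
+/// let meter = global::meter("k8s-observer");
+/// let ready = meter.i64_up_down_counter(K8S_STATEFULSET_READY_PODS).build();
+/// let desired = meter.i64_up_down_counter(K8S_STATEFULSET_DESIRED_PODS).build();
+/// desired.add(3, &[]); // the spec asks for three replicas
+/// ready.add(1, &[]);   // one pod has reached the Ready condition so far
+/// ```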
+/// +/// This metric SHOULD, at a minimum, be reported against a +/// [`k8s.statefulset`](../resource/k8s.md#statefulset) resource +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{pod}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_STATEFULSET_READY_PODS: &str = "k8s.statefulset.ready_pods"; + +/// ## Description +/// +/// Number of replica pods created by the statefulset controller from the statefulset version indicated by updateRevision +/// +/// ## Notes +/// +/// This metric aligns with the `updatedReplicas` field of the +/// [K8s StatefulSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetstatus-v1-apps). +/// +/// This metric SHOULD, at a minimum, be reported against a +/// [`k8s.statefulset`](../resource/k8s.md#statefulset) resource +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{pod}` | +/// | Status: | `Development` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_STATEFULSET_UPDATED_PODS: &str = "k8s.statefulset.updated_pods"; + /// ## Description /// /// Number of connections that are currently active on the server. @@ -2556,7 +3166,7 @@ pub const KESTREL_UPGRADED_CONNECTIONS: &str = "kestrel.upgraded_connections"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `{message}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -2586,7 +3196,7 @@ pub const MESSAGING_CLIENT_CONSUMED_MESSAGES: &str = "messaging.client.consumed. /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -2613,7 +3223,7 @@ pub const MESSAGING_CLIENT_OPERATION_DURATION: &str = "messaging.client.operatio /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `{message}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -2642,7 +3252,7 @@ pub const MESSAGING_CLIENT_PUBLISHED_MESSAGES: &str = "messaging.client.publishe /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `{message}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -2670,7 +3280,7 @@ pub const MESSAGING_CLIENT_SENT_MESSAGES: &str = "messaging.client.sent.messages /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -2696,7 +3306,7 @@ pub const MESSAGING_PROCESS_DURATION: &str = "messaging.process.duration"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `{message}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -2717,7 +3327,7 @@ pub const MESSAGING_PROCESS_MESSAGES: &str = "messaging.process.messages"; /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -2738,7 +3348,7 @@ pub const MESSAGING_PUBLISH_DURATION: &str = "messaging.publish.duration"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `{message}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -2759,7 +3369,7 @@ pub const 
MESSAGING_PUBLISH_MESSAGES: &str = "messaging.publish.messages"; /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -2780,7 +3390,7 @@ pub const MESSAGING_RECEIVE_DURATION: &str = "messaging.receive.duration"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `{message}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -2805,7 +3415,7 @@ pub const MESSAGING_RECEIVE_MESSAGES: &str = "messaging.receive.messages"; /// |:-|:- /// | Instrument: | `gauge` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const NODEJS_EVENTLOOP_DELAY_MAX: &str = "nodejs.eventloop.delay.max"; @@ -2821,7 +3431,7 @@ pub const NODEJS_EVENTLOOP_DELAY_MAX: &str = "nodejs.eventloop.delay.max"; /// |:-|:- /// | Instrument: | `gauge` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const NODEJS_EVENTLOOP_DELAY_MEAN: &str = "nodejs.eventloop.delay.mean"; @@ -2837,7 +3447,7 @@ pub const NODEJS_EVENTLOOP_DELAY_MEAN: &str = "nodejs.eventloop.delay.mean"; /// |:-|:- /// | Instrument: | `gauge` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const NODEJS_EVENTLOOP_DELAY_MIN: &str = "nodejs.eventloop.delay.min"; @@ -2853,7 +3463,7 @@ pub const NODEJS_EVENTLOOP_DELAY_MIN: &str = "nodejs.eventloop.delay.min"; /// |:-|:- /// | Instrument: | `gauge` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const NODEJS_EVENTLOOP_DELAY_P50: &str = "nodejs.eventloop.delay.p50"; @@ -2869,7 +3479,7 @@ pub const NODEJS_EVENTLOOP_DELAY_P50: &str = "nodejs.eventloop.delay.p50"; /// |:-|:- /// | Instrument: | `gauge` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const NODEJS_EVENTLOOP_DELAY_P90: &str = "nodejs.eventloop.delay.p90"; @@ -2885,7 +3495,7 @@ pub const NODEJS_EVENTLOOP_DELAY_P90: &str = "nodejs.eventloop.delay.p90"; /// |:-|:- /// | Instrument: | `gauge` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const NODEJS_EVENTLOOP_DELAY_P99: &str = "nodejs.eventloop.delay.p99"; @@ -2901,7 +3511,7 @@ pub const NODEJS_EVENTLOOP_DELAY_P99: &str = "nodejs.eventloop.delay.p99"; /// |:-|:- /// | Instrument: | `gauge` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const NODEJS_EVENTLOOP_DELAY_STDDEV: &str = "nodejs.eventloop.delay.stddev"; @@ -2917,7 +3527,7 @@ pub const NODEJS_EVENTLOOP_DELAY_STDDEV: &str = "nodejs.eventloop.delay.stddev"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -2938,7 +3548,7 @@ pub const NODEJS_EVENTLOOP_TIME: &str = "nodejs.eventloop.time"; /// |:-|:- /// | Instrument: | `gauge` | /// | Unit: | `1` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const NODEJS_EVENTLOOP_UTILIZATION: &str = "nodejs.eventloop.utilization"; @@ -2950,7 
+3560,7 @@ pub const NODEJS_EVENTLOOP_UTILIZATION: &str = "nodejs.eventloop.utilization"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `{count}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -2967,7 +3577,7 @@ pub const PROCESS_CONTEXT_SWITCHES: &str = "process.context_switches"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -2984,7 +3594,7 @@ pub const PROCESS_CPU_TIME: &str = "process.cpu.time"; /// |:-|:- /// | Instrument: | `gauge` | /// | Unit: | `1` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -3001,7 +3611,7 @@ pub const PROCESS_CPU_UTILIZATION: &str = "process.cpu.utilization"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -3018,7 +3628,7 @@ pub const PROCESS_DISK_IO: &str = "process.disk.io"; /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const PROCESS_MEMORY_USAGE: &str = "process.memory.usage"; @@ -3030,7 +3640,7 @@ pub const PROCESS_MEMORY_USAGE: &str = "process.memory.usage"; /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const PROCESS_MEMORY_VIRTUAL: &str = "process.memory.virtual"; @@ -3042,7 +3652,7 @@ pub const PROCESS_MEMORY_VIRTUAL: &str = "process.memory.virtual"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -3059,7 +3669,7 @@ pub const PROCESS_NETWORK_IO: &str = "process.network.io"; /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `{count}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const PROCESS_OPEN_FILE_DESCRIPTOR_COUNT: &str = "process.open_file_descriptor.count"; @@ -3071,7 +3681,7 @@ pub const PROCESS_OPEN_FILE_DESCRIPTOR_COUNT: &str = "process.open_file_descript /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `{fault}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -3088,7 +3698,7 @@ pub const PROCESS_PAGING_FAULTS: &str = "process.paging.faults"; /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `{thread}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const PROCESS_THREAD_COUNT: &str = "process.thread.count"; @@ -3105,7 +3715,7 @@ pub const PROCESS_THREAD_COUNT: &str = "process.thread.count"; /// |:-|:- /// | Instrument: | `gauge` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const PROCESS_UPTIME: &str = "process.uptime"; @@ -3124,7 +3734,7 @@ pub const PROCESS_UPTIME: &str = "process.uptime"; /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `ms` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const RPC_CLIENT_DURATION: &str = 
"rpc.client.duration"; @@ -3140,7 +3750,7 @@ pub const RPC_CLIENT_DURATION: &str = "rpc.client.duration"; /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const RPC_CLIENT_REQUEST_SIZE: &str = "rpc.client.request.size"; @@ -3158,7 +3768,7 @@ pub const RPC_CLIENT_REQUEST_SIZE: &str = "rpc.client.request.size"; /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `{count}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const RPC_CLIENT_REQUESTS_PER_RPC: &str = "rpc.client.requests_per_rpc"; @@ -3174,7 +3784,7 @@ pub const RPC_CLIENT_REQUESTS_PER_RPC: &str = "rpc.client.requests_per_rpc"; /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const RPC_CLIENT_RESPONSE_SIZE: &str = "rpc.client.response.size"; @@ -3192,7 +3802,7 @@ pub const RPC_CLIENT_RESPONSE_SIZE: &str = "rpc.client.response.size"; /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `{count}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const RPC_CLIENT_RESPONSES_PER_RPC: &str = "rpc.client.responses_per_rpc"; @@ -3211,7 +3821,7 @@ pub const RPC_CLIENT_RESPONSES_PER_RPC: &str = "rpc.client.responses_per_rpc"; /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `ms` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const RPC_SERVER_DURATION: &str = "rpc.server.duration"; @@ -3227,7 +3837,7 @@ pub const RPC_SERVER_DURATION: &str = "rpc.server.duration"; /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const RPC_SERVER_REQUEST_SIZE: &str = "rpc.server.request.size"; @@ -3245,7 +3855,7 @@ pub const RPC_SERVER_REQUEST_SIZE: &str = "rpc.server.request.size"; /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `{count}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const RPC_SERVER_REQUESTS_PER_RPC: &str = "rpc.server.requests_per_rpc"; @@ -3261,7 +3871,7 @@ pub const RPC_SERVER_REQUESTS_PER_RPC: &str = "rpc.server.requests_per_rpc"; /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const RPC_SERVER_RESPONSE_SIZE: &str = "rpc.server.response.size"; @@ -3279,7 +3889,7 @@ pub const RPC_SERVER_RESPONSE_SIZE: &str = "rpc.server.response.size"; /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `{count}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const RPC_SERVER_RESPONSES_PER_RPC: &str = "rpc.server.responses_per_rpc"; @@ -3333,7 +3943,7 @@ pub const SIGNALR_SERVER_CONNECTION_DURATION: &str = "signalr.server.connection. 
/// |:-|:- /// | Instrument: | `gauge` | /// | Unit: | `{Hz}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -3345,24 +3955,32 @@ pub const SYSTEM_CPU_FREQUENCY: &str = "system.cpu.frequency"; /// ## Description /// /// Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking +/// +/// ## Notes +/// +/// Calculated by multiplying the number of sockets by the number of cores per socket, and then by the number of threads per core /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `{cpu}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const SYSTEM_CPU_LOGICAL_COUNT: &str = "system.cpu.logical.count"; /// ## Description /// /// Reports the number of actual physical processor cores on the hardware +/// +/// ## Notes +/// +/// Calculated by multiplying the number of sockets by the number of cores per socket /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `{cpu}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const SYSTEM_CPU_PHYSICAL_COUNT: &str = "system.cpu.physical.count"; @@ -3374,7 +3992,7 @@ pub const SYSTEM_CPU_PHYSICAL_COUNT: &str = "system.cpu.physical.count"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -3392,7 +4010,7 @@ pub const SYSTEM_CPU_TIME: &str = "system.cpu.time"; /// |:-|:- /// | Instrument: | `gauge` | /// | Unit: | `1` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -3408,7 +4026,7 @@ pub const SYSTEM_CPU_UTILIZATION: &str = "system.cpu.utilization"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -3435,7 +4053,7 @@ pub const SYSTEM_DISK_IO: &str = "system.disk.io"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -3452,7 +4070,7 @@ pub const SYSTEM_DISK_IO_TIME: &str = "system.disk.io_time"; /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -3467,7 +4085,7 @@ pub const SYSTEM_DISK_LIMIT: &str = "system.disk.limit"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `{operation}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -3492,7 +4110,7 @@ pub const SYSTEM_DISK_MERGED: &str = "system.disk.merged"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -3508,7 +4126,7 @@ pub const SYSTEM_DISK_OPERATION_TIME: &str = "system.disk.operation_time"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `{operation}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -3526,7 +4144,7 @@ pub const SYSTEM_DISK_OPERATIONS: &str = "system.disk.operations"; /// 
|:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -3551,7 +4169,7 @@ pub const SYSTEM_FILESYSTEM_LIMIT: &str = "system.filesystem.limit"; /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -3570,7 +4188,7 @@ pub const SYSTEM_FILESYSTEM_USAGE: &str = "system.filesystem.usage"; /// |:-|:- /// | Instrument: | `gauge` | /// | Unit: | `1` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -3599,7 +4217,7 @@ pub const SYSTEM_FILESYSTEM_UTILIZATION: &str = "system.filesystem.utilization"; /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const SYSTEM_LINUX_MEMORY_AVAILABLE: &str = "system.linux.memory.available"; @@ -3617,7 +4235,7 @@ pub const SYSTEM_LINUX_MEMORY_AVAILABLE: &str = "system.linux.memory.available"; /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -3638,7 +4256,7 @@ pub const SYSTEM_LINUX_MEMORY_SLAB_USAGE: &str = "system.linux.memory.slab.usage /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const SYSTEM_MEMORY_LIMIT: &str = "system.memory.limit"; @@ -3655,7 +4273,7 @@ pub const SYSTEM_MEMORY_LIMIT: &str = "system.memory.limit"; /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const SYSTEM_MEMORY_SHARED: &str = "system.memory.shared"; @@ -3672,7 +4290,7 @@ pub const SYSTEM_MEMORY_SHARED: &str = "system.memory.shared"; /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -3687,7 +4305,7 @@ pub const SYSTEM_MEMORY_USAGE: &str = "system.memory.usage"; /// |:-|:- /// | Instrument: | `gauge` | /// | Unit: | `1` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -3702,14 +4320,14 @@ pub const SYSTEM_MEMORY_UTILIZATION: &str = "system.memory.utilization"; /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `{connection}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | /// |:-|:- | +/// | [`crate::attribute::NETWORK_CONNECTION_STATE`] | `Recommended` /// | [`crate::attribute::NETWORK_INTERFACE_NAME`] | `Recommended` /// | [`crate::attribute::NETWORK_TRANSPORT`] | `Recommended` -/// | [`crate::attribute::SYSTEM_NETWORK_STATE`] | `Recommended` #[cfg(feature = "semconv_experimental")] pub const SYSTEM_NETWORK_CONNECTIONS: &str = "system.network.connections"; @@ -3729,7 +4347,7 @@ pub const SYSTEM_NETWORK_CONNECTIONS: &str = "system.network.connections"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `{packet}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ 
-3755,7 +4373,7 @@ pub const SYSTEM_NETWORK_DROPPED: &str = "system.network.dropped"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `{error}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -3771,7 +4389,7 @@ pub const SYSTEM_NETWORK_ERRORS: &str = "system.network.errors"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -3787,7 +4405,7 @@ pub const SYSTEM_NETWORK_IO: &str = "system.network.io"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `{packet}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -3803,7 +4421,7 @@ pub const SYSTEM_NETWORK_PACKETS: &str = "system.network.packets"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `{fault}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -3818,7 +4436,7 @@ pub const SYSTEM_PAGING_FAULTS: &str = "system.paging.faults"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `{operation}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -3836,7 +4454,7 @@ pub const SYSTEM_PAGING_OPERATIONS: &str = "system.paging.operations"; /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -3852,7 +4470,7 @@ pub const SYSTEM_PAGING_USAGE: &str = "system.paging.usage"; /// |:-|:- /// | Instrument: | `gauge` | /// | Unit: | `1` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -3870,7 +4488,7 @@ pub const SYSTEM_PAGING_UTILIZATION: &str = "system.paging.utilization"; /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `{process}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -3887,7 +4505,7 @@ pub const SYSTEM_PROCESS_COUNT: &str = "system.process.count"; /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `{process}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const SYSTEM_PROCESS_CREATED: &str = "system.process.created"; @@ -3904,7 +4522,7 @@ pub const SYSTEM_PROCESS_CREATED: &str = "system.process.created"; /// |:-|:- /// | Instrument: | `gauge` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const SYSTEM_UPTIME: &str = "system.uptime"; @@ -3920,7 +4538,7 @@ pub const SYSTEM_UPTIME: &str = "system.uptime"; /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -3941,7 +4559,7 @@ pub const V8JS_GC_DURATION: &str = "v8js.gc.duration"; /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -3962,7 +4580,7 @@ pub const V8JS_HEAP_SPACE_AVAILABLE_SIZE: &str = "v8js.heap.space.available_size /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | 
`Development` | /// /// ## Attributes /// | Name | Requirement | @@ -3983,7 +4601,7 @@ pub const V8JS_HEAP_SPACE_PHYSICAL_SIZE: &str = "v8js.heap.space.physical_size"; /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -4004,7 +4622,7 @@ pub const V8JS_MEMORY_HEAP_LIMIT: &str = "v8js.memory.heap.limit"; /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `By` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -4021,12 +4639,13 @@ pub const V8JS_MEMORY_HEAP_USED: &str = "v8js.memory.heap.used"; /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `{change}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | /// |:-|:- | /// | [`crate::attribute::VCS_CHANGE_STATE`] | `Required` +/// | [`crate::attribute::VCS_REPOSITORY_NAME`] | `Recommended` /// | [`crate::attribute::VCS_REPOSITORY_URL_FULL`] | `Required` #[cfg(feature = "semconv_experimental")] pub const VCS_CHANGE_COUNT: &str = "vcs.change.count"; @@ -4039,13 +4658,14 @@ pub const VCS_CHANGE_COUNT: &str = "vcs.change.count"; /// |:-|:- /// | Instrument: | `gauge` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | /// |:-|:- | /// | [`crate::attribute::VCS_CHANGE_STATE`] | `Required` /// | [`crate::attribute::VCS_REF_HEAD_NAME`] | `Required` +/// | [`crate::attribute::VCS_REPOSITORY_NAME`] | `Recommended` /// | [`crate::attribute::VCS_REPOSITORY_URL_FULL`] | `Required` #[cfg(feature = "semconv_experimental")] pub const VCS_CHANGE_DURATION: &str = "vcs.change.duration"; @@ -4058,16 +4678,42 @@ pub const VCS_CHANGE_DURATION: &str = "vcs.change.duration"; /// |:-|:- /// | Instrument: | `gauge` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | /// |:-|:- | +/// | [`crate::attribute::VCS_REF_BASE_NAME`] | `Recommended` +/// | [`crate::attribute::VCS_REF_BASE_REVISION`] | `Opt_in` /// | [`crate::attribute::VCS_REF_HEAD_NAME`] | `Required` +/// | [`crate::attribute::VCS_REF_HEAD_REVISION`] | `Opt_in` +/// | [`crate::attribute::VCS_REPOSITORY_NAME`] | `Recommended` /// | [`crate::attribute::VCS_REPOSITORY_URL_FULL`] | `Required` #[cfg(feature = "semconv_experimental")] pub const VCS_CHANGE_TIME_TO_APPROVAL: &str = "vcs.change.time_to_approval"; +/// ## Description +/// +/// The amount of time since its creation it took a change (pull request/merge request/changelist) to get merged into the target(base) ref +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `gauge` | +/// | Unit: | `s` | +/// | Status: | `Development` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::VCS_REF_BASE_NAME`] | `Recommended` +/// | [`crate::attribute::VCS_REF_BASE_REVISION`] | `Opt_in` +/// | [`crate::attribute::VCS_REF_HEAD_NAME`] | `Required` +/// | [`crate::attribute::VCS_REF_HEAD_REVISION`] | `Opt_in` +/// | [`crate::attribute::VCS_REPOSITORY_NAME`] | `Recommended` +/// | [`crate::attribute::VCS_REPOSITORY_URL_FULL`] | `Required` +#[cfg(feature = "semconv_experimental")] +pub const VCS_CHANGE_TIME_TO_MERGE: &str = "vcs.change.time_to_merge"; + /// ## Description /// /// The number of unique contributors to a repository @@ -4076,11 +4722,12 @@ pub const 
VCS_CHANGE_TIME_TO_APPROVAL: &str = "vcs.change.time_to_approval"; /// |:-|:- /// | Instrument: | `gauge` | /// | Unit: | `{contributor}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | /// |:-|:- | +/// | [`crate::attribute::VCS_REPOSITORY_NAME`] | `Recommended` /// | [`crate::attribute::VCS_REPOSITORY_URL_FULL`] | `Required` #[cfg(feature = "semconv_experimental")] pub const VCS_CONTRIBUTOR_COUNT: &str = "vcs.contributor.count"; @@ -4093,19 +4740,20 @@ pub const VCS_CONTRIBUTOR_COUNT: &str = "vcs.contributor.count"; /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `{ref}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | /// |:-|:- | /// | [`crate::attribute::VCS_REF_TYPE`] | `Required` +/// | [`crate::attribute::VCS_REPOSITORY_NAME`] | `Recommended` /// | [`crate::attribute::VCS_REPOSITORY_URL_FULL`] | `Required` #[cfg(feature = "semconv_experimental")] pub const VCS_REF_COUNT: &str = "vcs.ref.count"; /// ## Description /// -/// The number of lines added/removed in a ref (branch) relative to the ref from the `vcs.ref.base.name` attribute +/// The number of lines added/removed in a ref (branch) relative to the ref from the `vcs.ref.base.name` attribute. /// /// ## Notes /// @@ -4117,7 +4765,7 @@ pub const VCS_REF_COUNT: &str = "vcs.ref.count"; /// |:-|:- /// | Instrument: | `gauge` | /// | Unit: | `{line}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -4128,6 +4776,7 @@ pub const VCS_REF_COUNT: &str = "vcs.ref.count"; /// | [`crate::attribute::VCS_REF_BASE_TYPE`] | `Required` /// | [`crate::attribute::VCS_REF_HEAD_NAME`] | `Required` /// | [`crate::attribute::VCS_REF_HEAD_TYPE`] | `Required` +/// | [`crate::attribute::VCS_REPOSITORY_NAME`] | `Recommended` /// | [`crate::attribute::VCS_REPOSITORY_URL_FULL`] | `Required` #[cfg(feature = "semconv_experimental")] pub const VCS_REF_LINES_DELTA: &str = "vcs.ref.lines_delta"; @@ -4145,7 +4794,7 @@ pub const VCS_REF_LINES_DELTA: &str = "vcs.ref.lines_delta"; /// |:-|:- /// | Instrument: | `gauge` | /// | Unit: | `{revision}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | @@ -4155,6 +4804,7 @@ pub const VCS_REF_LINES_DELTA: &str = "vcs.ref.lines_delta"; /// | [`crate::attribute::VCS_REF_BASE_TYPE`] | `Required` /// | [`crate::attribute::VCS_REF_HEAD_NAME`] | `Required` /// | [`crate::attribute::VCS_REF_HEAD_TYPE`] | `Required` +/// | [`crate::attribute::VCS_REPOSITORY_NAME`] | `Recommended` /// | [`crate::attribute::VCS_REPOSITORY_URL_FULL`] | `Required` /// | [`crate::attribute::VCS_REVISION_DELTA_DIRECTION`] | `Required` #[cfg(feature = "semconv_experimental")] @@ -4168,13 +4818,14 @@ pub const VCS_REF_REVISIONS_DELTA: &str = "vcs.ref.revisions_delta"; /// |:-|:- /// | Instrument: | `gauge` | /// | Unit: | `s` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | /// /// ## Attributes /// | Name | Requirement | /// |:-|:- | /// | [`crate::attribute::VCS_REF_HEAD_NAME`] | `Required` /// | [`crate::attribute::VCS_REF_HEAD_TYPE`] | `Required` +/// | [`crate::attribute::VCS_REPOSITORY_NAME`] | `Recommended` /// | [`crate::attribute::VCS_REPOSITORY_URL_FULL`] | `Required` #[cfg(feature = "semconv_experimental")] pub const VCS_REF_TIME: &str = "vcs.ref.time"; @@ -4187,6 +4838,6 @@ pub const VCS_REF_TIME: &str = "vcs.ref.time"; /// |:-|:- /// | 
Instrument: | `updowncounter` | /// | Unit: | `{repository}` | -/// | Status: | `Experimental` | +/// | Status: | `Development` | #[cfg(feature = "semconv_experimental")] pub const VCS_REPOSITORY_COUNT: &str = "vcs.repository.count"; diff --git a/opentelemetry-semantic-conventions/src/trace.rs b/opentelemetry-semantic-conventions/src/trace.rs index a37ce1dead..7cbe6df87c 100644 --- a/opentelemetry-semantic-conventions/src/trace.rs +++ b/opentelemetry-semantic-conventions/src/trace.rs @@ -94,6 +94,9 @@ pub use crate::attribute::AWS_DYNAMODB_TABLE_NAMES; #[cfg(feature = "semconv_experimental")] pub use crate::attribute::AWS_DYNAMODB_TOTAL_SEGMENTS; +#[cfg(feature = "semconv_experimental")] +pub use crate::attribute::AWS_EXTENDED_REQUEST_ID; + #[cfg(feature = "semconv_experimental")] pub use crate::attribute::AWS_LAMBDA_INVOKED_ARN; @@ -124,60 +127,54 @@ pub use crate::attribute::AZ_NAMESPACE; #[cfg(feature = "semconv_experimental")] pub use crate::attribute::AZ_SERVICE_REQUEST_ID; -pub use crate::attribute::CLIENT_ADDRESS; - -pub use crate::attribute::CLIENT_PORT; - #[cfg(feature = "semconv_experimental")] -pub use crate::attribute::CLOUD_RESOURCE_ID; +pub use crate::attribute::AZURE_CLIENT_ID; #[cfg(feature = "semconv_experimental")] -pub use crate::attribute::DB_CASSANDRA_CONSISTENCY_LEVEL; +pub use crate::attribute::AZURE_COSMOSDB_CONNECTION_MODE; #[cfg(feature = "semconv_experimental")] -pub use crate::attribute::DB_CASSANDRA_COORDINATOR_DC; +pub use crate::attribute::AZURE_COSMOSDB_CONSISTENCY_LEVEL; #[cfg(feature = "semconv_experimental")] -pub use crate::attribute::DB_CASSANDRA_COORDINATOR_ID; +pub use crate::attribute::AZURE_COSMOSDB_OPERATION_CONTACTED_REGIONS; #[cfg(feature = "semconv_experimental")] -pub use crate::attribute::DB_CASSANDRA_IDEMPOTENCE; +pub use crate::attribute::AZURE_COSMOSDB_OPERATION_REQUEST_CHARGE; #[cfg(feature = "semconv_experimental")] -pub use crate::attribute::DB_CASSANDRA_PAGE_SIZE; +pub use crate::attribute::AZURE_COSMOSDB_REQUEST_BODY_SIZE; #[cfg(feature = "semconv_experimental")] -pub use crate::attribute::DB_CASSANDRA_SPECULATIVE_EXECUTION_COUNT; +pub use crate::attribute::AZURE_COSMOSDB_RESPONSE_SUB_STATUS_CODE; #[cfg(feature = "semconv_experimental")] -pub use crate::attribute::DB_COLLECTION_NAME; +pub use crate::attribute::CASSANDRA_CONSISTENCY_LEVEL; #[cfg(feature = "semconv_experimental")] -pub use crate::attribute::DB_COSMOSDB_CLIENT_ID; +pub use crate::attribute::CASSANDRA_COORDINATOR_DC; #[cfg(feature = "semconv_experimental")] -pub use crate::attribute::DB_COSMOSDB_CONNECTION_MODE; +pub use crate::attribute::CASSANDRA_COORDINATOR_ID; #[cfg(feature = "semconv_experimental")] -pub use crate::attribute::DB_COSMOSDB_CONSISTENCY_LEVEL; +pub use crate::attribute::CASSANDRA_PAGE_SIZE; #[cfg(feature = "semconv_experimental")] -pub use crate::attribute::DB_COSMOSDB_REGIONS_CONTACTED; +pub use crate::attribute::CASSANDRA_QUERY_IDEMPOTENT; #[cfg(feature = "semconv_experimental")] -pub use crate::attribute::DB_COSMOSDB_REQUEST_CHARGE; +pub use crate::attribute::CASSANDRA_SPECULATIVE_EXECUTION_COUNT; -#[cfg(feature = "semconv_experimental")] -pub use crate::attribute::DB_COSMOSDB_REQUEST_CONTENT_LENGTH; +pub use crate::attribute::CLIENT_ADDRESS; -#[cfg(feature = "semconv_experimental")] -pub use crate::attribute::DB_COSMOSDB_SUB_STATUS_CODE; +pub use crate::attribute::CLIENT_PORT; #[cfg(feature = "semconv_experimental")] -pub use crate::attribute::DB_ELASTICSEARCH_NODE_NAME; +pub use crate::attribute::CLOUD_RESOURCE_ID; #[cfg(feature = 
"semconv_experimental")] -pub use crate::attribute::DB_ELASTICSEARCH_PATH_PARTS; +pub use crate::attribute::DB_COLLECTION_NAME; #[cfg(feature = "semconv_experimental")] pub use crate::attribute::DB_NAMESPACE; @@ -204,10 +201,14 @@ pub use crate::attribute::DB_RESPONSE_RETURNED_ROWS; pub use crate::attribute::DB_RESPONSE_STATUS_CODE; #[cfg(feature = "semconv_experimental")] -pub use crate::attribute::DB_SYSTEM; +pub use crate::attribute::DB_SYSTEM_NAME; + +#[cfg(feature = "semconv_experimental")] +pub use crate::attribute::ELASTICSEARCH_NODE_NAME; pub use crate::attribute::ERROR_TYPE; +#[allow(deprecated)] pub use crate::attribute::EXCEPTION_ESCAPED; pub use crate::attribute::EXCEPTION_MESSAGE; @@ -279,9 +280,6 @@ pub use crate::attribute::FEATURE_FLAG_VERSION; #[cfg(feature = "semconv_experimental")] pub use crate::attribute::GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT; -#[cfg(feature = "semconv_experimental")] -pub use crate::attribute::GEN_AI_OPENAI_REQUEST_SEED; - #[cfg(feature = "semconv_experimental")] pub use crate::attribute::GEN_AI_OPENAI_REQUEST_SERVICE_TIER; @@ -309,6 +307,9 @@ pub use crate::attribute::GEN_AI_REQUEST_MODEL; #[cfg(feature = "semconv_experimental")] pub use crate::attribute::GEN_AI_REQUEST_PRESENCE_PENALTY; +#[cfg(feature = "semconv_experimental")] +pub use crate::attribute::GEN_AI_REQUEST_SEED; + #[cfg(feature = "semconv_experimental")] pub use crate::attribute::GEN_AI_REQUEST_STOP_SEQUENCES; diff --git a/opentelemetry-stdout/src/logs/exporter.rs b/opentelemetry-stdout/src/logs/exporter.rs index 6313474dd1..da984b9843 100644 --- a/opentelemetry-stdout/src/logs/exporter.rs +++ b/opentelemetry-stdout/src/logs/exporter.rs @@ -1,6 +1,6 @@ use chrono::{DateTime, Utc}; use core::fmt; -use opentelemetry_sdk::export::logs::LogBatch; +use opentelemetry_sdk::logs::LogBatch; use opentelemetry_sdk::logs::LogResult; use opentelemetry_sdk::Resource; use std::sync::atomic; @@ -29,7 +29,7 @@ impl fmt::Debug for LogExporter { } } -impl opentelemetry_sdk::export::logs::LogExporter for LogExporter { +impl opentelemetry_sdk::logs::LogExporter for LogExporter { /// Export spans to stdout #[allow(clippy::manual_async_fn)] fn export( @@ -75,7 +75,10 @@ impl opentelemetry_sdk::export::logs::LogExporter for LogExporter { fn print_logs(batch: LogBatch<'_>) { for (i, log) in batch.iter().enumerate() { println!("Log #{}", i); - let (record, _library) = log; + let (record, library) = log; + + println!("\t Instrumentation Scope: {:?}", library); + if let Some(event_name) = record.event_name() { println!("\t EventName: {:?}", event_name); } diff --git a/opentelemetry-stdout/src/logs/mod.rs b/opentelemetry-stdout/src/logs/mod.rs index 76a8b1debe..8c48429366 100644 --- a/opentelemetry-stdout/src/logs/mod.rs +++ b/opentelemetry-stdout/src/logs/mod.rs @@ -2,7 +2,7 @@ //! //! The stdout [`LogExporter`] writes debug printed [`LogRecord`]s to Stdout. //! -//! [`LogExporter`]: opentelemetry_sdk::export::logs::LogExporter +//! [`LogExporter`]: opentelemetry_sdk::logs::LogExporter //! 
[`LogRecord`]: opentelemetry::logs::LogRecord mod exporter; pub use exporter::*; diff --git a/opentelemetry-stdout/src/metrics/exporter.rs b/opentelemetry-stdout/src/metrics/exporter.rs index 54feb33c41..0981f939c4 100644 --- a/opentelemetry-stdout/src/metrics/exporter.rs +++ b/opentelemetry-stdout/src/metrics/exporter.rs @@ -2,7 +2,10 @@ use async_trait::async_trait; use chrono::{DateTime, Utc}; use core::{f64, fmt}; use opentelemetry_sdk::metrics::{ - data::{self, ScopeMetrics}, + data::{ + ExponentialHistogram, Gauge, GaugeDataPoint, Histogram, HistogramDataPoint, + ResourceMetrics, ScopeMetrics, Sum, SumDataPoint, + }, exporter::PushMetricExporter, }; use opentelemetry_sdk::metrics::{MetricError, MetricResult, Temporality}; @@ -36,7 +39,7 @@ impl fmt::Debug for MetricExporter { #[async_trait] impl PushMetricExporter for MetricExporter { /// Write Metrics to stdout - async fn export(&self, metrics: &mut data::ResourceMetrics) -> MetricResult<()> { + async fn export(&self, metrics: &mut ResourceMetrics) -> MetricResult<()> { if self.is_shutdown.load(atomic::Ordering::SeqCst) { Err(MetricError::Other("exporter is shut down".into())) } else { @@ -97,34 +100,34 @@ fn print_metrics(metrics: &[ScopeMetrics]) { println!("\t\tUnit : {}", &metric.unit); let data = metric.data.as_any(); - if let Some(hist) = data.downcast_ref::<data::Histogram<u64>>() { + if let Some(hist) = data.downcast_ref::<Histogram<u64>>() { println!("\t\tType : Histogram"); print_histogram(hist); - } else if let Some(hist) = data.downcast_ref::<data::Histogram<f64>>() { + } else if let Some(hist) = data.downcast_ref::<Histogram<f64>>() { println!("\t\tType : Histogram"); print_histogram(hist); - } else if let Some(_hist) = data.downcast_ref::<data::ExponentialHistogram<u64>>() { + } else if let Some(_hist) = data.downcast_ref::<ExponentialHistogram<u64>>() { println!("\t\tType : Exponential Histogram"); // TODO - } else if let Some(_hist) = data.downcast_ref::<data::ExponentialHistogram<f64>>() { + } else if let Some(_hist) = data.downcast_ref::<ExponentialHistogram<f64>>() { println!("\t\tType : Exponential Histogram"); // TODO - } else if let Some(sum) = data.downcast_ref::<data::Sum<u64>>() { + } else if let Some(sum) = data.downcast_ref::<Sum<u64>>() { println!("\t\tType : Sum"); print_sum(sum); - } else if let Some(sum) = data.downcast_ref::<data::Sum<i64>>() { + } else if let Some(sum) = data.downcast_ref::<Sum<i64>>() { println!("\t\tType : Sum"); print_sum(sum); - } else if let Some(sum) = data.downcast_ref::<data::Sum<f64>>() { + } else if let Some(sum) = data.downcast_ref::<Sum<f64>>() { println!("\t\tType : Sum"); print_sum(sum); - } else if let Some(gauge) = data.downcast_ref::<data::Gauge<u64>>() { + } else if let Some(gauge) = data.downcast_ref::<Gauge<u64>>() { println!("\t\tType : Gauge"); print_gauge(gauge); - } else if let Some(gauge) = data.downcast_ref::<data::Gauge<i64>>() { + } else if let Some(gauge) = data.downcast_ref::<Gauge<i64>>() { println!("\t\tType : Gauge"); print_gauge(gauge); - } else if let Some(gauge) = data.downcast_ref::<data::Gauge<f64>>() { + } else if let Some(gauge) = data.downcast_ref::<Gauge<f64>>() { println!("\t\tType : Gauge"); print_gauge(gauge); } else { @@ -134,7 +137,7 @@ fn print_metrics(metrics: &[ScopeMetrics]) { } } -fn print_sum<T: Debug>(sum: &data::Sum<T>) { +fn print_sum<T: Debug>(sum: &Sum<T>) { println!("\t\tSum DataPoints"); println!("\t\tMonotonic : {}", sum.is_monotonic); if sum.temporality == Temporality::Cumulative { @@ -155,7 +158,7 @@ print_sum_data_points(&sum.data_points); } -fn print_gauge<T: Debug>(gauge: &data::Gauge<T>) { +fn print_gauge<T: Debug>(gauge: &Gauge<T>) { println!("\t\tGauge DataPoints"); if let Some(start_time) = gauge.start_time { let datetime: DateTime<Utc> = start_time.into(); @@ -172,7 +175,7 @@ print_gauge_data_points(&gauge.data_points); }
-fn print_histogram<T: Debug>(histogram: &data::Histogram<T>) { +fn print_histogram<T: Debug>(histogram: &Histogram<T>) { if histogram.temporality == Temporality::Cumulative { println!("\t\tTemporality : Cumulative"); } else { @@ -192,7 +195,7 @@ fn print_histogram<T: Debug>(histogram: &data::Histogram<T>) { print_hist_data_points(&histogram.data_points); } -fn print_sum_data_points<T: Debug>(data_points: &[data::SumDataPoint<T>]) { +fn print_sum_data_points<T: Debug>(data_points: &[SumDataPoint<T>]) { for (i, data_point) in data_points.iter().enumerate() { println!("\t\tDataPoint #{}", i); println!("\t\t\tValue : {:#?}", data_point.value); @@ -203,7 +206,7 @@ fn print_sum_data_points<T: Debug>(data_points: &[data::SumDataPoint<T>]) { } } -fn print_gauge_data_points<T: Debug>(data_points: &[data::GaugeDataPoint<T>]) { +fn print_gauge_data_points<T: Debug>(data_points: &[GaugeDataPoint<T>]) { for (i, data_point) in data_points.iter().enumerate() { println!("\t\tDataPoint #{}", i); println!("\t\t\tValue : {:#?}", data_point.value); @@ -214,7 +217,7 @@ fn print_gauge_data_points<T: Debug>(data_points: &[data::GaugeDataPoint<T>]) { } } -fn print_hist_data_points<T: Debug>(data_points: &[data::HistogramDataPoint<T>]) { +fn print_hist_data_points<T: Debug>(data_points: &[HistogramDataPoint<T>]) { for (i, data_point) in data_points.iter().enumerate() { println!("\t\tDataPoint #{}", i); println!("\t\t\tCount : {}", data_point.count); @@ -231,6 +234,20 @@ fn print_hist_data_points<T: Debug>(data_points: &[data::HistogramDataPoint<T>]) for kv in data_point.attributes.iter() { println!("\t\t\t\t -> {}: {}", kv.key, kv.value.as_str()); } + + println!("\t\t\tBuckets"); + let mut lower_bound = f64::NEG_INFINITY; + for (i, &upper_bound) in data_point.bounds.iter().enumerate() { + let count = data_point.bucket_counts.get(i).unwrap_or(&0); + println!("\t\t\t\t {} to {} : {}", lower_bound, upper_bound, count); + lower_bound = upper_bound; + } + + let last_count = data_point + .bucket_counts + .get(data_point.bounds.len()) + .unwrap_or(&0); + println!("\t\t\t\t{} to +Infinity : {}", lower_bound, last_count); } } diff --git a/opentelemetry-stdout/src/trace/exporter.rs b/opentelemetry-stdout/src/trace/exporter.rs index bf00909890..9653ae77f9 100644 --- a/opentelemetry-stdout/src/trace/exporter.rs +++ b/opentelemetry-stdout/src/trace/exporter.rs @@ -2,7 +2,7 @@ use chrono::{DateTime, Utc}; use core::fmt; use futures_util::future::BoxFuture; use opentelemetry::trace::TraceError; -use opentelemetry_sdk::export::{self, trace::ExportResult}; +use opentelemetry_sdk::trace::{ExportResult, SpanData}; use std::sync::atomic; use opentelemetry_sdk::resource::Resource; @@ -30,9 +30,9 @@ impl Default for SpanExporter { } } -impl opentelemetry_sdk::export::trace::SpanExporter for SpanExporter { +impl opentelemetry_sdk::trace::SpanExporter for SpanExporter { /// Write Spans to stdout - fn export(&mut self, batch: Vec<export::trace::SpanData>) -> BoxFuture<'static, ExportResult> { + fn export(&mut self, batch: Vec<SpanData>) -> BoxFuture<'static, ExportResult> { if self.is_shutdown.load(atomic::Ordering::SeqCst) { Box::pin(std::future::ready(Err(TraceError::from( "exporter is shut down", )))) } else { @@ -68,7 +68,7 @@ impl opentelemetry_sdk::export::trace::SpanExporter for SpanExporter { } } -fn print_spans(batch: Vec<export::trace::SpanData>) { +fn print_spans(batch: Vec<SpanData>) { for (i, span) in batch.into_iter().enumerate() { println!("Span #{}", i); println!("\tInstrumentation Scope"); diff --git a/opentelemetry-zipkin/src/exporter/mod.rs b/opentelemetry-zipkin/src/exporter/mod.rs index 53890c02f0..dbfd549076 100644 --- a/opentelemetry-zipkin/src/exporter/mod.rs +++ b/opentelemetry-zipkin/src/exporter/mod.rs @@ -9,10 +9,10 @@ use model::endpoint::Endpoint;
use opentelemetry::{global, trace::TraceError, InstrumentationScope, KeyValue}; use opentelemetry_http::HttpClient; use opentelemetry_sdk::{ - export::{trace, ExportError}, resource::{ResourceDetector, SdkProvidedResourceDetector}, + trace, trace::{Config, Tracer, TracerProvider}, - Resource, + ExportError, Resource, }; use opentelemetry_semantic_conventions as semcov; use std::borrow::Cow; @@ -116,7 +116,7 @@ impl ZipkinPipelineBuilder { } else { let service_name = SdkProvidedResourceDetector .detect() - .get(semcov::resource::SERVICE_NAME.into()) + .get(&semcov::resource::SERVICE_NAME.into()) .unwrap() .to_string(); ( diff --git a/opentelemetry-zipkin/src/exporter/model/mod.rs b/opentelemetry-zipkin/src/exporter/model/mod.rs index a78708a2ae..9f799e34d8 100644 --- a/opentelemetry-zipkin/src/exporter/model/mod.rs +++ b/opentelemetry-zipkin/src/exporter/model/mod.rs @@ -2,7 +2,7 @@ use opentelemetry::{ trace::{SpanKind, Status}, Key, KeyValue, }; -use opentelemetry_sdk::export::trace::SpanData; +use opentelemetry_sdk::trace::SpanData; use std::collections::HashMap; use std::time::{Duration, SystemTime}; diff --git a/opentelemetry-zipkin/src/exporter/model/span.rs b/opentelemetry-zipkin/src/exporter/model/span.rs index 51223be92b..fb19a93371 100644 --- a/opentelemetry-zipkin/src/exporter/model/span.rs +++ b/opentelemetry-zipkin/src/exporter/model/span.rs @@ -60,7 +60,7 @@ mod tests { use crate::exporter::model::span::{Kind, Span}; use crate::exporter::model::{into_zipkin_span, OTEL_ERROR_DESCRIPTION, OTEL_STATUS_CODE}; use opentelemetry::trace::{SpanContext, SpanId, SpanKind, Status, TraceFlags, TraceId}; - use opentelemetry_sdk::export::trace::SpanData; + use opentelemetry_sdk::trace::SpanData; use opentelemetry_sdk::trace::{SpanEvents, SpanLinks}; use std::collections::HashMap; use std::net::Ipv4Addr; diff --git a/opentelemetry-zipkin/src/exporter/uploader.rs b/opentelemetry-zipkin/src/exporter/uploader.rs index 84f0581dc5..7e8fe6ec7c 100644 --- a/opentelemetry-zipkin/src/exporter/uploader.rs +++ b/opentelemetry-zipkin/src/exporter/uploader.rs @@ -3,7 +3,7 @@ use crate::exporter::model::span::Span; use crate::exporter::Error; use http::{header::CONTENT_TYPE, Method, Request, Uri}; use opentelemetry_http::{HttpClient, ResponseExt}; -use opentelemetry_sdk::export::trace::ExportResult; +use opentelemetry_sdk::trace::ExportResult; use std::fmt::Debug; use std::sync::Arc; diff --git a/opentelemetry-zipkin/src/lib.rs b/opentelemetry-zipkin/src/lib.rs index 8d414db8a8..1bc9d3ecc9 100644 --- a/opentelemetry-zipkin/src/lib.rs +++ b/opentelemetry-zipkin/src/lib.rs @@ -87,7 +87,7 @@ //! ```no_run //! use opentelemetry::{global, KeyValue, trace::Tracer}; //! use opentelemetry_sdk::{trace::{self, RandomIdGenerator, Sampler}, Resource}; -//! use opentelemetry_sdk::export::trace::ExportResult; +//! use opentelemetry_sdk::trace::ExportResult; //! use opentelemetry_http::{HttpClient, HttpError}; //! use async_trait::async_trait; //! use bytes::Bytes; diff --git a/opentelemetry/CHANGELOG.md b/opentelemetry/CHANGELOG.md index 5c5d280c43..2977e271a5 100644 --- a/opentelemetry/CHANGELOG.md +++ b/opentelemetry/CHANGELOG.md @@ -4,6 +4,7 @@ - Bump msrv to 1.75.0. - **Breaking** `opentelemetry::global::shutdown_tracer_provider()` Removed from this crate, should now use `tracer_provider.shutdown()` see [#2369](https://github.com/open-telemetry/opentelemetry-rust/pull/2369) for a migration example. +- **Breaking** Removed unused `opentelemetry::PropagationError` struct.
## 0.27.1 diff --git a/opentelemetry/Cargo.toml b/opentelemetry/Cargo.toml index 8175cf4412..6dbf7f2757 100644 --- a/opentelemetry/Cargo.toml +++ b/opentelemetry/Cargo.toml @@ -34,10 +34,9 @@ js-sys = "0.3.63" default = ["trace", "metrics", "logs", "internal-logs"] trace = ["pin-project-lite", "futures-sink", "futures-core", "thiserror"] metrics = [] -testing = ["trace", "metrics"] +testing = ["trace"] logs = [] spec_unstable_logs_enabled = ["logs"] -otel_unstable = [] internal-logs = ["tracing"] [dev-dependencies] diff --git a/opentelemetry/src/global/internal_logging.rs b/opentelemetry/src/global/internal_logging.rs index e27f43c693..dad083ac1f 100644 --- a/opentelemetry/src/global/internal_logging.rs +++ b/opentelemetry/src/global/internal_logging.rs @@ -1,7 +1,7 @@ #![allow(unused_macros)] /// /// **Note**: These macros (`otel_info!`, `otel_warn!`, `otel_debug!`, and `otel_error!`) are intended to be used -/// **internally within OpenTelemetry code** or for **custom exporters and processors**. They are not designed +/// **internally within OpenTelemetry code** or for **custom exporters, processors and other plugins**. They are not designed /// for general application logging and should not be used for that purpose. /// /// Macro for logging informational messages in OpenTelemetry. diff --git a/opentelemetry/src/global/metrics.rs b/opentelemetry/src/global/metrics.rs index 457bc662a6..450ba2d3bc 100644 --- a/opentelemetry/src/global/metrics.rs +++ b/opentelemetry/src/global/metrics.rs @@ -15,6 +15,9 @@ fn global_meter_provider() -> &'static RwLock<GlobalMeterProvider> { /// Sets the given [`MeterProvider`] instance as the current global meter /// provider. +/// Libraries should NOT call this function. It is intended for applications/executables. +/// +/// **NOTE:** This function should be called before getting [`Meter`] instances via [`meter()`] or [`meter_with_scope()`]. Otherwise, you could get no-op [`Meter`] instances. pub fn set_meter_provider<P>(new_provider: P) where P: metrics::MeterProvider + Send + Sync + 'static, @@ -44,6 +47,9 @@ pub fn meter_provider() -> GlobalMeterProvider { /// Creates a named [`Meter`] via the currently configured global [`MeterProvider`]. /// /// This is a more convenient way of expressing `global::meter_provider().meter(name)`. +/// +/// **NOTE:** Calls to [`meter()`] return a [`Meter`] backed by the global [`MeterProvider`] configured during the method invocation. +/// If the global [`MeterProvider`] is changed after getting [`Meter`] instances from these calls, the [`Meter`] instances returned will not reflect the change. pub fn meter(name: &'static str) -> Meter { meter_provider().meter(name) } @@ -52,6 +58,9 @@ pub fn meter(name: &'static str) -> Meter { /// /// This is a simpler alternative to `global::meter_provider().meter_with_scope(...)` /// +/// **NOTE:** Calls to [`meter_with_scope()`] return a [`Meter`] backed by the global [`MeterProvider`] configured during the method invocation. +/// If the global [`MeterProvider`] is changed after getting [`Meter`] instances from these calls, the [`Meter`] instances returned will not reflect the change. +/// /// # Example /// /// ``` diff --git a/opentelemetry/src/global/trace.rs b/opentelemetry/src/global/trace.rs index 8121e4fd9a..30018a576e 100644 --- a/opentelemetry/src/global/trace.rs +++ b/opentelemetry/src/global/trace.rs @@ -422,6 +422,7 @@ pub fn tracer_with_scope(scope: InstrumentationScope) -> BoxedTracer { /// It returns the [`TracerProvider`] instance that was previously mounted as global provider /// (e.g. [`NoopTracerProvider`] if a provider had not been set before). /// +/// Libraries should NOT call this function. It is intended for applications/executables. /// [`TracerProvider`]: crate::trace::TracerProvider pub fn set_tracer_provider<P, T, S>(new_provider: P) -> GlobalTracerProvider where diff --git a/opentelemetry/src/lib.rs b/opentelemetry/src/lib.rs index 10f8facef7..ad906171b2 100644 --- a/opentelemetry/src/lib.rs +++ b/opentelemetry/src/lib.rs @@ -277,7 +277,7 @@ pub mod trace; pub mod logs; #[doc(hidden)] -#[cfg(any(feature = "metrics", feature = "trace"))] +#[cfg(any(feature = "metrics", feature = "trace", feature = "logs"))] pub mod time { use std::time::SystemTime; diff --git a/opentelemetry/src/propagation/mod.rs b/opentelemetry/src/propagation/mod.rs index 35005f881a..863517980b 100644 --- a/opentelemetry/src/propagation/mod.rs +++ b/opentelemetry/src/propagation/mod.rs @@ -20,7 +20,6 @@ //! [`Context`]: crate::Context use std::collections::HashMap; -use thiserror::Error; pub mod composite; pub mod text_map_propagator; @@ -62,37 +61,6 @@ impl Extractor for HashMap<String, String> { } } -/// Error when extracting or injecting context data(i.e propagating) across application boundaries.
-#[derive(Error, Debug)] -#[error("Cannot {} from {}, {}", ops, message, propagator_name)] -pub struct PropagationError { - message: &'static str, - // which propagator does this error comes from - propagator_name: &'static str, - // are we extracting or injecting information across application boundaries - ops: &'static str, -} - -impl PropagationError { - /// Error happens when extracting information - pub fn extract(message: &'static str, propagator_name: &'static str) -> Self { - PropagationError { - message, - propagator_name, - ops: "extract", - } - } - - /// Error happens when extracting information - pub fn inject(message: &'static str, propagator_name: &'static str) -> Self { - PropagationError { - message, - propagator_name, - ops: "inject", - } - } -} - #[cfg(test)] mod tests { use super::*; diff --git a/scripts/integration_tests.sh b/scripts/integration_tests.sh index 36aec3100b..377ec7ba62 100755 --- a/scripts/integration_tests.sh +++ b/scripts/integration_tests.sh @@ -16,11 +16,10 @@ if [ -d "$TEST_DIR" ]; then # Run tests with the reqwest-client feature echo echo #### - echo "Integration Tests: Reqwest Client (Disabled now)" + echo "Integration Tests: Reqwest Client" echo #### echo - # TODO: reqwest client is not supported with thread based processor and reader. Enable this test once it is supported. -#cargo test --no-default-features --features "reqwest-client","internal-logs" + cargo test --no-default-features --features "reqwest-client","internal-logs" # Run tests with the reqwest-blocking-client feature echo @@ -36,8 +35,7 @@ if [ -d "$TEST_DIR" ]; then - echo "Integration Tests: Hyper Client (Disabled now)" + echo "Integration Tests: Hyper Client" echo #### echo - # TODO: hyper client is not supported with thread based processor and reader. Enable this test once it is supported. -#cargo test --no-default-features --features "hyper-client","internal-logs" + cargo test --no-default-features --features "hyper-client","internal-logs" --test logs else echo "Directory $TEST_DIR does not exist. Skipping tests." exit 1 diff --git a/scripts/test.sh b/scripts/test.sh index 467d5f7c4a..b77df21e1c 100755 --- a/scripts/test.sh +++ b/scripts/test.sh @@ -13,6 +13,10 @@ cargo test --workspace --all-features --lib echo "Running tests for opentelemetry package with --no-default-features" cargo test --manifest-path=opentelemetry/Cargo.toml --no-default-features --lib +# Run tests for non-workspace member crate +echo "Running tests for opentelemetry-prometheus with --all-features" +(cd opentelemetry-prometheus && cargo test --all-features --lib) + # Run global tracer provider test in single thread # //TODO: This tests were not running for a while. Need to find out how to run # run them. Using --ignored will run other tests as well, so that cannot be used. diff --git a/stress/src/logs.rs b/stress/src/logs.rs index 2242d48eea..4119ac0d35 100644 --- a/stress/src/logs.rs +++ b/stress/src/logs.rs @@ -11,7 +11,7 @@ use opentelemetry::InstrumentationScope; use opentelemetry_appender_tracing::layer; -use opentelemetry_sdk::export::logs::{LogBatch, LogExporter}; +use opentelemetry_sdk::logs::{LogBatch, LogExporter}; use opentelemetry_sdk::logs::{LogProcessor, LogRecord, LogResult, LoggerProvider}; use tracing::error; diff --git a/stress/src/traces.rs b/stress/src/traces.rs index 73b5563c36..95dfd88eb3 100644 --- a/stress/src/traces.rs +++ b/stress/src/traces.rs @@ -15,7 +15,7 @@ use opentelemetry::{ Context, KeyValue, }; use opentelemetry_sdk::{ - export::trace::SpanData, + trace::SpanData, trace::{self as sdktrace, SpanProcessor}, };
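
For reference, the histogram bucket output added to `opentelemetry-stdout/src/metrics/exporter.rs` above relies on the explicit-bucket layout: for `N` bounds there are `N + 1` bucket counts, and the final bucket runs from the last bound to +Infinity. A minimal standalone sketch of that pairing, using made-up numbers rather than real SDK data:

```rust
fn main() {
    // Hypothetical explicit-bucket histogram data; in the exporter these come
    // from `HistogramDataPoint::bounds` and `HistogramDataPoint::bucket_counts`.
    let bounds = [0.0_f64, 5.0, 10.0];
    let bucket_counts = [1_u64, 4, 7, 2]; // always bounds.len() + 1 entries

    // Walk the (lower, upper] ranges the same way the new print loop does.
    let mut lower_bound = f64::NEG_INFINITY;
    for (i, &upper_bound) in bounds.iter().enumerate() {
        let count = bucket_counts.get(i).unwrap_or(&0);
        println!("{} to {} : {}", lower_bound, upper_bound, count);
        lower_bound = upper_bound;
    }

    // The final bucket is unbounded above.
    let last_count = bucket_counts.get(bounds.len()).unwrap_or(&0);
    println!("{} to +Infinity : {}", lower_bound, last_count);
}
```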
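The doc notes added to `global::set_meter_provider` warn that meters fetched before the global provider is installed stay no-ops. A minimal sketch of the intended ordering, assuming the SDK's `SdkMeterProvider`; the meter and instrument names are hypothetical:

```rust
use opentelemetry::global;
use opentelemetry_sdk::metrics::SdkMeterProvider;

fn main() {
    // Install the global provider first; per the new docs this belongs in the
    // application/executable, never in a library.
    let provider = SdkMeterProvider::builder().build();
    global::set_meter_provider(provider.clone());

    // Only now fetch meters: a `Meter` obtained before the call above would be
    // backed by the default no-op provider and would record nothing.
    let meter = global::meter("my-component");
    let counter = meter.u64_counter("my_counter").build();
    counter.add(1, &[]);

    provider.shutdown().expect("shutdown should succeed");
}
```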
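The `Status: Development` constants renamed throughout `opentelemetry-semantic-conventions` are plain `&'static str` metric names, so they slot directly into instrument builders. A sketch of wiring one up, assuming the `semconv_experimental` cargo feature is enabled; the meter name, attribute literal, and recorded value are illustrative only:

```rust
use opentelemetry::{global, KeyValue};
use opentelemetry_semantic_conventions as semcov;

fn main() {
    let meter = global::meter("system-metrics");

    // `SYSTEM_CPU_TIME` is gated behind the `semconv_experimental` feature.
    let cpu_time = meter
        .f64_counter(semcov::metric::SYSTEM_CPU_TIME)
        .with_unit("s") // the metadata table above lists the unit as `s`
        .build();

    // Illustrative attribute; the semconv registry defines the allowed keys.
    cpu_time.add(0.25, &[KeyValue::new("cpu.mode", "user")]);
}
```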