diff --git a/charts/k8s-monitoring/charts/feature-application-observability/templates/_receiver_jaeger.tpl b/charts/k8s-monitoring/charts/feature-application-observability/templates/_receiver_jaeger.tpl
index f859d3a0a..8b39095cc 100644
--- a/charts/k8s-monitoring/charts/feature-application-observability/templates/_receiver_jaeger.tpl
+++ b/charts/k8s-monitoring/charts/feature-application-observability/templates/_receiver_jaeger.tpl
@@ -1,26 +1,26 @@
 {{/* Inputs: Values (values) tracesOutput */}}
 {{- define "feature.applicationObservability.receiver.jaeger.alloy" }}
-{{- if or .Values.receivers.jaeger.grpc.enabled .Values.receivers.jaeger.thriftBinary.enabled .Values.receivers.jaeger.thriftCompact.enabled .Values.receivers.jaeger.thriftBinary.enabled }}
+{{- if or .Values.receivers.jaeger.grpc.enabled .Values.receivers.jaeger.thriftBinary.enabled .Values.receivers.jaeger.thriftCompact.enabled .Values.receivers.jaeger.thriftHttp.enabled }}
 otelcol.receiver.jaeger "receiver" {
   protocols {
-{{- if .Values.receivers.jaeger.grpc.enabled -}}
+{{- if .Values.receivers.jaeger.grpc.enabled }}
     grpc {
-      endpoint = "0.0.0.0:{{ .Values.receivers.jaeger.grpc | int }}"
+      endpoint = "0.0.0.0:{{ .Values.receivers.jaeger.grpc.port | int }}"
     }
 {{- end }}
 {{- if .Values.receivers.jaeger.thriftBinary.enabled }}
     thrift_binary {
-      endpoint = "0.0.0.0:{{ .Values.receivers.jaeger.thriftBinary | int }}"
+      endpoint = "0.0.0.0:{{ .Values.receivers.jaeger.thriftBinary.port | int }}"
     }
 {{- end }}
 {{- if .Values.receivers.jaeger.thriftCompact.enabled }}
     thrift_compact {
-      endpoint = "0.0.0.0:{{ .Values.receivers.jaeger.thriftCompact | int }}"
+      endpoint = "0.0.0.0:{{ .Values.receivers.jaeger.thriftCompact.port | int }}"
    }
 {{- end }}
 {{- if .Values.receivers.jaeger.thriftHttp.enabled }}
     thrift_http {
-      endpoint = "0.0.0.0:{{ .Values.receivers.jaeger.thriftHttp | int }}"
+      endpoint = "0.0.0.0:{{ .Values.receivers.jaeger.thriftHttp.port | int }}"
     }
 {{- end }}
   }
diff --git a/charts/k8s-monitoring/charts/feature-application-observability/tests/default_test.yaml b/charts/k8s-monitoring/charts/feature-application-observability/tests/default_test.yaml
index 765492263..a83f8a707 100644
--- a/charts/k8s-monitoring/charts/feature-application-observability/tests/default_test.yaml
+++ b/charts/k8s-monitoring/charts/feature-application-observability/tests/default_test.yaml
@@ -33,15 +33,15 @@ tests:
       argument "metrics_destinations" {
         comment = "Must be a list of metrics destinations where collected metrics should be forwarded to"
       }
-      
+
       argument "logs_destinations" {
         comment = "Must be a list of log destinations where collected logs should be forwarded to"
       }
-      
+
       argument "traces_destinations" {
         comment = "Must be a list of trace destinations where collected trace should be forwarded to"
       }
-      
+
       // Receivers --> Resource Detection Processor
       otelcol.receiver.otlp "receiver" {
         grpc {
@@ -60,20 +60,21 @@ tests:
         }
       }
       otelcol.receiver.jaeger "receiver" {
-        protocols {grpc {
-          endpoint = "0.0.0.0:0"
+        protocols {
+          grpc {
+            endpoint = "0.0.0.0:14250"
           }
           thrift_binary {
-          endpoint = "0.0.0.0:0"
+            endpoint = "0.0.0.0:6832"
           }
           thrift_compact {
-          endpoint = "0.0.0.0:0"
+            endpoint = "0.0.0.0:6831"
           }
           thrift_http {
-          endpoint = "0.0.0.0:0"
+            endpoint = "0.0.0.0:14268"
           }
         }
-      
+
         debug_metrics {
           disable_high_cardinality_metrics = true
         }
@@ -90,21 +91,21 @@ tests:
           traces = [otelcol.processor.resourcedetection.default.input]
         }
       }
-      
+
       // Resource Detection Processor --> K8s Attribute Processor
       otelcol.processor.resourcedetection "default" {
        detectors = ["env", "system"]
"system"] system { hostname_sources = ["os"] } - + output { metrics = [otelcol.processor.k8sattributes.default.input] logs = [otelcol.processor.k8sattributes.default.input] traces = [otelcol.processor.k8sattributes.default.input] } } - + // K8s Attribute Processor --> Transform Processor // Resource Detection Processor Traces --> Host Info Connector otelcol.processor.k8sattributes "default" { @@ -116,7 +117,7 @@ tests: from = "connection" } } - + output { metrics = [otelcol.processor.transform.default.input] logs = [otelcol.processor.transform.default.input] @@ -126,13 +127,13 @@ tests: // Host Info Connector --> Batch Processor otelcol.connector.host_info "default" { host_identifiers = [ "k8s.node.name" ] - + output { metrics = [otelcol.processor.batch.default.input] } } - - + + // Transform Processor --> Batch Processor otelcol.processor.transform "default" { error_mode = "ignore" @@ -144,7 +145,7 @@ tests: "set(attributes[\"loki.resource.labels\"], \"cluster, namespace, job, pod\")", ] } - + output { metrics = [otelcol.processor.batch.default.input] logs = [otelcol.processor.batch.default.input] diff --git a/charts/k8s-monitoring/charts/feature-application-observability/tests/jaeger_test.yaml b/charts/k8s-monitoring/charts/feature-application-observability/tests/jaeger_test.yaml new file mode 100644 index 000000000..7caa8e6ac --- /dev/null +++ b/charts/k8s-monitoring/charts/feature-application-observability/tests/jaeger_test.yaml @@ -0,0 +1,177 @@ +--- +# yamllint disable rule:document-start rule:line-length rule:trailing-spaces rule:empty-lines +suite: Test Loki integration +templates: + - configmap.yaml +tests: + - it: should allow you to enable just the jaeger grpc receiver + set: + deployAsConfigMap: true + receivers: + jaeger: + grpc: + enabled: true + asserts: + - isKind: + of: ConfigMap + - matchRegex: + path: data["module.alloy"] + # The pattern should look like this, but since the regex is escaped, it will be a bit different + # otelcol.receiver.jaeger "receiver" { + # protocols { + # grpc { + # endpoint = "0.0.0.0:14250" + # } + # } + # + # debug_metrics { + # disable_high_cardinality_metrics = true + # } + # output { + # traces = [otelcol.processor.resourcedetection.default.input] + # } + # } + pattern: |- + \s*otelcol\.receiver\.jaeger "receiver"\s*\{ + \s* protocols\s*\{ + \s* grpc\s*\{ + \s* endpoint = "0\.0\.0\.0:14250" + \s* \} + \s* \} + \s* + \s* debug_metrics\s*\{ + \s* disable_high_cardinality_metrics = true + \s* \} + \s* output\s*\{ + \s* traces = \[otelcol\.processor\.resourcedetection\.default\.input\] + \s* \} + \s*} + + - it: should allow you to enable just the jaeger thrift binary receiver + set: + deployAsConfigMap: true + receivers: + jaeger: + thriftBinary: + enabled: true + asserts: + - isKind: + of: ConfigMap + - matchRegex: + path: data["module.alloy"] + # The pattern should look like this, but since the regex is escaped, it will be a bit different + # otelcol.receiver.jaeger "receiver" { + # protocols { + # thrift_binary { + # endpoint = "0.0.0.0:6832" + # } + # } + # + # debug_metrics { + # disable_high_cardinality_metrics = true + # } + # output { + # traces = [otelcol.processor.resourcedetection.default.input] + # } + # } + pattern: |- + \s*otelcol\.receiver\.jaeger "receiver"\s*\{ + \s* protocols\s*\{ + \s* thrift_binary\s*\{ + \s* endpoint = "0\.0\.0\.0:6832" + \s* \} + \s* \} + \s* + \s* debug_metrics\s*\{ + \s* disable_high_cardinality_metrics = true + \s* \} + \s* output\s*\{ + \s* traces = 
+            \s*  \}
+            \s*}
+
+  - it: should allow you to enable just the jaeger thrift compact receiver
+    set:
+      deployAsConfigMap: true
+      receivers:
+        jaeger:
+          thriftCompact:
+            enabled: true
+    asserts:
+      - isKind:
+          of: ConfigMap
+      - matchRegex:
+          path: data["module.alloy"]
+          # The pattern should look like this, but since the regex is escaped, it will be a bit different
+          # otelcol.receiver.jaeger "receiver" {
+          #   protocols {
+          #     thrift_compact {
+          #       endpoint = "0.0.0.0:6831"
+          #     }
+          #   }
+          #
+          #   debug_metrics {
+          #     disable_high_cardinality_metrics = true
+          #   }
+          #   output {
+          #     traces = [otelcol.processor.resourcedetection.default.input]
+          #   }
+          # }
+          pattern: |-
+            \s*otelcol\.receiver\.jaeger "receiver"\s*\{
+            \s*  protocols\s*\{
+            \s*    thrift_compact\s*\{
+            \s*      endpoint = "0\.0\.0\.0:6831"
+            \s*    \}
+            \s*  \}
+            \s*
+            \s*  debug_metrics\s*\{
+            \s*    disable_high_cardinality_metrics = true
+            \s*  \}
+            \s*  output\s*\{
+            \s*    traces = \[otelcol\.processor\.resourcedetection\.default\.input\]
+            \s*  \}
+            \s*}
+
+  - it: should allow you to enable just the jaeger thrift http receiver
+    set:
+      deployAsConfigMap: true
+      receivers:
+        jaeger:
+          thriftHttp:
+            enabled: true
+    asserts:
+      - isKind:
+          of: ConfigMap
+      - matchRegex:
+          path: data["module.alloy"]
+          # The pattern should look like this, but since the regex is escaped, it will be a bit different
+          # otelcol.receiver.jaeger "receiver" {
+          #   protocols {
+          #     thrift_http {
+          #       endpoint = "0.0.0.0:14268"
+          #     }
+          #   }
+          #
+          #   debug_metrics {
+          #     disable_high_cardinality_metrics = true
+          #   }
+          #   output {
+          #     traces = [otelcol.processor.resourcedetection.default.input]
+          #   }
+          # }
+          pattern: |-
+            \s*otelcol\.receiver\.jaeger "receiver"\s*\{
+            \s*  protocols\s*\{
+            \s*    thrift_http\s*\{
+            \s*      endpoint = "0\.0\.0\.0:14268"
+            \s*    \}
+            \s*  \}
+            \s*
+            \s*  debug_metrics\s*\{
+            \s*    disable_high_cardinality_metrics = true
+            \s*  \}
+            \s*  output\s*\{
+            \s*    traces = \[otelcol\.processor\.resourcedetection\.default\.input\]
+            \s*  \}
+            \s*}
diff --git a/charts/k8s-monitoring/docs/examples/auth/bearer-token/alloy-receiver.alloy b/charts/k8s-monitoring/docs/examples/auth/bearer-token/alloy-receiver.alloy
index 40b2427dc..db80656f7 100644
--- a/charts/k8s-monitoring/docs/examples/auth/bearer-token/alloy-receiver.alloy
+++ b/charts/k8s-monitoring/docs/examples/auth/bearer-token/alloy-receiver.alloy
@@ -151,7 +151,21 @@ declare "application_observability" {
     comment = "Must be a list of trace destinations where collected trace should be forwarded to"
   }
 
-  // Receivers --> Resource Detection Processor
+  // Receivers --> Resource Detection Processor
+  otelcol.receiver.jaeger "receiver" {
+    protocols {
+      thrift_http {
+        endpoint = "0.0.0.0:14268"
+      }
+    }
+
+    debug_metrics {
+      disable_high_cardinality_metrics = true
+    }
+    output {
+      traces = [otelcol.processor.resourcedetection.default.input]
+    }
+  }
 
   // Resource Detection Processor --> K8s Attribute Processor
   otelcol.processor.resourcedetection "default" {
diff --git a/charts/k8s-monitoring/docs/examples/auth/bearer-token/output.yaml b/charts/k8s-monitoring/docs/examples/auth/bearer-token/output.yaml
index 85e7ee5e0..4710102d3 100644
--- a/charts/k8s-monitoring/docs/examples/auth/bearer-token/output.yaml
+++ b/charts/k8s-monitoring/docs/examples/auth/bearer-token/output.yaml
@@ -552,7 +552,21 @@
         comment = "Must be a list of trace destinations where collected trace should be forwarded to"
       }
 
-      // Receivers --> Resource Detection Processor
+      // Receivers --> Resource Detection Processor
+      otelcol.receiver.jaeger "receiver" {
+        protocols {
+          thrift_http {
+            endpoint = "0.0.0.0:14268"
+          }
+        }
+
+        debug_metrics {
+          disable_high_cardinality_metrics = true
+        }
+        output {
+          traces = [otelcol.processor.resourcedetection.default.input]
+        }
+      }
 
       // Resource Detection Processor --> K8s Attribute Processor
       otelcol.processor.resourcedetection "default" {
diff --git a/charts/k8s-monitoring/docs/examples/auth/external-secrets/alloy-receiver.alloy b/charts/k8s-monitoring/docs/examples/auth/external-secrets/alloy-receiver.alloy
index 36b2caecc..736d12b62 100644
--- a/charts/k8s-monitoring/docs/examples/auth/external-secrets/alloy-receiver.alloy
+++ b/charts/k8s-monitoring/docs/examples/auth/external-secrets/alloy-receiver.alloy
@@ -173,8 +173,9 @@ declare "application_observability" {
   // Receivers --> Resource Detection Processor
   otelcol.receiver.jaeger "receiver" {
-    protocols {grpc {
-      endpoint = "0.0.0.0:0"
+    protocols {
+      grpc {
+        endpoint = "0.0.0.0:14250"
       }
     }
 
diff --git a/charts/k8s-monitoring/docs/examples/auth/external-secrets/output.yaml b/charts/k8s-monitoring/docs/examples/auth/external-secrets/output.yaml
index eab6af779..ead541209 100644
--- a/charts/k8s-monitoring/docs/examples/auth/external-secrets/output.yaml
+++ b/charts/k8s-monitoring/docs/examples/auth/external-secrets/output.yaml
@@ -583,8 +583,9 @@
       // Receivers --> Resource Detection Processor
       otelcol.receiver.jaeger "receiver" {
-        protocols {grpc {
-          endpoint = "0.0.0.0:0"
+        protocols {
+          grpc {
+            endpoint = "0.0.0.0:14250"
           }
         }
 
diff --git a/charts/k8s-monitoring/docs/examples/meta-monitoring/README.md b/charts/k8s-monitoring/docs/examples/meta-monitoring/README.md
index 7ec9676f6..5d879d2cf 100644
--- a/charts/k8s-monitoring/docs/examples/meta-monitoring/README.md
+++ b/charts/k8s-monitoring/docs/examples/meta-monitoring/README.md
@@ -18,6 +18,10 @@ destinations:
   - name: loki
     type: loki
     url: http://loki.loki.svc:3100/api/push
+  - name: loki
+    type: otlp
+    protocol: http
+    url: http://otlp-gateway.svc:443/otlp
 
 integrations:
   collector: alloy-singleton
@@ -80,6 +84,7 @@ clusterEvents:
   enabled: true
   collector: alloy-singleton
   namespaces:
+    - collectors
     - logs
     - metrics
     - o11y
@@ -97,7 +102,7 @@
       rule {
         action = "keep"
         source_labels = ["namespace"]
-        regex = "logs|metrics|o11y"
+        regex = "collectors|logs|metrics|o11y"
       }
   apiServer:
     enabled: false
@@ -118,7 +123,7 @@
       rule {
         action = "keep"
         source_labels = ["namespace"]
-        regex = "logs|metrics|o11y"
+        regex = "collectors|logs|metrics|o11y"
       }
   metricsTuning:
     useDefaultAllowList: false
@@ -145,10 +150,19 @@ podLogs:
   gatherMethod: kubernetesApi
   collector: alloy-singleton
   namespaces:
+    - collectors
     - logs
     - metrics
     - o11y
 
+applicationObservability:
+  enabled: true
+  receivers:
+    jaeger:
+      thriftHttp:
+        enabled: true
+        port: 14268
+
 # Collectors
 alloy-singleton:
   enabled: true
@@ -163,5 +177,11 @@ alloy-profiles:
   enabled: false
 
 alloy-receiver:
-  enabled: false
+  enabled: true
+  alloy:
+    extraPorts:
+      - name: jaeger-http
+        port: 14268
+        targetPort: 14268
+        protocol: TCP
 ```
diff --git a/charts/k8s-monitoring/docs/examples/meta-monitoring/alloy-receiver.alloy b/charts/k8s-monitoring/docs/examples/meta-monitoring/alloy-receiver.alloy
index e69de29bb..cc8d26e1e 100644
--- a/charts/k8s-monitoring/docs/examples/meta-monitoring/alloy-receiver.alloy
+++ b/charts/k8s-monitoring/docs/examples/meta-monitoring/alloy-receiver.alloy
@@ -0,0 +1,227 @@
+// Destination: prometheus (prometheus)
+otelcol.exporter.prometheus "prometheus" {
+  add_metric_suffixes = true
+  forward_to = [prometheus.remote_write.prometheus.receiver]
+}
+
+prometheus.remote_write "prometheus" {
+  endpoint {
+    url = "http://prometheus.prometheus.svc:9090/api/v1/write"
+    headers = {
+    }
+    tls_config {
+      insecure_skip_verify = false
+    }
+    send_native_histograms = false
+
+    queue_config {
+      capacity = 10000
+      min_shards = 1
+      max_shards = 50
+      max_samples_per_send = 2000
+      batch_send_deadline = "5s"
+      min_backoff = "30ms"
+      max_backoff = "5s"
+      retry_on_http_429 = true
+      sample_age_limit = "0s"
+    }
+
+    write_relabel_config {
+      source_labels = ["cluster"]
+      regex = ""
+      replacement = "loki-meta-monitoring-cluster"
+      target_label = "cluster"
+    }
+    write_relabel_config {
+      source_labels = ["k8s.cluster.name"]
+      regex = ""
+      replacement = "loki-meta-monitoring-cluster"
+      target_label = "cluster"
+    }
+  }
+
+  wal {
+    truncate_frequency = "2h"
+    min_keepalive_time = "5m"
+    max_keepalive_time = "8h"
+  }
+}
+// Destination: loki (loki)
+otelcol.exporter.loki "loki" {
+  forward_to = [loki.write.loki.receiver]
+}
+
+loki.write "loki" {
+  endpoint {
+    url = "http://loki.loki.svc:3100/api/push"
+    tls_config {
+      insecure_skip_verify = false
+    }
+  }
+  external_labels = {
+    cluster = "loki-meta-monitoring-cluster",
+    "k8s_cluster_name" = "loki-meta-monitoring-cluster",
+  }
+}
+// Destination: loki (otlp)
+
+otelcol.processor.attributes "loki" {
+  action {
+    key = "cluster"
+    action = "upsert"
+    value = "loki-meta-monitoring-cluster"
+  }
+  action {
+    key = "k8s.cluster.name"
+    action = "upsert"
+    value = "loki-meta-monitoring-cluster"
+  }
+  output {
+    metrics = [otelcol.processor.transform.loki.input]
+    logs = [otelcol.processor.transform.loki.input]
+    traces = [otelcol.processor.transform.loki.input]
+  }
+}
+
+otelcol.processor.transform "loki" {
+  error_mode = "ignore"
+
+  output {
+    metrics = [otelcol.processor.batch.loki.input]
+    logs = [otelcol.processor.batch.loki.input]
+    traces = [otelcol.processor.batch.loki.input]
+  }
+}
+
+otelcol.processor.batch "loki" {
+  timeout = "2s"
+  send_batch_size = 8192
+  send_batch_max_size = 0
+
+  output {
+    metrics = [otelcol.exporter.otlphttp.loki.input]
+    logs = [otelcol.exporter.otlphttp.loki.input]
+    traces = [otelcol.exporter.otlphttp.loki.input]
+  }
+}
+otelcol.exporter.otlphttp "loki" {
+  client {
+    endpoint = "http://otlp-gateway.svc:443/otlp"
+    tls {
+      insecure = false
+      insecure_skip_verify = false
+    }
+  }
+}
+
+// Feature: Application Observability
+declare "application_observability" {
+  argument "metrics_destinations" {
+    comment = "Must be a list of metrics destinations where collected metrics should be forwarded to"
+  }
+
+  argument "logs_destinations" {
+    comment = "Must be a list of log destinations where collected logs should be forwarded to"
+  }
+
+  argument "traces_destinations" {
+    comment = "Must be a list of trace destinations where collected trace should be forwarded to"
+  }
+
+  // Receivers --> Resource Detection Processor
+  otelcol.receiver.jaeger "receiver" {
+    protocols {
+      thrift_http {
+        endpoint = "0.0.0.0:14268"
+      }
+    }
+
+    debug_metrics {
+      disable_high_cardinality_metrics = true
+    }
+    output {
+      traces = [otelcol.processor.resourcedetection.default.input]
+    }
+  }
+
+  // Resource Detection Processor --> K8s Attribute Processor
+  otelcol.processor.resourcedetection "default" {
+    detectors = ["env", "system"]
+    system {
+      hostname_sources = ["os"]
+    }
+
+    output {
+      metrics = [otelcol.processor.k8sattributes.default.input]
+      logs = [otelcol.processor.k8sattributes.default.input]
+      traces = [otelcol.processor.k8sattributes.default.input]
+    }
+  }
+
+  // K8s Attribute Processor --> Transform Processor
+  // Resource Detection Processor Traces --> Host Info Connector
+  otelcol.processor.k8sattributes "default" {
+    extract {
+      metadata = ["k8s.namespace.name","k8s.pod.name","k8s.deployment.name","k8s.statefulset.name","k8s.daemonset.name","k8s.cronjob.name","k8s.job.name","k8s.node.name","k8s.pod.uid","k8s.pod.start_time"]
+    }
+    pod_association {
+      source {
+        from = "connection"
+      }
+    }
+
+    output {
+      metrics = [otelcol.processor.transform.default.input]
+      logs = [otelcol.processor.transform.default.input]
+      traces = [otelcol.processor.transform.default.input, otelcol.connector.host_info.default.input]
+    }
+  }
+  // Host Info Connector --> Batch Processor
+  otelcol.connector.host_info "default" {
+    host_identifiers = [ "k8s.node.name" ]
+
+    output {
+      metrics = [otelcol.processor.batch.default.input]
+    }
+  }
+
+
+  // Transform Processor --> Batch Processor
+  otelcol.processor.transform "default" {
+    error_mode = "ignore"
+    log_statements {
+      context = "resource"
+      statements = [
+        "set(attributes[\"pod\"], attributes[\"k8s.pod.name\"])",
+        "set(attributes[\"namespace\"], attributes[\"k8s.namespace.name\"])",
+        "set(attributes[\"loki.resource.labels\"], \"cluster, namespace, job, pod\")",
+      ]
+    }
+
+    output {
+      metrics = [otelcol.processor.batch.default.input]
+      logs = [otelcol.processor.batch.default.input]
+      traces = [otelcol.processor.batch.default.input]
+    }
+  }
+
+  // Batch Processor --> Destinations
+  otelcol.processor.batch "default" {
+    output {
+      metrics = argument.metrics_destinations.value
+      logs = argument.logs_destinations.value
+      traces = argument.traces_destinations.value
+    }
+  }
+}
+application_observability "feature" {
+  metrics_destinations = [
+    otelcol.exporter.prometheus.prometheus.input,
+  ]
+  logs_destinations = [
+    otelcol.exporter.loki.loki.input,
+  ]
+  traces_destinations = [
+    otelcol.processor.attributes.loki.input,
+  ]
+}
diff --git a/charts/k8s-monitoring/docs/examples/meta-monitoring/alloy-singleton.alloy b/charts/k8s-monitoring/docs/examples/meta-monitoring/alloy-singleton.alloy
index d2e5b3390..fa395d220 100644
--- a/charts/k8s-monitoring/docs/examples/meta-monitoring/alloy-singleton.alloy
+++ b/charts/k8s-monitoring/docs/examples/meta-monitoring/alloy-singleton.alloy
@@ -63,6 +63,56 @@ loki.write "loki" {
     "k8s_cluster_name" = "loki-meta-monitoring-cluster",
   }
 }
+// Destination: loki (otlp)
+
+otelcol.processor.attributes "loki" {
+  action {
+    key = "cluster"
+    action = "upsert"
+    value = "loki-meta-monitoring-cluster"
+  }
+  action {
+    key = "k8s.cluster.name"
+    action = "upsert"
+    value = "loki-meta-monitoring-cluster"
+  }
+  output {
+    metrics = [otelcol.processor.transform.loki.input]
+    logs = [otelcol.processor.transform.loki.input]
+    traces = [otelcol.processor.transform.loki.input]
+  }
+}
+
+otelcol.processor.transform "loki" {
+  error_mode = "ignore"
+
+  output {
+    metrics = [otelcol.processor.batch.loki.input]
+    logs = [otelcol.processor.batch.loki.input]
+    traces = [otelcol.processor.batch.loki.input]
+  }
+}
+
+otelcol.processor.batch "loki" {
+  timeout = "2s"
+  send_batch_size = 8192
+  send_batch_max_size = 0
+
+  output {
+    metrics = [otelcol.exporter.otlphttp.loki.input]
+    logs = [otelcol.exporter.otlphttp.loki.input]
+    traces = [otelcol.exporter.otlphttp.loki.input]
+  }
+}
+otelcol.exporter.otlphttp "loki" {
+  client {
+    endpoint = "http://otlp-gateway.svc:443/otlp"
+    tls {
+      insecure = false
+      insecure_skip_verify = false
+    }
+  }
+}
 
 // Feature: Cluster Metrics
 declare "cluster_metrics" {
@@ -161,7 +211,7 @@
       rule {
         action = "keep"
         source_labels = ["namespace"]
-        regex = "logs|metrics|o11y"
+        regex = "collectors|logs|metrics|o11y"
       }
       forward_to = argument.metrics_destinations.value
     }
@@ -201,7 +251,7 @@
       rule {
         action = "keep"
         source_labels = ["namespace"]
-        regex = "logs|metrics|o11y"
+        regex = "collectors|logs|metrics|o11y"
      }
       forward_to = argument.metrics_destinations.value
     }
@@ -259,7 +309,7 @@ declare "cluster_events" {
   loki.source.kubernetes_events "cluster_events" {
     job_name   = "integrations/kubernetes/eventhandler"
     log_format = "logfmt"
-    namespaces = ["logs","metrics","o11y"]
+    namespaces = ["collectors","logs","metrics","o11y"]
     forward_to = [loki.process.cluster_events.receiver]
   }
 
@@ -456,7 +506,7 @@ declare "pod_logs" {
   discovery.kubernetes "pods" {
     role = "pod"
     namespaces {
-      names = ["logs","metrics","o11y"]
+      names = ["collectors","logs","metrics","o11y"]
     }
   }
 
diff --git a/charts/k8s-monitoring/docs/examples/meta-monitoring/output.yaml b/charts/k8s-monitoring/docs/examples/meta-monitoring/output.yaml
index 27c946631..7f3532ba8 100644
--- a/charts/k8s-monitoring/docs/examples/meta-monitoring/output.yaml
+++ b/charts/k8s-monitoring/docs/examples/meta-monitoring/output.yaml
@@ -1,4 +1,20 @@
 ---
+# Source: k8s-monitoring/charts/alloy-receiver/templates/serviceaccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: k8smon-alloy-receiver
+  namespace: default
+  labels:
+    helm.sh/chart: alloy-receiver-0.11.0
+    app.kubernetes.io/name: alloy-receiver
+    app.kubernetes.io/instance: k8smon
+
+    app.kubernetes.io/version: "v1.6.1"
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/part-of: alloy
+    app.kubernetes.io/component: rbac
+---
 # Source: k8s-monitoring/charts/alloy-singleton/templates/serviceaccount.yaml
 apiVersion: v1
 kind: ServiceAccount
@@ -122,6 +138,56 @@ data:
         "k8s_cluster_name" = "loki-meta-monitoring-cluster",
       }
     }
+    // Destination: loki (otlp)
+
+    otelcol.processor.attributes "loki" {
+      action {
+        key = "cluster"
+        action = "upsert"
+        value = "loki-meta-monitoring-cluster"
+      }
+      action {
+        key = "k8s.cluster.name"
+        action = "upsert"
+        value = "loki-meta-monitoring-cluster"
+      }
+      output {
+        metrics = [otelcol.processor.transform.loki.input]
+        logs = [otelcol.processor.transform.loki.input]
+        traces = [otelcol.processor.transform.loki.input]
+      }
+    }
+
+    otelcol.processor.transform "loki" {
+      error_mode = "ignore"
+
+      output {
+        metrics = [otelcol.processor.batch.loki.input]
+        logs = [otelcol.processor.batch.loki.input]
+        traces = [otelcol.processor.batch.loki.input]
+      }
+    }
+
+    otelcol.processor.batch "loki" {
+      timeout = "2s"
+      send_batch_size = 8192
+      send_batch_max_size = 0
+
+      output {
+        metrics = [otelcol.exporter.otlphttp.loki.input]
+        logs = [otelcol.exporter.otlphttp.loki.input]
+        traces = [otelcol.exporter.otlphttp.loki.input]
+      }
+    }
+    otelcol.exporter.otlphttp "loki" {
+      client {
+        endpoint = "http://otlp-gateway.svc:443/otlp"
+        tls {
+          insecure = false
+          insecure_skip_verify = false
+        }
+      }
+    }
 
     // Feature: Cluster Metrics
     declare "cluster_metrics" {
@@ -220,7 +286,7 @@ data:
           rule {
             action = "keep"
            source_labels = ["namespace"]
-            regex = "logs|metrics|o11y"
+            regex = "collectors|logs|metrics|o11y"
           }
           forward_to = argument.metrics_destinations.value
         }
@@ -260,7 +326,7 @@ data:
           rule {
            action = "keep"
            source_labels = ["namespace"]
-            regex = "logs|metrics|o11y"
+            regex = "collectors|logs|metrics|o11y"
           }
           forward_to = argument.metrics_destinations.value
         }
@@ -318,7 +384,7 @@ data:
     loki.source.kubernetes_events "cluster_events" {
       job_name   = "integrations/kubernetes/eventhandler"
       log_format = "logfmt"
-      namespaces = ["logs","metrics","o11y"]
+      namespaces = ["collectors","logs","metrics","o11y"]
       forward_to = [loki.process.cluster_events.receiver]
     }
 
@@ -515,7 +581,7 @@ data:
     discovery.kubernetes "pods" {
       role = "pod"
       namespaces {
-        names = ["logs","metrics","o11y"]
+        names = ["collectors","logs","metrics","o11y"]
      }
     }
 
@@ -1820,11 +1886,248 @@ data:
     grafana_kubernetes_monitoring_build_info{version="2.0.4", namespace="default"} 1
     # HELP grafana_kubernetes_monitoring_feature_info A metric to report the enabled features of the Kubernetes Monitoring Helm chart
     # TYPE grafana_kubernetes_monitoring_feature_info gauge
+    grafana_kubernetes_monitoring_feature_info{feature="applicationObservability", protocols="jaegerthrifthttp", version="1.0.0"} 1
     grafana_kubernetes_monitoring_feature_info{deployments="kube-state-metrics,node-exporter", feature="clusterMetrics", sources="cadvisor,kube-state-metrics,node-exporter", version="1.0.0"} 1
     grafana_kubernetes_monitoring_feature_info{feature="clusterEvents", version="1.0.0"} 1
     grafana_kubernetes_monitoring_feature_info{feature="podLogs", method="kubernetesApi", version="1.0.0"} 1
     grafana_kubernetes_monitoring_feature_info{feature="integrations", sources="alloy,loki", version="1.0.0"} 1
 ---
+# Source: k8s-monitoring/templates/alloy-config.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: k8smon-alloy-receiver
+  namespace: default
+data:
+  config.alloy: |-
+    // Destination: prometheus (prometheus)
+    otelcol.exporter.prometheus "prometheus" {
+      add_metric_suffixes = true
+      forward_to = [prometheus.remote_write.prometheus.receiver]
+    }
+
+    prometheus.remote_write "prometheus" {
+      endpoint {
+        url = "http://prometheus.prometheus.svc:9090/api/v1/write"
+        headers = {
+        }
+        tls_config {
+          insecure_skip_verify = false
+        }
+        send_native_histograms = false
+
+        queue_config {
+          capacity = 10000
+          min_shards = 1
+          max_shards = 50
+          max_samples_per_send = 2000
+          batch_send_deadline = "5s"
+          min_backoff = "30ms"
+          max_backoff = "5s"
+          retry_on_http_429 = true
+          sample_age_limit = "0s"
+        }
+
+        write_relabel_config {
+          source_labels = ["cluster"]
+          regex = ""
+          replacement = "loki-meta-monitoring-cluster"
+          target_label = "cluster"
+        }
+        write_relabel_config {
+          source_labels = ["k8s.cluster.name"]
+          regex = ""
+          replacement = "loki-meta-monitoring-cluster"
+          target_label = "cluster"
+        }
+      }
+
+      wal {
+        truncate_frequency = "2h"
+        min_keepalive_time = "5m"
+        max_keepalive_time = "8h"
+      }
+    }
+    // Destination: loki (loki)
+    otelcol.exporter.loki "loki" {
+      forward_to = [loki.write.loki.receiver]
+    }
+
+    loki.write "loki" {
+      endpoint {
+        url = "http://loki.loki.svc:3100/api/push"
+        tls_config {
+          insecure_skip_verify = false
+        }
+      }
+      external_labels = {
+        cluster = "loki-meta-monitoring-cluster",
+        "k8s_cluster_name" = "loki-meta-monitoring-cluster",
+      }
+    }
+    // Destination: loki (otlp)
+
+    otelcol.processor.attributes "loki" {
+      action {
+        key = "cluster"
+        action = "upsert"
+        value = "loki-meta-monitoring-cluster"
+      }
+      action {
+        key = "k8s.cluster.name"
+        action = "upsert"
+        value = "loki-meta-monitoring-cluster"
+      }
+      output {
+        metrics = [otelcol.processor.transform.loki.input]
+        logs = [otelcol.processor.transform.loki.input]
+        traces = [otelcol.processor.transform.loki.input]
+      }
+    }
+
+    otelcol.processor.transform "loki" {
+      error_mode = "ignore"
+
+      output {
+        metrics = [otelcol.processor.batch.loki.input]
+        logs = [otelcol.processor.batch.loki.input]
+        traces = [otelcol.processor.batch.loki.input]
+      }
+    }
+
+    otelcol.processor.batch "loki" {
+      timeout = "2s"
+      send_batch_size = 8192
+      send_batch_max_size = 0
+
+      output {
+        metrics = [otelcol.exporter.otlphttp.loki.input]
+        logs = [otelcol.exporter.otlphttp.loki.input]
+        traces = [otelcol.exporter.otlphttp.loki.input]
+      }
+    }
+    otelcol.exporter.otlphttp "loki" {
+      client {
+        endpoint = "http://otlp-gateway.svc:443/otlp"
+        tls {
+          insecure = false
+          insecure_skip_verify = false
+        }
+      }
+    }
+
+    // Feature: Application Observability
+    declare "application_observability" {
+      argument "metrics_destinations" {
+        comment = "Must be a list of metrics destinations where collected metrics should be forwarded to"
+      }
+
+      argument "logs_destinations" {
+        comment = "Must be a list of log destinations where collected logs should be forwarded to"
+      }
+
+      argument "traces_destinations" {
+        comment = "Must be a list of trace destinations where collected trace should be forwarded to"
+      }
+
+      // Receivers --> Resource Detection Processor
+      otelcol.receiver.jaeger "receiver" {
+        protocols {
+          thrift_http {
+            endpoint = "0.0.0.0:14268"
+          }
+        }
+
+        debug_metrics {
+          disable_high_cardinality_metrics = true
+        }
+        output {
+          traces = [otelcol.processor.resourcedetection.default.input]
+        }
+      }
+
+      // Resource Detection Processor --> K8s Attribute Processor
+      otelcol.processor.resourcedetection "default" {
+        detectors = ["env", "system"]
+        system {
+          hostname_sources = ["os"]
+        }
+
+        output {
+          metrics = [otelcol.processor.k8sattributes.default.input]
+          logs = [otelcol.processor.k8sattributes.default.input]
+          traces = [otelcol.processor.k8sattributes.default.input]
+        }
+      }
+
+      // K8s Attribute Processor --> Transform Processor
+      // Resource Detection Processor Traces --> Host Info Connector
+      otelcol.processor.k8sattributes "default" {
+        extract {
+          metadata = ["k8s.namespace.name","k8s.pod.name","k8s.deployment.name","k8s.statefulset.name","k8s.daemonset.name","k8s.cronjob.name","k8s.job.name","k8s.node.name","k8s.pod.uid","k8s.pod.start_time"]
+        }
+        pod_association {
+          source {
+            from = "connection"
+          }
+        }
+
+        output {
+          metrics = [otelcol.processor.transform.default.input]
+          logs = [otelcol.processor.transform.default.input]
+          traces = [otelcol.processor.transform.default.input, otelcol.connector.host_info.default.input]
+        }
+      }
+      // Host Info Connector --> Batch Processor
+      otelcol.connector.host_info "default" {
+        host_identifiers = [ "k8s.node.name" ]
+
+        output {
+          metrics = [otelcol.processor.batch.default.input]
+        }
+      }
+
+
+      // Transform Processor --> Batch Processor
+      otelcol.processor.transform "default" {
+        error_mode = "ignore"
+        log_statements {
+          context = "resource"
+          statements = [
+            "set(attributes[\"pod\"], attributes[\"k8s.pod.name\"])",
+            "set(attributes[\"namespace\"], attributes[\"k8s.namespace.name\"])",
+            "set(attributes[\"loki.resource.labels\"], \"cluster, namespace, job, pod\")",
+          ]
+        }
+
+        output {
+          metrics = [otelcol.processor.batch.default.input]
+          logs = [otelcol.processor.batch.default.input]
+          traces = [otelcol.processor.batch.default.input]
+        }
+      }
+
+      // Batch Processor --> Destinations
+      otelcol.processor.batch "default" {
+        output {
+          metrics = argument.metrics_destinations.value
+          logs = argument.logs_destinations.value
+          traces = argument.traces_destinations.value
+        }
+      }
+    }
+    application_observability "feature" {
+      metrics_destinations = [
+        otelcol.exporter.prometheus.prometheus.input,
+      ]
+      logs_destinations = [
+        otelcol.exporter.loki.loki.input,
+      ]
+      traces_destinations = [
+        otelcol.processor.attributes.loki.input,
+      ]
+    }
+---
 # Source: k8s-monitoring/templates/alloy-modules-configmaps.yaml
 apiVersion: v1
 kind: ConfigMap
@@ -3314,6 +3617,106 @@ data:
       }
     }
 ---
+# Source: k8s-monitoring/charts/alloy-receiver/templates/rbac.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: k8smon-alloy-receiver
+  labels:
+    helm.sh/chart: alloy-receiver-0.11.0
+    app.kubernetes.io/name: alloy-receiver
+    app.kubernetes.io/instance: k8smon
+
+    app.kubernetes.io/version: "v1.6.1"
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/part-of: alloy
+    app.kubernetes.io/component: rbac
+rules:
+  # Rules which allow discovery.kubernetes to function.
+  - apiGroups:
+      - ""
+      - "discovery.k8s.io"
+      - "networking.k8s.io"
+    resources:
+      - endpoints
+      - endpointslices
+      - ingresses
+      - nodes
+      - nodes/proxy
+      - nodes/metrics
+      - pods
+      - services
+    verbs:
+      - get
+      - list
+      - watch
+  # Rules which allow loki.source.kubernetes and loki.source.podlogs to work.
+  - apiGroups:
+      - ""
+    resources:
+      - pods
+      - pods/log
+      - namespaces
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - "monitoring.grafana.com"
+    resources:
+      - podlogs
+    verbs:
+      - get
+      - list
+      - watch
+  # Rules which allow mimir.rules.kubernetes to work.
+  - apiGroups: ["monitoring.coreos.com"]
+    resources:
+      - prometheusrules
+    verbs:
+      - get
+      - list
+      - watch
+  - nonResourceURLs:
+      - /metrics
+    verbs:
+      - get
+  # Rules for prometheus.kubernetes.*
+  - apiGroups: ["monitoring.coreos.com"]
+    resources:
+      - podmonitors
+      - servicemonitors
+      - probes
+    verbs:
+      - get
+      - list
+      - watch
+  # Rules which allow eventhandler to work.
+  - apiGroups:
+      - ""
+    resources:
+      - events
+    verbs:
+      - get
+      - list
+      - watch
+  # needed for remote.kubernetes.*
+  - apiGroups: [""]
+    resources:
+      - "configmaps"
+      - "secrets"
+    verbs:
+      - get
+      - list
+      - watch
+  # needed for otelcol.processor.k8sattributes
+  - apiGroups: ["apps"]
+    resources: ["replicasets"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["extensions"]
+    resources: ["replicasets"]
+    verbs: ["get", "list", "watch"]
+---
 # Source: k8s-monitoring/charts/alloy-singleton/templates/rbac.yaml
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
@@ -3570,6 +3973,29 @@ rules:
       - volumeattachments
     verbs: ["list", "watch"]
 ---
+# Source: k8s-monitoring/charts/alloy-receiver/templates/rbac.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: k8smon-alloy-receiver
+  labels:
+    helm.sh/chart: alloy-receiver-0.11.0
+    app.kubernetes.io/name: alloy-receiver
+    app.kubernetes.io/instance: k8smon
+
+    app.kubernetes.io/version: "v1.6.1"
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/part-of: alloy
+    app.kubernetes.io/component: rbac
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: k8smon-alloy-receiver
+subjects:
+  - kind: ServiceAccount
+    name: k8smon-alloy-receiver
+    namespace: default
+---
 # Source: k8s-monitoring/charts/alloy-singleton/templates/rbac.yaml
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
@@ -3616,6 +4042,36 @@ subjects:
     name: k8smon-kube-state-metrics
     namespace: default
 ---
+# Source: k8s-monitoring/charts/alloy-receiver/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: k8smon-alloy-receiver
+  labels:
+    helm.sh/chart: alloy-receiver-0.11.0
+    app.kubernetes.io/name: alloy-receiver
+    app.kubernetes.io/instance: k8smon
+
+    app.kubernetes.io/version: "v1.6.1"
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/part-of: alloy
+    app.kubernetes.io/component: networking
+spec:
+  type: ClusterIP
+  selector:
+    app.kubernetes.io/name: alloy-receiver
+    app.kubernetes.io/instance: k8smon
+  internalTrafficPolicy: Cluster
+  ports:
+    - name: http-metrics
+      port: 12345
+      targetPort: 12345
+      protocol: "TCP"
+    - name: jaeger-http
+      port: 14268
+      targetPort: 14268
+      protocol: TCP
+---
 # Source: k8s-monitoring/charts/alloy-singleton/templates/service.yaml
 apiVersion: v1
 kind: Service
@@ -3698,6 +4154,111 @@ spec:
     app.kubernetes.io/name: node-exporter
     app.kubernetes.io/instance: k8smon
 ---
+# Source: k8s-monitoring/charts/alloy-receiver/templates/controllers/daemonset.yaml
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: k8smon-alloy-receiver
+  labels:
+    helm.sh/chart: alloy-receiver-0.11.0
+    app.kubernetes.io/name: alloy-receiver
+    app.kubernetes.io/instance: k8smon
+
+    app.kubernetes.io/version: "v1.6.1"
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/part-of: alloy
+spec:
+  minReadySeconds: 10
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: alloy-receiver
+      app.kubernetes.io/instance: k8smon
+  template:
+    metadata:
+      annotations:
+        kubectl.kubernetes.io/default-container: alloy
+        k8s.grafana.com/logs.job: integrations/alloy
+      labels:
+        app.kubernetes.io/name: alloy-receiver
+        app.kubernetes.io/instance: k8smon
+    spec:
+      serviceAccountName: k8smon-alloy-receiver
+      containers:
+        - name: alloy
+          image: docker.io/grafana/alloy:v1.6.1
+          imagePullPolicy: IfNotPresent
+          args:
+            - run
+            - /etc/alloy/config.alloy
+            - --storage.path=/tmp/alloy
+            - --server.http.listen-addr=0.0.0.0:12345
+            - --server.http.ui-path-prefix=/
+            - --stability.level=generally-available
+          env:
+            - name: ALLOY_DEPLOY_MODE
+              value: "helm"
+            - name: HOSTNAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+          ports:
+            - containerPort: 12345
+              name: http-metrics
+            - containerPort: 14268
+              name: jaeger-http
+              protocol: TCP
+          readinessProbe:
+            httpGet:
+              path: /-/ready
+              port: 12345
+              scheme: HTTP
+            initialDelaySeconds: 10
+            timeoutSeconds: 1
+          securityContext:
+            allowPrivilegeEscalation: false
+            capabilities:
+              add:
+                - CHOWN
+                - DAC_OVERRIDE
+                - FOWNER
+                - FSETID
+                - KILL
+                - SETGID
+                - SETUID
+                - SETPCAP
+                - NET_BIND_SERVICE
+                - NET_RAW
+                - SYS_CHROOT
+                - MKNOD
+                - AUDIT_WRITE
+                - SETFCAP
+              drop:
+                - ALL
+            seccompProfile:
+              type: RuntimeDefault
+          volumeMounts:
+            - name: config
+              mountPath: /etc/alloy
+        - name: config-reloader
+          image: ghcr.io/jimmidyson/configmap-reload:v0.14.0
+          args:
+            - --volume-dir=/etc/alloy
+            - --webhook-url=http://localhost:12345/-/reload
+          volumeMounts:
+            - name: config
+              mountPath: /etc/alloy
+          resources:
+            requests:
+              cpu: 1m
+              memory: 5Mi
+      dnsPolicy: ClusterFirst
+      nodeSelector:
+        kubernetes.io/os: linux
+      volumes:
+        - name: config
+          configMap:
+            name: k8smon-alloy-receiver
+---
 # Source: k8s-monitoring/charts/clusterMetrics/charts/node-exporter/templates/daemonset.yaml
 apiVersion: apps/v1
 kind: DaemonSet
diff --git a/charts/k8s-monitoring/docs/examples/meta-monitoring/values.yaml b/charts/k8s-monitoring/docs/examples/meta-monitoring/values.yaml
index d9a5b42f2..c6beedc2c 100644
--- a/charts/k8s-monitoring/docs/examples/meta-monitoring/values.yaml
+++ b/charts/k8s-monitoring/docs/examples/meta-monitoring/values.yaml
@@ -9,6 +9,10 @@ destinations:
   - name: loki
     type: loki
     url: http://loki.loki.svc:3100/api/push
+  - name: loki
+    type: otlp
+    protocol: http
+    url: http://otlp-gateway.svc:443/otlp
 
 integrations:
   collector: alloy-singleton
@@ -71,6 +75,7 @@ clusterEvents:
   enabled: true
   collector: alloy-singleton
   namespaces:
+    - collectors
     - logs
     - metrics
     - o11y
@@ -88,7 +93,7 @@
       rule {
        action = "keep"
        source_labels = ["namespace"]
-        regex = "logs|metrics|o11y"
+        regex = "collectors|logs|metrics|o11y"
       }
   apiServer:
     enabled: false
@@ -109,7 +114,7 @@
       rule {
         action = "keep"
         source_labels = ["namespace"]
-        regex = "logs|metrics|o11y"
+        regex = "collectors|logs|metrics|o11y"
       }
   metricsTuning:
     useDefaultAllowList: false
@@ -136,10 +141,19 @@ podLogs:
   gatherMethod: kubernetesApi
   collector: alloy-singleton
   namespaces:
+    - collectors
     - logs
     - metrics
     - o11y
 
+applicationObservability:
+  enabled: true
+  receivers:
+    jaeger:
+      thriftHttp:
+        enabled: true
+        port: 14268
+
 # Collectors
 alloy-singleton:
   enabled: true
@@ -154,4 +168,10 @@ alloy-profiles:
   enabled: false
 
 alloy-receiver:
-  enabled: false
+  enabled: true
+  alloy:
+    extraPorts:
+      - name: jaeger-http
+        port: 14268
+        targetPort: 14268
+        protocol: TCP
diff --git a/charts/k8s-monitoring/templates/destinations/_destination_otlp.tpl b/charts/k8s-monitoring/templates/destinations/_destination_otlp.tpl
index 0491a23e1..407871660 100644
--- a/charts/k8s-monitoring/templates/destinations/_destination_otlp.tpl
+++ b/charts/k8s-monitoring/templates/destinations/_destination_otlp.tpl
@@ -263,10 +263,10 @@ otelcol.exporter.otlp {{ include "helper.alloy_name" .name | quote }} {
 otelcol.exporter.otlphttp {{ include "helper.alloy_name" .name | quote }} {
 {{- end }}
   client {
-{{- if .urlFrom }} 
+{{- if .urlFrom }}
     endpoint = {{ .urlFrom }}
 {{- else }}
-    endpoint = {{ .url | quote }} 
+    endpoint = {{ .url | quote }}
 {{- end }}
 {{- if eq .auth.type "basic" }}
     auth = otelcol.auth.basic.{{ include "helper.alloy_name" .name }}.handler
diff --git a/charts/k8s-monitoring/templates/destinations/_destination_validations.tpl b/charts/k8s-monitoring/templates/destinations/_destination_validations.tpl
index 51d533559..9ed1cb281 100644
--- a/charts/k8s-monitoring/templates/destinations/_destination_validations.tpl
+++ b/charts/k8s-monitoring/templates/destinations/_destination_validations.tpl
@@ -19,6 +19,13 @@
     {{ fail (printf "\nDestination #%d (%s) is using an unknown type (%s).\nPlease set:\ndestinations:\n - name: %s\n type: \"[%s]\"" $i $destination.name $destination.type $destination.name (include "english_list_or" $types)) }}
   {{- end }}
 
+  {{/* Check if OTLP destination using Grafana Cloud has protocol set */}}
+  {{- if and (eq $destination.type "otlp") ($destination.url) (contains ".grafana.net" $destination.url) }}
+    {{- if ne $destination.protocol "http" }}
+      {{ fail (printf "\nDestination #%d (%s) is using Grafana Cloud OTLP gateway but has incorrect protocol '%s', the gateway only supports 'http'.\nPlease set:\ndestinations:\n - name: %s\n type: otlp\n url: %s\n protocol: http" $i $destination.name ($destination.protocol | default "grpc (default)") $destination.name $destination.url) }}
+    {{- end }}
+  {{- end }}
+
   {{- if eq (include "secrets.authType" $destination) "basic" }}
     {{- if eq (include "secrets.usesSecret" (dict "object" $destination "key" "auth.username")) "false" }}
       {{ fail (printf "\nDestination #%d (%s) is using basic auth but does not have a username.\nPlease set:\ndestinations:\n - name: %s\n auth:\n type: basic\n username: my-username\n password: my-password" $i $destination.name $destination.name) }}
diff --git a/charts/k8s-monitoring/templates/features/_feature_application_observability.tpl b/charts/k8s-monitoring/templates/features/_feature_application_observability.tpl
index d449ce916..aad2be81f 100644
--- a/charts/k8s-monitoring/templates/features/_feature_application_observability.tpl
+++ b/charts/k8s-monitoring/templates/features/_feature_application_observability.tpl
@@ -62,13 +62,13 @@ application_observability "feature" {
     {{- include "collectors.require_extra_port" (dict "Values" $.Values "name" $collector "feature" $featureName "portNumber" $.Values.applicationObservability.receivers.jaeger.grpc.port "portName" "jaeger-grpc" "portProtocol" "TCP") }}
   {{- end -}}
   {{- if $.Values.applicationObservability.receivers.jaeger.thriftBinary.enabled }}
-    {{- include "collectors.require_extra_port" (dict "Values" $.Values "name" $collector "feature" $featureName "portNumber" $.Values.applicationObservability.receivers.jaeger.thriftBinary.port "portName" "jaeger-grpc" "portProtocol" "TCP") }}
+    {{- include "collectors.require_extra_port" (dict "Values" $.Values "name" $collector "feature" $featureName "portNumber" $.Values.applicationObservability.receivers.jaeger.thriftBinary.port "portName" "jaeger-binary" "portProtocol" "TCP") }}
   {{- end -}}
   {{- if $.Values.applicationObservability.receivers.jaeger.thriftCompact.enabled }}
-    {{- include "collectors.require_extra_port" (dict "Values" $.Values "name" $collector "feature" $featureName "portNumber" $.Values.applicationObservability.receivers.jaeger.thriftCompact.port "portName" "jaeger-grpc" "portProtocol" "TCP") }}
+    {{- include "collectors.require_extra_port" (dict "Values" $.Values "name" $collector "feature" $featureName "portNumber" $.Values.applicationObservability.receivers.jaeger.thriftCompact.port "portName" "jaeger-compact" "portProtocol" "TCP") }}
   {{- end -}}
   {{- if $.Values.applicationObservability.receivers.jaeger.thriftHttp.enabled }}
-    {{- include "collectors.require_extra_port" (dict "Values" $.Values "name" $collector "feature" $featureName "portNumber" $.Values.applicationObservability.receivers.jaeger.thriftHttp.port "portName" "jaeger-grpc" "portProtocol" "TCP") }}
+    {{- include "collectors.require_extra_port" (dict "Values" $.Values "name" $collector "feature" $featureName "portNumber" $.Values.applicationObservability.receivers.jaeger.thriftHttp.port "portName" "jaeger-http" "portProtocol" "TCP") }}
   {{- end -}}
   {{- include "feature.applicationObservability.validate" (dict "Values" $.Values.applicationObservability) }}
 {{- end -}}
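
Usage note (editor's sketch, not part of the patch): the values below are a minimal way to exercise the new Jaeger thrift HTTP receiver end to end, assembled from the meta-monitoring example in this change. Port 14268 is the conventional Jaeger thrift HTTP port; the matching extraPort on the collector is required, which the chart now enforces via collectors.require_extra_port with the corrected per-protocol port names.

    applicationObservability:
      enabled: true
      receivers:
        jaeger:
          thriftHttp:
            enabled: true   # renders otelcol.receiver.jaeger with a thrift_http endpoint on 0.0.0.0:14268
            port: 14268

    alloy-receiver:
      enabled: true
      alloy:
        extraPorts:
          - name: jaeger-http   # must match the "jaeger-http" port name the feature template now requires
            port: 14268
            targetPort: 14268
            protocol: TCP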