diff --git a/README.md b/README.md index cda35f3..0909b3b 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,44 @@ + + + + # OpenTelemetry demo GitOps This repository contains the Helm chart for [Astroshop](https://github.com/Dynatrace/opentelemetry-demo), an adaptation of the [OpenTelemetry Demo](https://github.com/open-telemetry/opentelemetry-demo) app, along with: @@ -34,8 +75,8 @@ If you want to deploy the ingress resources (by setting `components.ingress.enab To deploy the helm chart you will first need to set the required values [here](./config/helm-values/values.yaml) -- _components.dt-credentials.tenantEndpoint_ - tenant url including the `/api/v2/otlp`, e.g. **https://wkf10640.live.dynatrace.com/api/v2/otlp** -- _components.dt-credentials.tenantToken_ - access token using the `Kubernetes: Data Ingest` template +- _components.dt-credentials.collector_tenant_endpoint_ - the tenant URL including the `/api/v2/otlp` suffix, e.g. **https://wkf10640.live.dynatrace.com/api/v2/otlp** +- _components.dt-credentials.collector_tenant_token_ - an access token created with the `Kubernetes: Data Ingest` template then run diff --git a/charts/astroshop/templates/dt-credentials.yaml.tmpl b/charts/astroshop/templates/dt-credentials.yaml.tmpl index 8291492..2d9e5c7 100644 --- a/charts/astroshop/templates/dt-credentials.yaml.tmpl +++ b/charts/astroshop/templates/dt-credentials.yaml.tmpl @@ -7,7 +7,7 @@ metadata: type: Opaque {{- with index .Values "components" "dt-credentials" }} stringData: - DT_ENDPOINT: {{ required "[tenantEndpoint] is required when [dt-credentials] is enabled" .tenantEndpoint }} - DT_API_TOKEN: {{ required "[tenantToken] is required when [dt-credentials] is enabled" .tenantToken }} + DT_OTEL_ENDPOINT: {{ .collector_tenant_endpoint | required "collector_tenant_endpoint is required when dt-credentials is enabled. Set it via --set-string components.dt-credentials.collector_tenant_endpoint=value or the DT_OTEL_ENDPOINT environment variable" }} + DT_INGEST_TOKEN: {{ .collector_tenant_token | required "collector_tenant_token is required when dt-credentials is enabled. Set it via --set-string components.dt-credentials.collector_tenant_token=value or the DT_INGEST_TOKEN environment variable" }} {{- end }} {{- end }}
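With the renamed values, the README's "then run" step amounts to something like the following (a sketch: the release name, namespace, and chart path are assumptions, not fixed by this diff):

```sh
# install/upgrade the chart with the renamed dt-credentials values;
# the --set-string flags mirror the hints in the required-value messages above
helm upgrade --install astroshop ./charts/astroshop \
  --namespace astroshop --create-namespace \
  --values ./config/helm-values/values.yaml \
  --set-string components.dt-credentials.collector_tenant_endpoint=https://wkf10640.live.dynatrace.com/api/v2/otlp \
  --set-string components.dt-credentials.collector_tenant_token=dt0c01.abc.xxx
```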
diff --git a/charts/astroshop/templates/flagd-ui-service.yaml.issue_endpoint b/charts/astroshop/templates/flagd-ui-service.yaml.issue_endpoint new file mode 100644 index 0000000..7197110 --- /dev/null +++ b/charts/astroshop/templates/flagd-ui-service.yaml.issue_endpoint @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Service +metadata: + name: flagd-ui + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: flagd-ui + app.kubernetes.io/component: flagd + app.kubernetes.io/part-of: opentelemetry-demo +spec: + type: ClusterIP + ports: + - name: ui + port: 4000 + targetPort: 4000 + protocol: TCP + selector: + app.kubernetes.io/name: flagd + app.kubernetes.io/component: flagd + \ No newline at end of file diff --git a/charts/astroshop/values.yaml b/charts/astroshop/values.yaml index 1f5ca91..6b15b07 100644 --- a/charts/astroshop/values.yaml +++ b/charts/astroshop/values.yaml @@ -1,11 +1,11 @@ components: dt-credentials: - # when enabled it requires the `tenantEndpoint` and `tenantToken` values to be set + # when enabled it requires the `collector_tenant_endpoint` and `collector_tenant_token` values to be set # when disabled make sure to create a `dt-credentials` secret with - # DT_ENDPOINT and DT_API_TOKEN values + # DT_OTEL_ENDPOINT and DT_INGEST_TOKEN values enabled: false - # tenantEndpoint: https://wkf10640.live.dynatrace.com/api/v2/otlp # example endpoint - # tenantToken: dt0c01.abc.xxx + # collector_tenant_endpoint: https://wkf10640.live.dynatrace.com/api/v2/otlp # example endpoint + # collector_tenant_token: dt0c01.abc.xxx ingress: enabled: true # used for setting host in ingress, the url set in loadgen should match this @@ -134,16 +134,67 @@ opentelemetry-demo: memory: 512Mi # --------------------------------------------------- flagd: + enabled: true + imageOverride: + repository: "ghcr.io/open-feature/flagd" + tag: "v0.11.1" + useDefault: + env: true podAnnotations: - dynatrace.com/inject: "false" - metadata.dynatrace.com/process.technology: "flagd" + oneagent.dynatrace.com/inject: "false" + metadata.dynatrace.com/process.technology: "flagd" + replicas: 1 + #service: + # port: 8013 + envOverrides: + - name: FLAGD_METRICS_EXPORTER + value: otel + - name: FLAGD_OTEL_COLLECTOR_URI + value: $(OTEL_COLLECTOR_NAME):4317 resources: - requests: - cpu: 25m - memory: 300Mi limits: - cpu: 250m memory: 300Mi + command: + - "/flagd-build" + - "start" + - "--uri" + - "file:./etc/flagd/demo.flagd.json" + mountedEmptyDirs: + - name: config-rw + mountPath: /etc/flagd + # flagd-ui as a sidecar container in the same pod so the flag json file can be shared + sidecarContainers: + - name: flagd-ui + useDefault: + env: true + service: + port: 4000 + envOverrides: + - name: FLAGD_METRICS_EXPORTER + value: otel + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: http://$(OTEL_COLLECTOR_NAME):4318 + - name: SECRET_KEY_BASE + value: "fHaF1lg9V+fJh1C6OhYFqfs/9PpQxpr8dwRYZ86P9ZlEGiTAP33lhu1Ya6iow9v0" + resources: + limits: + memory: 300Mi + volumeMounts: + - name: config-rw + mountPath: /app/data + initContainers: + - name: init-config + image: busybox + command: ['sh', '-c', 'cp /config-ro/demo.flagd.json /config-rw/demo.flagd.json && cat /config-rw/demo.flagd.json'] + volumeMounts: + - mountPath: /config-ro + name: config-ro + - mountPath: /config-rw + name: config-rw + additionalVolumes: + - name: config-ro + configMap: + name: 'flagd-config'
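The flagd block above can be spot-checked once deployed (a sketch: the `astroshop` namespace is an assumption; the `flagd-ui` Service comes from the new template earlier in this diff):

```sh
# the flagd-ui sidecar listens on port 4000 behind its ClusterIP service
kubectl -n astroshop port-forward svc/flagd-ui 4000:4000
# then browse http://localhost:4000 to edit the shared demo.flagd.json
```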
# --------------------------------------------------- fraud-detection: podAnnotations: @@ -152,10 +203,10 @@ opentelemetry-demo: resources: requests: cpu: 25m #100 - memory: 300Mi + memory: 256Mi limits: cpu: 250m - memory: 300Mi + memory: 512Mi # --------------------------------------------------- frontend: podAnnotations: @@ -185,7 +236,7 @@ # --------------------------------------------------- frontend-proxy: - # disable frontend proxy since we're using nginx ingress - enabled: false + # enable the frontend proxy; envoy/envoy.yaml routes /feature to the flagd-ui sidecar + enabled: true # --------------------------------------------------- image-provider: podAnnotations: @@ -217,7 +268,7 @@ memory: 64Mi # --------------------------------------------------- load-generator: - replicas: 2 + replicas: 1 podAnnotations: dynatrace.com/inject: "false" metadata.dynatrace.com/process.technology: "python" @@ -387,9 +438,9 @@ sampling_initial: 5 sampling_thereafter: 2000 otlphttp: - endpoint: "${env:DT_ENDPOINT}" + endpoint: "${env:DT_OTEL_ENDPOINT}" headers: - Authorization: "Api-Token ${env:DT_API_TOKEN}" + Authorization: "Api-Token ${env:DT_INGEST_TOKEN}" extensions: health_check: endpoint: "0.0.0.0:13133"
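With the exporter now reading `DT_OTEL_ENDPOINT` and `DT_INGEST_TOKEN`, the credentials can be sanity-checked before a rollout (a sketch: assumes both variables are exported locally with the same values that land in the `dt-credentials` secret):

```sh
# an empty OTLP/HTTP metrics payload should return HTTP 200 when endpoint and token are valid
curl -s -o /dev/null -w "%{http_code}\n" \
  -X POST "${DT_OTEL_ENDPOINT}/v1/metrics" \
  -H "Authorization: Api-Token ${DT_INGEST_TOKEN}" \
  -H "Content-Type: application/json" \
  -d '{"resourceMetrics":[]}'
```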
diff --git a/charts/astroshop/values.yaml.flagdui.test b/charts/astroshop/values.yaml.flagdui.test new file mode 100644 index 0000000..44bf243 --- /dev/null +++ b/charts/astroshop/values.yaml.flagdui.test @@ -0,0 +1,743 @@ +components: + dt-credentials: + # when enabled it requires the `collector_tenant_endpoint` and `collector_tenant_token` values to be set + # when disabled make sure to create a `dt-credentials` secret with + # DT_OTEL_ENDPOINT and DT_INGEST_TOKEN values + enabled: false + # collector_tenant_endpoint: https://wkf10640.live.dynatrace.com/api/v2/otlp # example endpoint + # collector_tenant_token: dt0c01.abc.xxx + ingress: + enabled: true + # used for setting host in ingress, the url set in loadgen should match this + # host: "" + # the ip whitelist used for the ingress + # allows for limiting the allowed IPs, leave empty to not add the restriction + # two separate fields are given to allow for more complicated setups + ipWhitelist: + base: [] + extra: [] + product-db-connection: + # when enabled, a secret with the name `product-db-connection` will be created + # set to disabled if you want to provide those values in another way + enabled: true + # these are default values for the otel postgresql service + # if there are any changes to the db setup make sure they are reflected here + connectionString: postgresql://otelu:otelp@postgresql:5432/otel + +opentelemetry-demo: + default: + image: + repository: europe-docker.pkg.dev/dynatrace-demoability/docker/astroshop + tag: 237cf09 + components: + # --------------------------------------------------- + accounting: + podAnnotations: + metadata.dynatrace.com/process.technology: ".NET" + envOverrides: + - name: OTEL_DOTNET_AUTO_INSTRUMENTATION_ENABLED + value: "false" # Avoid duplicate spans from OA and Otel - https://opentelemetry.io/docs/zero-code/net/instrumentations/ + - name: OTEL_TRACES_EXPORTER + value: "none" # 'console', 'none', 'otlp' + - name: OTEL_LOGS_EXPORTER + value: "none" # 'console', 'none', 'otlp' + - name: OTEL_METRICS_EXPORTER + value: "console,otlp" # 'console', 'none', 'otlp' + resources: + requests: + cpu: 25m + memory: 256Mi + limits: + cpu: 250m + memory: 512Mi + # --------------------------------------------------- + ad: + podAnnotations: + metadata.dynatrace.com/process.technology: "Java" + envOverrides: + - name: JAVA_TOOL_OPTIONS + value: "" # '-javaagent:/usr/src/app/opentelemetry-javaagent.jar' # - Duplicate spans from OA and Otel are avoided automatically - https://docs.dynatrace.com/docs/shortlink/opentelemetry-oneagent#java-span-dropping + - name: OTEL_TRACES_EXPORTER + value: "none" # 'console', 'none', 'otlp' + - name: OTEL_LOGS_EXPORTER + value: "none" # 'console', 'none', 'otlp' + - name: OTEL_METRICS_EXPORTER + value: "console,otlp" # 'console', 'none', 'otlp' + resources: + requests: + cpu: 25m # 100 + memory: 256Mi + limits: + cpu: 250m # 100 + memory: 512Mi + # --------------------------------------------------- + cart: + podAnnotations: + metadata.dynatrace.com/process.technology: ".NET" + envOverrides: + - name: OTEL_DOTNET_AUTO_INSTRUMENTATION_ENABLED + value: "false" # Avoid duplicate spans from OA and Otel - https://opentelemetry.io/docs/zero-code/net/instrumentations/ + - name: OTEL_TRACES_EXPORTER + value: "none" # 'console', 'none', 'otlp' + - name: OTEL_LOGS_EXPORTER + value: "none" # 'console', 'none', 'otlp' + - name: OTEL_METRICS_EXPORTER + value: "console,otlp" # 'console', 'none', 'otlp' + resources: + requests: + cpu: 25m + memory: 256Mi + limits: + cpu: 250m + memory: 512Mi + # --------------------------------------------------- + checkout: + podAnnotations: + metadata.dynatrace.com/process.technology: "go" + envOverrides: + - name: OTEL_TRACES_EXPORTER + value: "none" # 'console', 'none', 'otlp' + - name: OTEL_LOGS_EXPORTER + value: "none" # 'console', 'none', 'otlp' + - name: OTEL_METRICS_EXPORTER + value: "console,otlp" # 'console', 'none', 'otlp' + resources: + requests: + cpu: 25m + memory: 64Mi + limits: + cpu: 250m + memory: 512Mi + # --------------------------------------------------- + currency: + podAnnotations: + dynatrace.com/inject: "false" + metadata.dynatrace.com/process.technology: "cpp" + resources: + requests: + cpu: 25m + memory: 32Mi + limits: + cpu: 250m + memory: 32Mi + # --------------------------------------------------- + email: + podAnnotations: + dynatrace.com/inject: "false" + metadata.dynatrace.com/process.technology: "ruby" + resources: + requests: + cpu: 25m + memory: 512Mi + limits: + cpu: 250m + memory: 512Mi + # --------------------------------------------------- + flagd: + enabled: true + imageOverride: + repository: "ghcr.io/open-feature/flagd" + tag: "v0.11.1" + useDefault: + env: true + podAnnotations: + oneagent.dynatrace.com/inject: "false" + metadata.dynatrace.com/process.technology: "flagd" + replicas: 1 + #service: + # port: 8013 + envOverrides: + - name: FLAGD_METRICS_EXPORTER + value: otel + - name: FLAGD_OTEL_COLLECTOR_URI + value: $(OTEL_COLLECTOR_NAME):4317 + resources: + limits: + memory: 300Mi + command: + - "/flagd-build" + - "start" + - "--uri" + - "file:./etc/flagd/demo.flagd.json" + mountedEmptyDirs: + - name: config-rw + mountPath: /etc/flagd + # flagd-ui as a sidecar container in the same pod so the flag json file can be shared + sidecarContainers: + - name: flagd-ui + useDefault: + env: false + service: + port: 4000 + env: + - name: OTEL_SERVICE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['app.kubernetes.io/component'] + - name: OTEL_COLLECTOR_NAME + value: otel-collector + - name: OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE + value: cumulative + - name: OTEL_RESOURCE_ATTRIBUTES + value: service.name=$(OTEL_SERVICE_NAME),service.namespace=opentelemetry-demo,service.version=2.1.3 + - name: FLAGD_METRICS_EXPORTER + value: otel + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: http://$(OTEL_COLLECTOR_NAME):4318 + - name: SECRET_KEY_BASE + value: "fHaF1lg9V+fJh1C6OhYFqfs/9PpQxpr8dwRYZ86P9ZlEGiTAP33lhu1Ya6iow9v0" + - name: PHX_HOST + value: "localhost" + - name: PHX_PORT + value: "4000" + - name: PORT + value: "4000" + - name: FLAGD_UI_SERVICE_HOST + value: "127.0.0.1" + - name: FLAGD_UI_SERVICE_PORT + value: "4000" + resources: + limits: + memory: 300Mi + volumeMounts: + - name: config-rw + mountPath: /app/data + initContainers: + - name: init-config + image: busybox + command: ['sh', '-c', 'cp /config-ro/demo.flagd.json /config-rw/demo.flagd.json && cat /config-rw/demo.flagd.json'] + volumeMounts: + - mountPath: /config-ro + name: config-ro + - mountPath: /config-rw + name: config-rw + additionalVolumes: + - name: config-ro + configMap: + name: 'flagd-config'
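The `init-config` container above seeds the shared `config-rw` emptyDir from the read-only ConfigMap, so flagd and flagd-ui read and write the same flag file. A quick post-deploy check (a sketch: the `deploy/flagd` name and the namespace are assumptions based on the chart's component naming):

```sh
# both containers in the flagd pod should see the same demo.flagd.json
kubectl -n astroshop exec deploy/flagd -c flagd -- cat /etc/flagd/demo.flagd.json
kubectl -n astroshop exec deploy/flagd -c flagd-ui -- ls -l /app/data/demo.flagd.json
```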
+ # --------------------------------------------------- + fraud-detection: + podAnnotations: + dynatrace.com/inject: "false" + metadata.dynatrace.com/process.technology: "Java" + resources: + requests: + cpu: 25m #100 + memory: 256Mi + limits: + cpu: 250m + memory: 512Mi + # --------------------------------------------------- + frontend: + podAnnotations: + metadata.dynatrace.com/process.technology: "nodejs" + envOverrides: + - name: OTEL_TRACES_EXPORTER + value: "none" # 'console', 'none', 'otlp' + - name: OTEL_LOGS_EXPORTER + value: "none" # 'console', 'none', 'otlp' + - name: OTEL_METRICS_EXPORTER + value: "console,otlp" # 'console', 'none', 'otlp' + - name: OTEL_NODE_DISABLED_INSTRUMENTATIONS # https://github.com/open-telemetry/opentelemetry-js-contrib/blob/167dced09de0d2104561542b4f83047fa656505f/metapackages/auto-instrumentations-node/package.json#L51 + value: "" # other examples - http,grpc,dns,net + - name: NODE_OPTIONS + value: "" # - do not instrument at all with things like '-r ./Instrumentation.js' Avoid duplicate spans from OA and Otel - https://opentelemetry.io/docs/zero-code/js/ + - name: PUBLIC_OTEL_EXPORTER_OTLP_TRACES_ENDPOINT # This is used on the client-side for sending traces to the backend + value: "" + - name: NEXT_OTEL_VERBOSE + value: "0" # This expects users to use `kubectl port-forward ...` + resources: + requests: + cpu: 500m + memory: 300Mi + limits: + cpu: "1" + memory: 512Mi + # --------------------------------------------------- + frontend-proxy: + # frontend proxy enabled; envoy/envoy.yaml routes /feature to the flagd-ui sidecar + enabled: true + # --------------------------------------------------- + image-provider: + podAnnotations: + dynatrace.com/inject: "false" + metrics.dynatrace.com/port: "9113" # https://www.dynatrace.com/news/blog/simplify-observability-for-all-your-custom-metrics-part-4-prometheus/ + metrics.dynatrace.com/scrape: "true" + metadata.dynatrace.com/process.technology: "nginx" + sidecarContainers: + - name: nginx-exporter + imageOverride: + repository: "nginx/nginx-prometheus-exporter" + tag: "1.3.0" + command: + [ + "/usr/bin/nginx-prometheus-exporter", + "--web.listen-address=:9113", + "--nginx.scrape-uri=http://localhost:8081/nginx/status", + ] + service: + port: 9113 + useDefault: + env: false + resources: + requests: + cpu: 25m + memory: 64Mi + limits: + cpu: 250m + memory: 64Mi + # --------------------------------------------------- + load-generator: + replicas: 1 + podAnnotations: + dynatrace.com/inject: "false" + metadata.dynatrace.com/process.technology: "python" + envOverrides: + - name: LOCUST_HOST + value: http://frontend-proxy:8080 + - name: LOCUST_USERS + value: "2" + - name: LOCUST_HEADLESS + value: "true" + resources: + requests: + cpu: "1" + memory: 2.5Gi + limits: + cpu: "2" + memory: 2.5Gi + # 
--------------------------------------------------- + payment: + podAnnotations: + metadata.dynatrace.com/process.technology: "nodejs" + envOverrides: + - name: OTEL_TRACES_EXPORTER + value: "none" # 'console', 'none', 'otlp' + - name: OTEL_LOGS_EXPORTER + value: "none" # 'console', 'none', 'otlp' + - name: OTEL_METRICS_EXPORTER + value: "console,otlp" # 'console', 'none', 'otlp' + - name: OTEL_NODE_DISABLED_INSTRUMENTATIONS # https://github.com/open-telemetry/opentelemetry-js-contrib/blob/167dced09de0d2104561542b4f83047fa656505f/metapackages/auto-instrumentations-node/package.json#L51 + value: "" # other examples - http,grpc,dns,net + - name: NODE_OPTIONS + value: "" # - do not instrument at all with things like '-r ./Instrumentation.js' Avoid duplicate spans from OA and Otel - https://opentelemetry.io/docs/zero-code/js/ + resources: + requests: + cpu: 25m + memory: 128Mi + limits: + cpu: 250m + memory: 512Mi + # --------------------------------------------------- + product-catalog: + podAnnotations: + metadata.dynatrace.com/process.technology: "go" + envOverrides: + - name: PRODUCT_CATALOG_DB_CONNECTION + valueFrom: + secretKeyRef: + name: product-db-connection + key: connection_string + resources: + requests: + cpu: 25m + memory: 64Mi + limits: + memory: 512Mi + # --------------------------------------------------- + quote: + podAnnotations: + dynatrace.com/inject: "false" + metadata.dynatrace.com/process.technology: "PHP" + envOverrides: + - name: OTEL_PHP_AUTOLOAD_ENABLED + value: "true" + resources: + requests: + cpu: 50m + memory: 64Mi + limits: + cpu: 50m + memory: 64Mi + # --------------------------------------------------- + recommendation: + podAnnotations: + dynatrace.com/inject: "false" + metadata.dynatrace.com/process.technology: "python" + resources: + requests: + cpu: 25m + memory: 64Mi + limits: + cpu: 250m + memory: 64Mi + # --------------------------------------------------- + shipping: + podAnnotations: + dynatrace.com/inject: "false" + metadata.dynatrace.com/process.technology: "rust" + resources: + requests: + cpu: 25m + memory: 64Mi + limits: + cpu: 250m + memory: 64Mi + # --------------------------------------------------- + kafka: + podAnnotations: + metadata.dynatrace.com/process.technology: "kafka" + envOverrides: + - name: KAFKA_OPTS + value: "-Dotel.jmx.target.system=kafka-broker" + resources: + requests: + cpu: 25m + memory: 512Mi + limits: + cpu: 250m + memory: 1024Mi + # --------------------------------------------------- + postgresql: + resources: + requests: + cpu: 25m + memory: 100Mi + limits: + cpu: 250m + memory: 100Mi + # --------------------------------------------------- + valkey-cart: + ports: + - name: valkey-cart + value: 6379 + podAnnotations: + dynatrace.com/inject: "false" + metrics.dynatrace.com/port: "9121" # https://www.dynatrace.com/news/blog/simplify-observability-for-all-your-custom-metrics-part-4-prometheus/ + metrics.dynatrace.com/scrape: "true" + metadata.dynatrace.com/process.technology: "redis" + sidecarContainers: + - name: valkey-exporter + command: ["/redis_exporter", "--web.listen-address=0.0.0.0:9121"] + imageOverride: + repository: "oliver006/redis_exporter" + tag: "v1.14.0" + service: + port: 9121 + useDefault: + env: false + resources: + requests: + cpu: 50m + memory: 64Mi + limits: + cpu: 50m + memory: 512Mi + ### --------------------------------------------------- + opensearch: + podAnnotations: + metadata.dynatrace.com/process.technology: "elasticsearch" + enabled: false + ### 
--------------------------------------------------- + opentelemetry-collector: + nameOverride: "otel-gateway-collector" + mode: "deployment" + podAnnotations: + dynatrace.com/inject: "false" + resources: + requests: + cpu: 25m + memory: 128Mi + limits: + cpu: 250m + memory: 512Mi + config: + exporters: + debug: + verbosity: basic + sampling_initial: 5 + sampling_thereafter: 2000 + otlphttp: + endpoint: "${env:DT_OTEL_ENDPOINT}" + headers: + Authorization: "Api-Token ${env:DT_INGEST_TOKEN}" + extensions: + health_check: + endpoint: "0.0.0.0:13133" + path: "/" + processors: + cumulativetodelta: {} + memory_limiter: + check_interval: 1s + limit_percentage: 75 + spike_limit_percentage: 15 + batch: + send_batch_size: 10000 + timeout: 10s + resourcedetection/aks: + detectors: [env, aks] + timeout: 2s + override: false + k8sattributes: + extract: + metadata: + - k8s.pod.name + - k8s.pod.uid + - k8s.deployment.name + - k8s.statefulset.name + - k8s.daemonset.name + - k8s.cronjob.name + - k8s.namespace.name + - k8s.node.name + - k8s.cluster.uid + pod_association: + - sources: + - from: resource_attribute + name: k8s.pod.name + - from: resource_attribute + name: k8s.namespace.name + - sources: + - from: resource_attribute + name: k8s.pod.ip + - sources: + - from: resource_attribute + name: k8s.pod.uid + - sources: + - from: connection + filter/ottl: + error_mode: ignore + traces: + span: + - | + resource.attributes["service.name"] == "checkout" or + resource.attributes["service.name"] == "frontend" or + resource.attributes["service.name"] == "payment" or + resource.attributes["service.name"] == "product-catalog" or + IsMatch(resource.attributes["host.name"], ".*product-catalog.*") + spanevent: + - | + resource.attributes["service.name"] == "checkout" or + resource.attributes["service.name"] == "frontend" or + resource.attributes["service.name"] == "payment" or + resource.attributes["service.name"] == "product-catalog" or + IsMatch(resource.attributes["host.name"], ".*product-catalog.*") + metrics: + metric: + - | + resource.attributes["service.name"] == "checkout" or + resource.attributes["service.name"] == "frontend" or + resource.attributes["service.name"] == "payment" or + resource.attributes["service.name"] == "product-catalog" or + IsMatch(resource.attributes["host.name"], ".*product-catalog.*") + logs: + log_record: + - | + resource.attributes["service.name"] == "checkout" or + resource.attributes["service.name"] == "frontend" or + resource.attributes["service.name"] == "payment" or + resource.attributes["service.name"] == "product-catalog" or + IsMatch(resource.attributes["host.name"], ".*product-catalog.*") + transform: + error_mode: ignore + trace_statements: + - context: resource + statements: + - set(attributes["k8s.workload.kind"], "statefulset") where IsString(attributes["k8s.statefulset.name"]) + - set(attributes["k8s.workload.name"], attributes["k8s.statefulset.name"]) where IsString(attributes["k8s.statefulset.name"]) + - set(attributes["k8s.workload.kind"], "deployment") where IsString(attributes["k8s.deployment.name"]) + - set(attributes["k8s.workload.name"], attributes["k8s.deployment.name"]) where IsString(attributes["k8s.deployment.name"]) + - set(attributes["k8s.workload.kind"], "daemonset") where IsString(attributes["k8s.daemonset.name"]) + - set(attributes["k8s.workload.name"], attributes["k8s.daemonset.name"]) where IsString(attributes["k8s.daemonset.name"]) + - set(attributes["k8s.cluster.id"], attributes["k8s.cluster.uid"]) where IsString(attributes["k8s.cluster.uid"]) + - 
context: span + statements: + # - set(name, "NO_NAME") where name == "" + # could be removed when https://github.com/vercel/next.js/pull/64852 is fixed upstream + - replace_pattern(name, "\\?.*", "") + - replace_match(name, "GET /api/products/*", "GET /api/products/{productId}") + log_statements: + - context: resource + statements: + - set(attributes["k8s.workload.kind"], "statefulset") where IsString(attributes["k8s.statefulset.name"]) + - set(attributes["k8s.workload.name"], attributes["k8s.statefulset.name"]) where IsString(attributes["k8s.statefulset.name"]) + - set(attributes["k8s.workload.kind"], "deployment") where IsString(attributes["k8s.deployment.name"]) + - set(attributes["k8s.workload.name"], attributes["k8s.deployment.name"]) where IsString(attributes["k8s.deployment.name"]) + - set(attributes["k8s.workload.kind"], "daemonset") where IsString(attributes["k8s.daemonset.name"]) + - set(attributes["k8s.workload.name"], attributes["k8s.daemonset.name"]) where IsString(attributes["k8s.daemonset.name"]) + - set(attributes["k8s.cluster.id"], attributes["k8s.cluster.uid"]) where IsString(attributes["k8s.cluster.uid"]) + metric_statements: + - context: resource + statements: + - set(attributes["k8s.workload.kind"], "statefulset") where IsString(attributes["k8s.statefulset.name"]) + - set(attributes["k8s.workload.name"], attributes["k8s.statefulset.name"]) where IsString(attributes["k8s.statefulset.name"]) + - set(attributes["k8s.workload.kind"], "deployment") where IsString(attributes["k8s.deployment.name"]) + - set(attributes["k8s.workload.name"], attributes["k8s.deployment.name"]) where IsString(attributes["k8s.deployment.name"]) + - set(attributes["k8s.workload.kind"], "daemonset") where IsString(attributes["k8s.daemonset.name"]) + - set(attributes["k8s.workload.name"], attributes["k8s.daemonset.name"]) where IsString(attributes["k8s.daemonset.name"]) + - set(attributes["k8s.cluster.id"], attributes["k8s.cluster.uid"]) where IsString(attributes["k8s.cluster.uid"]) + receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: ${env:MY_POD_IP}:4318 + hostmetrics: + root_path: /hostfs + scrapers: + cpu: + metrics: + system.cpu.utilization: + enabled: true + disk: {} + load: {} + filesystem: + exclude_mount_points: + mount_points: + - /dev/* + - /proc/* + - /sys/* + - /run/k3s/containerd/* + - /var/lib/docker/* + - /var/lib/kubelet/* + - /snap/* + match_type: regexp + exclude_fs_types: + fs_types: + - autofs + - binfmt_misc + - bpf + - cgroup2 + - configfs + - debugfs + - devpts + - devtmpfs + - fusectl + - hugetlbfs + - iso9660 + - mqueue + - nsfs + - overlay + - proc + - procfs + - pstore + - rpc_pipefs + - securityfs + - selinuxfs + - squashfs + - sysfs + - tracefs + match_type: strict + memory: + metrics: + system.memory.utilization: + enabled: true + network: {} + paging: {} + processes: {} + process: + mute_process_exe_error: true + mute_process_io_error: true + mute_process_user_error: true + service: + pipelines: + traces: + receivers: [otlp] + processors: + [ + memory_limiter, + resourcedetection/aks, + k8sattributes, + filter/ottl, + transform, + batch, + ] + exporters: [otlphttp, spanmetrics, debug] # debug + metrics: + receivers: [otlp, spanmetrics] # hostmetrics - permission denied + processors: + [ + memory_limiter, + cumulativetodelta, + resourcedetection/aks, + k8sattributes, + filter/ottl, + transform, + batch, + ] + exporters: [otlphttp, debug] # debug + logs: + receivers: [otlp] + processors: + [ + memory_limiter, + resourcedetection/aks, + 
k8sattributes, + filter/ottl, + transform, + batch, + ] + exporters: [otlphttp, debug] # debug + extensions: + - health_check + telemetry: + metrics: + address: ${env:MY_POD_IP}:8888 + level: detailed + readers: + - periodic: + exporter: + otlp: + endpoint: 127.0.0.1:4317 + protocol: grpc + interval: 10000 + timeout: 5000 + serviceAccount: + create: true + name: "astroshop-otel-gateway-collector" + extraEnvsFrom: + - secretRef: + name: dt-credentials + extraVolumes: + - name: hostfs + hostPath: + path: / + # This also supports template content, which will eventually be converted to yaml. + extraVolumeMounts: + - mountPath: /hostfs + name: hostfs + readOnly: true + ports: + jaeger-compact: + enabled: false + jaeger-thrift: + enabled: false + jaeger-grpc: + enabled: false + zipkin: + enabled: false + metrics: + enabled: false + livenessProbe: + initialDelaySeconds: 15 + periodSeconds: 5 + timeoutSeconds: 5 + httpGet: + port: 13133 + path: "/" + readinessProbe: + initialDelaySeconds: 15 + periodSeconds: 5 + timeoutSeconds: 5 + httpGet: + port: 13133 + path: "/" + ### --------------------------------------------------- + jaeger: + enabled: false + ### --------------------------------------------------- + prometheus: + enabled: false + ### --------------------------------------------------- + grafana: + enabled: false diff --git a/config/helm-values/test.yaml b/config/helm-values/test.yaml index e3ba1b9..41f6dee 100644 --- a/config/helm-values/test.yaml +++ b/config/helm-values/test.yaml @@ -3,5 +3,5 @@ components: host: ingress.host.test dt-credentials: enabled: true - tenantEndpoint: tenant.endpoint.test - tenantToken: tenant.token.test + collector_tenant_endpoint: tenant.endpoint.test + collector_tenant_token: tenant.token.test diff --git a/config/helm-values/values.yaml b/config/helm-values/values.yaml index 5d87050..b5b2667 100644 --- a/config/helm-values/values.yaml +++ b/config/helm-values/values.yaml @@ -4,5 +4,10 @@ components: # host: ingress.host.com dt-credentials: enabled: true - # tenantEndpoint: https://wkf10640.live.dynatrace.com/api/v2/otlp # example endpoint - # tenantToken: dt0c01.abc.xxx + collector_tenant_endpoint: https://abcde.live.dynatrace.com/api/v2/otlp # example endpoint + collector_tenant_token: dt0c01.XXXX + #frontend-proxy: + # disable frontend proxy since we're using nginx ingress + # enabled: true + #load-generator: + # replicas: 1 diff --git a/envoy/envoy.yaml b/envoy/envoy.yaml new file mode 100644 index 0000000..4a2e78c --- /dev/null +++ b/envoy/envoy.yaml @@ -0,0 +1,258 @@ +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + + +static_resources: + listeners: + - address: + socket_address: + address: 0.0.0.0 + port_value: 8080 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + codec_type: AUTO + stat_prefix: ingress_http + tracing: + spawn_upstream_span: true + provider: + name: envoy.tracers.opentelemetry + typed_config: + "@type": type.googleapis.com/envoy.config.trace.v3.OpenTelemetryConfig + grpc_service: + envoy_grpc: + cluster_name: opentelemetry_collector_grpc + timeout: 0.250s + service_name: frontend-proxy + resource_detectors: + - name: envoy.tracers.opentelemetry.resource_detectors.environment + typed_config: + "@type": type.googleapis.com/envoy.extensions.tracers.opentelemetry.resource_detectors.v3.EnvironmentResourceDetectorConfig + route_config: + name: 
local_route + virtual_hosts: + - name: frontend + domains: + - "*" + routes: + - match: { path: "/loadgen" } + redirect: { path_redirect: "/loadgen/" } + - match: { prefix: "/loadgen/" } + route: { cluster: loadgen, prefix_rewrite: "/" } + - match: { prefix: "/otlp-http/" } + route: { cluster: opentelemetry_collector_http, prefix_rewrite: "/" } + - match: { path: "/jaeger" } + redirect: { path_redirect: "/jaeger/" } + - match: { prefix: "/jaeger/" } + route: { cluster: jaeger } + - match: { path: "/grafana" } + redirect: { path_redirect: "/grafana/" } + - match: { prefix: "/grafana/" } + route: { cluster: grafana } + - match: { prefix: "/images/" } + route: { cluster: image-provider, prefix_rewrite: "/" } + - match: { prefix: "/flagservice/" } + route: { cluster: flagservice, prefix_rewrite: "/", timeout: 0s } + - match: { prefix: "/feature" } + route: + cluster: flagd-ui + prefix_rewrite: "/" + upgrade_configs: + - upgrade_type: websocket + - match: { prefix: "/" } # Default/catch-all route - keep last since prefix:"/" matches everything + route: { cluster: frontend } + http_filters: + - name: envoy.filters.http.fault + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.fault.v3.HTTPFault + max_active_faults: 100 + delay: + header_delay: {} + percentage: + numerator: 100 + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + access_log: + - name: envoy.access_loggers.open_telemetry + typed_config: + "@type": "type.googleapis.com/envoy.extensions.access_loggers.open_telemetry.v3.OpenTelemetryAccessLogConfig" + common_config: + log_name: "otel_envoy_access_log" + grpc_service: + envoy_grpc: + cluster_name: opentelemetry_collector_grpc + transport_api_version: "V3" + body: + # yamllint disable-line rule:line-length + string_value: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS% %CONNECTION_TERMINATION_DETAILS% \"%UPSTREAM_TRANSPORT_FAILURE_REASON%\" %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%UPSTREAM_HOST%\" %UPSTREAM_CLUSTER% %UPSTREAM_LOCAL_ADDRESS% %DOWNSTREAM_LOCAL_ADDRESS% %DOWNSTREAM_REMOTE_ADDRESS% %REQUESTED_SERVER_NAME% %ROUTE_NAME%\n" + resource_attributes: + values: + - key: "service.name" + value: + string_value: frontend-proxy + attributes: + values: + - key: "destination.address" + value: + string_value: "%UPSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%" + - key: "event.name" + value: + string_value: "proxy.access" + - key: "server.address" + value: + string_value: "%DOWNSTREAM_LOCAL_ADDRESS%" + - key: "source.address" + value: + string_value: "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%" + - key: "upstream.cluster" + value: + string_value: "%UPSTREAM_CLUSTER%" + - key: "upstream.host" + value: + string_value: "%UPSTREAM_HOST%" + - key: "user_agent.original" + value: + string_value: "%REQ(USER-AGENT)%" + - key: "url.full" + value: + string_value: "%REQ(:SCHEME)%://%REQ(:AUTHORITY)%%REQ(:PATH)%" + - key: "url.path" + value: + string_value: "%REQ(:PATH)%" + - key: "url.query" + value: + string_value: "%REQ(:QUERY)%" + - key: "url.template" + value: + string_value: "%ROUTE_NAME%" + clusters: + - name: opentelemetry_collector_grpc + type: STRICT_DNS + lb_policy: ROUND_ROBIN + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + 
"@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + load_assignment: + cluster_name: opentelemetry_collector_grpc + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: otel-collector + port_value: 4317 + - name: opentelemetry_collector_http + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: opentelemetry_collector_http + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: otel-collector + port_value: 4318 + - name: frontend + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: frontend + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: frontend + port_value: 8080 + - name: image-provider + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: image-provider + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: image-provider + port_value: 8081 + - name: flagservice + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: flagservice + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: flagd + port_value: 8013 + - name: flagd-ui + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: flagd-ui + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: flagd-ui + port_value: 4000 + - name: loadgen + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: loadgen + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: load-generator + port_value: 8089 + - name: grafana + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: grafana + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: grafana + port_value: 80 + - name: jaeger + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: jaeger + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: jaeger-query + port_value: 16686 +admin: + address: + socket_address: + address: 0.0.0.0 + port_value: 10000 +layered_runtime: + layers: + - name: static_layer_0 + static_layer: + envoy: + resource_limits: + listener: + example_listener_name: + connection_limit: 10000 diff --git a/flagd/flagd-ui-service.yaml b/flagd/flagd-ui-service.yaml new file mode 100644 index 0000000..99af33d --- /dev/null +++ b/flagd/flagd-ui-service.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Service +metadata: + name: flagd-ui + namespace: "astroshop" + labels: + app.kubernetes.io/name: flagd-ui + app.kubernetes.io/component: flagd + app.kubernetes.io/part-of: opentelemetry-demo +spec: + type: ClusterIP + ports: + - name: ui + port: 4000 + targetPort: 4000 + protocol: TCP + selector: + app.kubernetes.io/name: flagd + app.kubernetes.io/component: flagd + \ No newline at end of file
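With `frontend-proxy` re-enabled and the standalone `flagd-ui` Service applied, the Envoy route table above can be exercised end to end (a sketch: port 8080 matches the listener in envoy/envoy.yaml and the LOCUST_HOST value; the namespace is taken from flagd/flagd-ui-service.yaml):

```sh
kubectl -n astroshop apply -f flagd/flagd-ui-service.yaml
kubectl -n astroshop port-forward svc/frontend-proxy 8080:8080 &
# /feature should be proxied through Envoy to the flagd-ui sidecar on port 4000
curl -i http://localhost:8080/feature
```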